1diff -urNp linux-2.6.32.43/arch/alpha/include/asm/elf.h linux-2.6.32.43/arch/alpha/include/asm/elf.h
2--- linux-2.6.32.43/arch/alpha/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3+++ linux-2.6.32.43/arch/alpha/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
4@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
18diff -urNp linux-2.6.32.43/arch/alpha/include/asm/pgtable.h linux-2.6.32.43/arch/alpha/include/asm/pgtable.h
19--- linux-2.6.32.43/arch/alpha/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
20+++ linux-2.6.32.43/arch/alpha/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39diff -urNp linux-2.6.32.43/arch/alpha/kernel/module.c linux-2.6.32.43/arch/alpha/kernel/module.c
40--- linux-2.6.32.43/arch/alpha/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
41+++ linux-2.6.32.43/arch/alpha/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
42@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
51diff -urNp linux-2.6.32.43/arch/alpha/kernel/osf_sys.c linux-2.6.32.43/arch/alpha/kernel/osf_sys.c
52--- linux-2.6.32.43/arch/alpha/kernel/osf_sys.c 2011-03-27 14:31:47.000000000 -0400
53+++ linux-2.6.32.43/arch/alpha/kernel/osf_sys.c 2011-06-13 17:19:47.000000000 -0400
54@@ -431,7 +431,7 @@ SYSCALL_DEFINE2(osf_getdomainname, char
55 return -EFAULT;
56
57 len = namelen;
58- if (namelen > 32)
59+ if (len > 32)
60 len = 32;
61
62 down_read(&uts_sem);
63@@ -618,7 +618,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, comman
64 down_read(&uts_sem);
65 res = sysinfo_table[offset];
66 len = strlen(res)+1;
67- if (len > count)
68+ if ((unsigned long)len > (unsigned long)count)
69 len = count;
70 if (copy_to_user(buf, res, len))
71 err = -EFAULT;
72@@ -673,7 +673,7 @@ SYSCALL_DEFINE5(osf_getsysinfo, unsigned
73 return 1;
74
75 case GSI_GET_HWRPB:
76- if (nbytes < sizeof(*hwrpb))
77+ if (nbytes > sizeof(*hwrpb))
78 return -EINVAL;
79 if (copy_to_user(buffer, hwrpb, nbytes) != 0)
80 return -EFAULT;
81@@ -1035,6 +1035,7 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, i
82 {
83 struct rusage r;
84 long ret, err;
85+ unsigned int status = 0;
86 mm_segment_t old_fs;
87
88 if (!ur)
89@@ -1043,13 +1044,15 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, i
90 old_fs = get_fs();
91
92 set_fs (KERNEL_DS);
93- ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r);
94+ ret = sys_wait4(pid, (unsigned int __user *) &status, options,
95+ (struct rusage __user *) &r);
96 set_fs (old_fs);
97
98 if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
99 return -EFAULT;
100
101 err = 0;
102+ err |= put_user(status, ustatus);
103 err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
104 err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
105 err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
106@@ -1169,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long a
107 /* At this point: (!vma || addr < vma->vm_end). */
108 if (limit - len < addr)
109 return -ENOMEM;
110- if (!vma || addr + len <= vma->vm_start)
111+ if (check_heap_stack_gap(vma, addr, len))
112 return addr;
113 addr = vma->vm_end;
114 vma = vma->vm_next;
115@@ -1205,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp
116 merely specific addresses, but regions of memory -- perhaps
117 this feature should be incorporated into all ports? */
118
119+#ifdef CONFIG_PAX_RANDMMAP
120+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
121+#endif
122+
123 if (addr) {
124 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
125 if (addr != (unsigned long) -ENOMEM)
126@@ -1212,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp
127 }
128
129 /* Next, try allocating at TASK_UNMAPPED_BASE. */
130- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
131- len, limit);
132+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
133+
134 if (addr != (unsigned long) -ENOMEM)
135 return addr;
136
137diff -urNp linux-2.6.32.43/arch/alpha/mm/fault.c linux-2.6.32.43/arch/alpha/mm/fault.c
138--- linux-2.6.32.43/arch/alpha/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
139+++ linux-2.6.32.43/arch/alpha/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
140@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
141 __reload_thread(pcb);
142 }
143
144+#ifdef CONFIG_PAX_PAGEEXEC
145+/*
146+ * PaX: decide what to do with offenders (regs->pc = fault address)
147+ *
148+ * returns 1 when task should be killed
149+ * 2 when patched PLT trampoline was detected
150+ * 3 when unpatched PLT trampoline was detected
151+ */
152+static int pax_handle_fetch_fault(struct pt_regs *regs)
153+{
154+
155+#ifdef CONFIG_PAX_EMUPLT
156+ int err;
157+
158+ do { /* PaX: patched PLT emulation #1 */
159+ unsigned int ldah, ldq, jmp;
160+
161+ err = get_user(ldah, (unsigned int *)regs->pc);
162+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
163+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
164+
165+ if (err)
166+ break;
167+
168+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
169+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
170+ jmp == 0x6BFB0000U)
171+ {
172+ unsigned long r27, addr;
173+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
174+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
175+
176+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
177+ err = get_user(r27, (unsigned long *)addr);
178+ if (err)
179+ break;
180+
181+ regs->r27 = r27;
182+ regs->pc = r27;
183+ return 2;
184+ }
185+ } while (0);
186+
187+ do { /* PaX: patched PLT emulation #2 */
188+ unsigned int ldah, lda, br;
189+
190+ err = get_user(ldah, (unsigned int *)regs->pc);
191+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
192+ err |= get_user(br, (unsigned int *)(regs->pc+8));
193+
194+ if (err)
195+ break;
196+
197+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
198+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
199+ (br & 0xFFE00000U) == 0xC3E00000U)
200+ {
201+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
202+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
203+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
204+
205+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
206+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
207+ return 2;
208+ }
209+ } while (0);
210+
211+ do { /* PaX: unpatched PLT emulation */
212+ unsigned int br;
213+
214+ err = get_user(br, (unsigned int *)regs->pc);
215+
216+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
217+ unsigned int br2, ldq, nop, jmp;
218+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
219+
220+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
221+ err = get_user(br2, (unsigned int *)addr);
222+ err |= get_user(ldq, (unsigned int *)(addr+4));
223+ err |= get_user(nop, (unsigned int *)(addr+8));
224+ err |= get_user(jmp, (unsigned int *)(addr+12));
225+ err |= get_user(resolver, (unsigned long *)(addr+16));
226+
227+ if (err)
228+ break;
229+
230+ if (br2 == 0xC3600000U &&
231+ ldq == 0xA77B000CU &&
232+ nop == 0x47FF041FU &&
233+ jmp == 0x6B7B0000U)
234+ {
235+ regs->r28 = regs->pc+4;
236+ regs->r27 = addr+16;
237+ regs->pc = resolver;
238+ return 3;
239+ }
240+ }
241+ } while (0);
242+#endif
243+
244+ return 1;
245+}
246+
247+void pax_report_insns(void *pc, void *sp)
248+{
249+ unsigned long i;
250+
251+ printk(KERN_ERR "PAX: bytes at PC: ");
252+ for (i = 0; i < 5; i++) {
253+ unsigned int c;
254+ if (get_user(c, (unsigned int *)pc+i))
255+ printk(KERN_CONT "???????? ");
256+ else
257+ printk(KERN_CONT "%08x ", c);
258+ }
259+ printk("\n");
260+}
261+#endif
262
263 /*
264 * This routine handles page faults. It determines the address,
265@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
266 good_area:
267 si_code = SEGV_ACCERR;
268 if (cause < 0) {
269- if (!(vma->vm_flags & VM_EXEC))
270+ if (!(vma->vm_flags & VM_EXEC)) {
271+
272+#ifdef CONFIG_PAX_PAGEEXEC
273+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
274+ goto bad_area;
275+
276+ up_read(&mm->mmap_sem);
277+ switch (pax_handle_fetch_fault(regs)) {
278+
279+#ifdef CONFIG_PAX_EMUPLT
280+ case 2:
281+ case 3:
282+ return;
283+#endif
284+
285+ }
286+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
287+ do_group_exit(SIGKILL);
288+#else
289 goto bad_area;
290+#endif
291+
292+ }
293 } else if (!cause) {
294 /* Allow reads even for write-only mappings */
295 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
296diff -urNp linux-2.6.32.43/arch/arm/include/asm/elf.h linux-2.6.32.43/arch/arm/include/asm/elf.h
297--- linux-2.6.32.43/arch/arm/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
298+++ linux-2.6.32.43/arch/arm/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
299@@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t
300 the loader. We need to make sure that it is out of the way of the program
301 that it will "exec", and that there is sufficient room for the brk. */
302
303-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
304+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
305+
306+#ifdef CONFIG_PAX_ASLR
307+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
308+
309+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
310+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
311+#endif
312
313 /* When the program starts, a1 contains a pointer to a function to be
314 registered with atexit, as per the SVR4 ABI. A value of 0 means we
315diff -urNp linux-2.6.32.43/arch/arm/include/asm/kmap_types.h linux-2.6.32.43/arch/arm/include/asm/kmap_types.h
316--- linux-2.6.32.43/arch/arm/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
317+++ linux-2.6.32.43/arch/arm/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
318@@ -19,6 +19,7 @@ enum km_type {
319 KM_SOFTIRQ0,
320 KM_SOFTIRQ1,
321 KM_L2_CACHE,
322+ KM_CLEARPAGE,
323 KM_TYPE_NR
324 };
325
326diff -urNp linux-2.6.32.43/arch/arm/include/asm/uaccess.h linux-2.6.32.43/arch/arm/include/asm/uaccess.h
327--- linux-2.6.32.43/arch/arm/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
328+++ linux-2.6.32.43/arch/arm/include/asm/uaccess.h 2011-06-29 21:02:24.000000000 -0400
329@@ -22,6 +22,8 @@
330 #define VERIFY_READ 0
331 #define VERIFY_WRITE 1
332
333+extern void check_object_size(const void *ptr, unsigned long n, bool to);
334+
335 /*
336 * The exception table consists of pairs of addresses: the first is the
337 * address of an instruction that is allowed to fault, and the second is
338@@ -387,8 +389,23 @@ do { \
339
340
341 #ifdef CONFIG_MMU
342-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
343-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
344+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
345+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
346+
347+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
348+{
349+ if (!__builtin_constant_p(n))
350+ check_object_size(to, n, false);
351+ return ___copy_from_user(to, from, n);
352+}
353+
354+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
355+{
356+ if (!__builtin_constant_p(n))
357+ check_object_size(from, n, true);
358+ return ___copy_to_user(to, from, n);
359+}
360+
361 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
362 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
363 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
364@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
365
366 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
367 {
368+ if ((long)n < 0)
369+ return n;
370+
371 if (access_ok(VERIFY_READ, from, n))
372 n = __copy_from_user(to, from, n);
373 else /* security hole - plug it */
374@@ -412,6 +432,9 @@ static inline unsigned long __must_check
375
376 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
377 {
378+ if ((long)n < 0)
379+ return n;
380+
381 if (access_ok(VERIFY_WRITE, to, n))
382 n = __copy_to_user(to, from, n);
383 return n;
384diff -urNp linux-2.6.32.43/arch/arm/kernel/armksyms.c linux-2.6.32.43/arch/arm/kernel/armksyms.c
385--- linux-2.6.32.43/arch/arm/kernel/armksyms.c 2011-03-27 14:31:47.000000000 -0400
386+++ linux-2.6.32.43/arch/arm/kernel/armksyms.c 2011-07-06 19:51:50.000000000 -0400
387@@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
388 #ifdef CONFIG_MMU
389 EXPORT_SYMBOL(copy_page);
390
391-EXPORT_SYMBOL(__copy_from_user);
392-EXPORT_SYMBOL(__copy_to_user);
393+EXPORT_SYMBOL(___copy_from_user);
394+EXPORT_SYMBOL(___copy_to_user);
395 EXPORT_SYMBOL(__clear_user);
396
397 EXPORT_SYMBOL(__get_user_1);
398diff -urNp linux-2.6.32.43/arch/arm/kernel/kgdb.c linux-2.6.32.43/arch/arm/kernel/kgdb.c
399--- linux-2.6.32.43/arch/arm/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
400+++ linux-2.6.32.43/arch/arm/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
401@@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
402 * and we handle the normal undef case within the do_undefinstr
403 * handler.
404 */
405-struct kgdb_arch arch_kgdb_ops = {
406+const struct kgdb_arch arch_kgdb_ops = {
407 #ifndef __ARMEB__
408 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
409 #else /* ! __ARMEB__ */
410diff -urNp linux-2.6.32.43/arch/arm/kernel/traps.c linux-2.6.32.43/arch/arm/kernel/traps.c
411--- linux-2.6.32.43/arch/arm/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
412+++ linux-2.6.32.43/arch/arm/kernel/traps.c 2011-06-13 21:31:18.000000000 -0400
413@@ -247,6 +247,8 @@ static void __die(const char *str, int e
414
415 DEFINE_SPINLOCK(die_lock);
416
417+extern void gr_handle_kernel_exploit(void);
418+
419 /*
420 * This function is protected against re-entrancy.
421 */
422@@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, str
423 if (panic_on_oops)
424 panic("Fatal exception");
425
426+ gr_handle_kernel_exploit();
427+
428 do_exit(SIGSEGV);
429 }
430
431diff -urNp linux-2.6.32.43/arch/arm/lib/copy_from_user.S linux-2.6.32.43/arch/arm/lib/copy_from_user.S
432--- linux-2.6.32.43/arch/arm/lib/copy_from_user.S 2011-03-27 14:31:47.000000000 -0400
433+++ linux-2.6.32.43/arch/arm/lib/copy_from_user.S 2011-06-29 20:48:38.000000000 -0400
434@@ -16,7 +16,7 @@
435 /*
436 * Prototype:
437 *
438- * size_t __copy_from_user(void *to, const void *from, size_t n)
439+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
440 *
441 * Purpose:
442 *
443@@ -84,11 +84,11 @@
444
445 .text
446
447-ENTRY(__copy_from_user)
448+ENTRY(___copy_from_user)
449
450 #include "copy_template.S"
451
452-ENDPROC(__copy_from_user)
453+ENDPROC(___copy_from_user)
454
455 .section .fixup,"ax"
456 .align 0
457diff -urNp linux-2.6.32.43/arch/arm/lib/copy_to_user.S linux-2.6.32.43/arch/arm/lib/copy_to_user.S
458--- linux-2.6.32.43/arch/arm/lib/copy_to_user.S 2011-03-27 14:31:47.000000000 -0400
459+++ linux-2.6.32.43/arch/arm/lib/copy_to_user.S 2011-06-29 20:46:49.000000000 -0400
460@@ -16,7 +16,7 @@
461 /*
462 * Prototype:
463 *
464- * size_t __copy_to_user(void *to, const void *from, size_t n)
465+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
466 *
467 * Purpose:
468 *
469@@ -88,11 +88,11 @@
470 .text
471
472 ENTRY(__copy_to_user_std)
473-WEAK(__copy_to_user)
474+WEAK(___copy_to_user)
475
476 #include "copy_template.S"
477
478-ENDPROC(__copy_to_user)
479+ENDPROC(___copy_to_user)
480
481 .section .fixup,"ax"
482 .align 0
483diff -urNp linux-2.6.32.43/arch/arm/lib/uaccess.S linux-2.6.32.43/arch/arm/lib/uaccess.S
484--- linux-2.6.32.43/arch/arm/lib/uaccess.S 2011-03-27 14:31:47.000000000 -0400
485+++ linux-2.6.32.43/arch/arm/lib/uaccess.S 2011-06-29 20:48:53.000000000 -0400
486@@ -19,7 +19,7 @@
487
488 #define PAGE_SHIFT 12
489
490-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
491+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
492 * Purpose : copy a block to user memory from kernel memory
493 * Params : to - user memory
494 * : from - kernel memory
495@@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fau
496 sub r2, r2, ip
497 b .Lc2u_dest_aligned
498
499-ENTRY(__copy_to_user)
500+ENTRY(___copy_to_user)
501 stmfd sp!, {r2, r4 - r7, lr}
502 cmp r2, #4
503 blt .Lc2u_not_enough
504@@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fau
505 ldrgtb r3, [r1], #0
506 USER( strgtbt r3, [r0], #1) @ May fault
507 b .Lc2u_finished
508-ENDPROC(__copy_to_user)
509+ENDPROC(___copy_to_user)
510
511 .section .fixup,"ax"
512 .align 0
513 9001: ldmfd sp!, {r0, r4 - r7, pc}
514 .previous
515
516-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
517+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
518 * Purpose : copy a block from user memory to kernel memory
519 * Params : to - kernel memory
520 * : from - user memory
521@@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fau
522 sub r2, r2, ip
523 b .Lcfu_dest_aligned
524
525-ENTRY(__copy_from_user)
526+ENTRY(___copy_from_user)
527 stmfd sp!, {r0, r2, r4 - r7, lr}
528 cmp r2, #4
529 blt .Lcfu_not_enough
530@@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fau
531 USER( ldrgtbt r3, [r1], #1) @ May fault
532 strgtb r3, [r0], #1
533 b .Lcfu_finished
534-ENDPROC(__copy_from_user)
535+ENDPROC(___copy_from_user)
536
537 .section .fixup,"ax"
538 .align 0
539diff -urNp linux-2.6.32.43/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.32.43/arch/arm/lib/uaccess_with_memcpy.c
540--- linux-2.6.32.43/arch/arm/lib/uaccess_with_memcpy.c 2011-03-27 14:31:47.000000000 -0400
541+++ linux-2.6.32.43/arch/arm/lib/uaccess_with_memcpy.c 2011-06-29 20:44:35.000000000 -0400
542@@ -97,7 +97,7 @@ out:
543 }
544
545 unsigned long
546-__copy_to_user(void __user *to, const void *from, unsigned long n)
547+___copy_to_user(void __user *to, const void *from, unsigned long n)
548 {
549 /*
550 * This test is stubbed out of the main function above to keep
551diff -urNp linux-2.6.32.43/arch/arm/mach-at91/pm.c linux-2.6.32.43/arch/arm/mach-at91/pm.c
552--- linux-2.6.32.43/arch/arm/mach-at91/pm.c 2011-03-27 14:31:47.000000000 -0400
553+++ linux-2.6.32.43/arch/arm/mach-at91/pm.c 2011-04-17 15:56:45.000000000 -0400
554@@ -348,7 +348,7 @@ static void at91_pm_end(void)
555 }
556
557
558-static struct platform_suspend_ops at91_pm_ops ={
559+static const struct platform_suspend_ops at91_pm_ops ={
560 .valid = at91_pm_valid_state,
561 .begin = at91_pm_begin,
562 .enter = at91_pm_enter,
563diff -urNp linux-2.6.32.43/arch/arm/mach-omap1/pm.c linux-2.6.32.43/arch/arm/mach-omap1/pm.c
564--- linux-2.6.32.43/arch/arm/mach-omap1/pm.c 2011-03-27 14:31:47.000000000 -0400
565+++ linux-2.6.32.43/arch/arm/mach-omap1/pm.c 2011-04-17 15:56:45.000000000 -0400
566@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
567
568
569
570-static struct platform_suspend_ops omap_pm_ops ={
571+static const struct platform_suspend_ops omap_pm_ops ={
572 .prepare = omap_pm_prepare,
573 .enter = omap_pm_enter,
574 .finish = omap_pm_finish,
575diff -urNp linux-2.6.32.43/arch/arm/mach-omap2/pm24xx.c linux-2.6.32.43/arch/arm/mach-omap2/pm24xx.c
576--- linux-2.6.32.43/arch/arm/mach-omap2/pm24xx.c 2011-03-27 14:31:47.000000000 -0400
577+++ linux-2.6.32.43/arch/arm/mach-omap2/pm24xx.c 2011-04-17 15:56:45.000000000 -0400
578@@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
579 enable_hlt();
580 }
581
582-static struct platform_suspend_ops omap_pm_ops = {
583+static const struct platform_suspend_ops omap_pm_ops = {
584 .prepare = omap2_pm_prepare,
585 .enter = omap2_pm_enter,
586 .finish = omap2_pm_finish,
587diff -urNp linux-2.6.32.43/arch/arm/mach-omap2/pm34xx.c linux-2.6.32.43/arch/arm/mach-omap2/pm34xx.c
588--- linux-2.6.32.43/arch/arm/mach-omap2/pm34xx.c 2011-03-27 14:31:47.000000000 -0400
589+++ linux-2.6.32.43/arch/arm/mach-omap2/pm34xx.c 2011-04-17 15:56:45.000000000 -0400
590@@ -401,7 +401,7 @@ static void omap3_pm_end(void)
591 return;
592 }
593
594-static struct platform_suspend_ops omap_pm_ops = {
595+static const struct platform_suspend_ops omap_pm_ops = {
596 .begin = omap3_pm_begin,
597 .end = omap3_pm_end,
598 .prepare = omap3_pm_prepare,
599diff -urNp linux-2.6.32.43/arch/arm/mach-pnx4008/pm.c linux-2.6.32.43/arch/arm/mach-pnx4008/pm.c
600--- linux-2.6.32.43/arch/arm/mach-pnx4008/pm.c 2011-03-27 14:31:47.000000000 -0400
601+++ linux-2.6.32.43/arch/arm/mach-pnx4008/pm.c 2011-04-17 15:56:45.000000000 -0400
602@@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_stat
603 (state == PM_SUSPEND_MEM);
604 }
605
606-static struct platform_suspend_ops pnx4008_pm_ops = {
607+static const struct platform_suspend_ops pnx4008_pm_ops = {
608 .enter = pnx4008_pm_enter,
609 .valid = pnx4008_pm_valid,
610 };
611diff -urNp linux-2.6.32.43/arch/arm/mach-pxa/pm.c linux-2.6.32.43/arch/arm/mach-pxa/pm.c
612--- linux-2.6.32.43/arch/arm/mach-pxa/pm.c 2011-03-27 14:31:47.000000000 -0400
613+++ linux-2.6.32.43/arch/arm/mach-pxa/pm.c 2011-04-17 15:56:45.000000000 -0400
614@@ -95,7 +95,7 @@ void pxa_pm_finish(void)
615 pxa_cpu_pm_fns->finish();
616 }
617
618-static struct platform_suspend_ops pxa_pm_ops = {
619+static const struct platform_suspend_ops pxa_pm_ops = {
620 .valid = pxa_pm_valid,
621 .enter = pxa_pm_enter,
622 .prepare = pxa_pm_prepare,
623diff -urNp linux-2.6.32.43/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.32.43/arch/arm/mach-pxa/sharpsl_pm.c
624--- linux-2.6.32.43/arch/arm/mach-pxa/sharpsl_pm.c 2011-03-27 14:31:47.000000000 -0400
625+++ linux-2.6.32.43/arch/arm/mach-pxa/sharpsl_pm.c 2011-04-17 15:56:45.000000000 -0400
626@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
627 }
628
629 #ifdef CONFIG_PM
630-static struct platform_suspend_ops sharpsl_pm_ops = {
631+static const struct platform_suspend_ops sharpsl_pm_ops = {
632 .prepare = pxa_pm_prepare,
633 .finish = pxa_pm_finish,
634 .enter = corgi_pxa_pm_enter,
635diff -urNp linux-2.6.32.43/arch/arm/mach-sa1100/pm.c linux-2.6.32.43/arch/arm/mach-sa1100/pm.c
636--- linux-2.6.32.43/arch/arm/mach-sa1100/pm.c 2011-03-27 14:31:47.000000000 -0400
637+++ linux-2.6.32.43/arch/arm/mach-sa1100/pm.c 2011-04-17 15:56:45.000000000 -0400
638@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
639 return virt_to_phys(sp);
640 }
641
642-static struct platform_suspend_ops sa11x0_pm_ops = {
643+static const struct platform_suspend_ops sa11x0_pm_ops = {
644 .enter = sa11x0_pm_enter,
645 .valid = suspend_valid_only_mem,
646 };
647diff -urNp linux-2.6.32.43/arch/arm/mm/fault.c linux-2.6.32.43/arch/arm/mm/fault.c
648--- linux-2.6.32.43/arch/arm/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
649+++ linux-2.6.32.43/arch/arm/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
650@@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk,
651 }
652 #endif
653
654+#ifdef CONFIG_PAX_PAGEEXEC
655+ if (fsr & FSR_LNX_PF) {
656+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
657+ do_group_exit(SIGKILL);
658+ }
659+#endif
660+
661 tsk->thread.address = addr;
662 tsk->thread.error_code = fsr;
663 tsk->thread.trap_no = 14;
664@@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsign
665 }
666 #endif /* CONFIG_MMU */
667
668+#ifdef CONFIG_PAX_PAGEEXEC
669+void pax_report_insns(void *pc, void *sp)
670+{
671+ long i;
672+
673+ printk(KERN_ERR "PAX: bytes at PC: ");
674+ for (i = 0; i < 20; i++) {
675+ unsigned char c;
676+ if (get_user(c, (__force unsigned char __user *)pc+i))
677+ printk(KERN_CONT "?? ");
678+ else
679+ printk(KERN_CONT "%02x ", c);
680+ }
681+ printk("\n");
682+
683+ printk(KERN_ERR "PAX: bytes at SP-4: ");
684+ for (i = -1; i < 20; i++) {
685+ unsigned long c;
686+ if (get_user(c, (__force unsigned long __user *)sp+i))
687+ printk(KERN_CONT "???????? ");
688+ else
689+ printk(KERN_CONT "%08lx ", c);
690+ }
691+ printk("\n");
692+}
693+#endif
694+
695 /*
696 * First Level Translation Fault Handler
697 *
698diff -urNp linux-2.6.32.43/arch/arm/mm/mmap.c linux-2.6.32.43/arch/arm/mm/mmap.c
699--- linux-2.6.32.43/arch/arm/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
700+++ linux-2.6.32.43/arch/arm/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
701@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
702 if (len > TASK_SIZE)
703 return -ENOMEM;
704
705+#ifdef CONFIG_PAX_RANDMMAP
706+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
707+#endif
708+
709 if (addr) {
710 if (do_align)
711 addr = COLOUR_ALIGN(addr, pgoff);
712@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
713 addr = PAGE_ALIGN(addr);
714
715 vma = find_vma(mm, addr);
716- if (TASK_SIZE - len >= addr &&
717- (!vma || addr + len <= vma->vm_start))
718+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
719 return addr;
720 }
721 if (len > mm->cached_hole_size) {
722- start_addr = addr = mm->free_area_cache;
723+ start_addr = addr = mm->free_area_cache;
724 } else {
725- start_addr = addr = TASK_UNMAPPED_BASE;
726- mm->cached_hole_size = 0;
727+ start_addr = addr = mm->mmap_base;
728+ mm->cached_hole_size = 0;
729 }
730
731 full_search:
732@@ -94,14 +97,14 @@ full_search:
733 * Start a new search - just in case we missed
734 * some holes.
735 */
736- if (start_addr != TASK_UNMAPPED_BASE) {
737- start_addr = addr = TASK_UNMAPPED_BASE;
738+ if (start_addr != mm->mmap_base) {
739+ start_addr = addr = mm->mmap_base;
740 mm->cached_hole_size = 0;
741 goto full_search;
742 }
743 return -ENOMEM;
744 }
745- if (!vma || addr + len <= vma->vm_start) {
746+ if (check_heap_stack_gap(vma, addr, len)) {
747 /*
748 * Remember the place where we stopped the search:
749 */
750diff -urNp linux-2.6.32.43/arch/arm/plat-s3c/pm.c linux-2.6.32.43/arch/arm/plat-s3c/pm.c
751--- linux-2.6.32.43/arch/arm/plat-s3c/pm.c 2011-03-27 14:31:47.000000000 -0400
752+++ linux-2.6.32.43/arch/arm/plat-s3c/pm.c 2011-04-17 15:56:45.000000000 -0400
753@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
754 s3c_pm_check_cleanup();
755 }
756
757-static struct platform_suspend_ops s3c_pm_ops = {
758+static const struct platform_suspend_ops s3c_pm_ops = {
759 .enter = s3c_pm_enter,
760 .prepare = s3c_pm_prepare,
761 .finish = s3c_pm_finish,
762diff -urNp linux-2.6.32.43/arch/avr32/include/asm/elf.h linux-2.6.32.43/arch/avr32/include/asm/elf.h
763--- linux-2.6.32.43/arch/avr32/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
764+++ linux-2.6.32.43/arch/avr32/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
765@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
766 the loader. We need to make sure that it is out of the way of the program
767 that it will "exec", and that there is sufficient room for the brk. */
768
769-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
770+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
771
772+#ifdef CONFIG_PAX_ASLR
773+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
774+
775+#define PAX_DELTA_MMAP_LEN 15
776+#define PAX_DELTA_STACK_LEN 15
777+#endif
778
779 /* This yields a mask that user programs can use to figure out what
780 instruction set this CPU supports. This could be done in user space,
781diff -urNp linux-2.6.32.43/arch/avr32/include/asm/kmap_types.h linux-2.6.32.43/arch/avr32/include/asm/kmap_types.h
782--- linux-2.6.32.43/arch/avr32/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
783+++ linux-2.6.32.43/arch/avr32/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
784@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
785 D(11) KM_IRQ1,
786 D(12) KM_SOFTIRQ0,
787 D(13) KM_SOFTIRQ1,
788-D(14) KM_TYPE_NR
789+D(14) KM_CLEARPAGE,
790+D(15) KM_TYPE_NR
791 };
792
793 #undef D
794diff -urNp linux-2.6.32.43/arch/avr32/mach-at32ap/pm.c linux-2.6.32.43/arch/avr32/mach-at32ap/pm.c
795--- linux-2.6.32.43/arch/avr32/mach-at32ap/pm.c 2011-03-27 14:31:47.000000000 -0400
796+++ linux-2.6.32.43/arch/avr32/mach-at32ap/pm.c 2011-04-17 15:56:45.000000000 -0400
797@@ -176,7 +176,7 @@ out:
798 return 0;
799 }
800
801-static struct platform_suspend_ops avr32_pm_ops = {
802+static const struct platform_suspend_ops avr32_pm_ops = {
803 .valid = avr32_pm_valid_state,
804 .enter = avr32_pm_enter,
805 };
806diff -urNp linux-2.6.32.43/arch/avr32/mm/fault.c linux-2.6.32.43/arch/avr32/mm/fault.c
807--- linux-2.6.32.43/arch/avr32/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
808+++ linux-2.6.32.43/arch/avr32/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
809@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
810
811 int exception_trace = 1;
812
813+#ifdef CONFIG_PAX_PAGEEXEC
814+void pax_report_insns(void *pc, void *sp)
815+{
816+ unsigned long i;
817+
818+ printk(KERN_ERR "PAX: bytes at PC: ");
819+ for (i = 0; i < 20; i++) {
820+ unsigned char c;
821+ if (get_user(c, (unsigned char *)pc+i))
822+ printk(KERN_CONT "???????? ");
823+ else
824+ printk(KERN_CONT "%02x ", c);
825+ }
826+ printk("\n");
827+}
828+#endif
829+
830 /*
831 * This routine handles page faults. It determines the address and the
832 * problem, and then passes it off to one of the appropriate routines.
833@@ -157,6 +174,16 @@ bad_area:
834 up_read(&mm->mmap_sem);
835
836 if (user_mode(regs)) {
837+
838+#ifdef CONFIG_PAX_PAGEEXEC
839+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
840+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
841+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
842+ do_group_exit(SIGKILL);
843+ }
844+ }
845+#endif
846+
847 if (exception_trace && printk_ratelimit())
848 printk("%s%s[%d]: segfault at %08lx pc %08lx "
849 "sp %08lx ecr %lu\n",
850diff -urNp linux-2.6.32.43/arch/blackfin/kernel/kgdb.c linux-2.6.32.43/arch/blackfin/kernel/kgdb.c
851--- linux-2.6.32.43/arch/blackfin/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
852+++ linux-2.6.32.43/arch/blackfin/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
853@@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vecto
854 return -1; /* this means that we do not want to exit from the handler */
855 }
856
857-struct kgdb_arch arch_kgdb_ops = {
858+const struct kgdb_arch arch_kgdb_ops = {
859 .gdb_bpt_instr = {0xa1},
860 #ifdef CONFIG_SMP
861 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
862diff -urNp linux-2.6.32.43/arch/blackfin/mach-common/pm.c linux-2.6.32.43/arch/blackfin/mach-common/pm.c
863--- linux-2.6.32.43/arch/blackfin/mach-common/pm.c 2011-03-27 14:31:47.000000000 -0400
864+++ linux-2.6.32.43/arch/blackfin/mach-common/pm.c 2011-04-17 15:56:45.000000000 -0400
865@@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t
866 return 0;
867 }
868
869-struct platform_suspend_ops bfin_pm_ops = {
870+const struct platform_suspend_ops bfin_pm_ops = {
871 .enter = bfin_pm_enter,
872 .valid = bfin_pm_valid,
873 };
874diff -urNp linux-2.6.32.43/arch/frv/include/asm/kmap_types.h linux-2.6.32.43/arch/frv/include/asm/kmap_types.h
875--- linux-2.6.32.43/arch/frv/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
876+++ linux-2.6.32.43/arch/frv/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
877@@ -23,6 +23,7 @@ enum km_type {
878 KM_IRQ1,
879 KM_SOFTIRQ0,
880 KM_SOFTIRQ1,
881+ KM_CLEARPAGE,
882 KM_TYPE_NR
883 };
884
885diff -urNp linux-2.6.32.43/arch/frv/mm/elf-fdpic.c linux-2.6.32.43/arch/frv/mm/elf-fdpic.c
886--- linux-2.6.32.43/arch/frv/mm/elf-fdpic.c 2011-03-27 14:31:47.000000000 -0400
887+++ linux-2.6.32.43/arch/frv/mm/elf-fdpic.c 2011-04-17 15:56:45.000000000 -0400
888@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
889 if (addr) {
890 addr = PAGE_ALIGN(addr);
891 vma = find_vma(current->mm, addr);
892- if (TASK_SIZE - len >= addr &&
893- (!vma || addr + len <= vma->vm_start))
894+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
895 goto success;
896 }
897
898@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
899 for (; vma; vma = vma->vm_next) {
900 if (addr > limit)
901 break;
902- if (addr + len <= vma->vm_start)
903+ if (check_heap_stack_gap(vma, addr, len))
904 goto success;
905 addr = vma->vm_end;
906 }
907@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
908 for (; vma; vma = vma->vm_next) {
909 if (addr > limit)
910 break;
911- if (addr + len <= vma->vm_start)
912+ if (check_heap_stack_gap(vma, addr, len))
913 goto success;
914 addr = vma->vm_end;
915 }
916diff -urNp linux-2.6.32.43/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.43/arch/ia64/hp/common/hwsw_iommu.c
917--- linux-2.6.32.43/arch/ia64/hp/common/hwsw_iommu.c 2011-03-27 14:31:47.000000000 -0400
918+++ linux-2.6.32.43/arch/ia64/hp/common/hwsw_iommu.c 2011-04-17 15:56:45.000000000 -0400
919@@ -17,7 +17,7 @@
920 #include <linux/swiotlb.h>
921 #include <asm/machvec.h>
922
923-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
924+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
925
926 /* swiotlb declarations & definitions: */
927 extern int swiotlb_late_init_with_default_size (size_t size);
928@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
929 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
930 }
931
932-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
933+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
934 {
935 if (use_swiotlb(dev))
936 return &swiotlb_dma_ops;
937diff -urNp linux-2.6.32.43/arch/ia64/hp/common/sba_iommu.c linux-2.6.32.43/arch/ia64/hp/common/sba_iommu.c
938--- linux-2.6.32.43/arch/ia64/hp/common/sba_iommu.c 2011-03-27 14:31:47.000000000 -0400
939+++ linux-2.6.32.43/arch/ia64/hp/common/sba_iommu.c 2011-04-17 15:56:45.000000000 -0400
940@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
941 },
942 };
943
944-extern struct dma_map_ops swiotlb_dma_ops;
945+extern const struct dma_map_ops swiotlb_dma_ops;
946
947 static int __init
948 sba_init(void)
949@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
950
951 __setup("sbapagesize=",sba_page_override);
952
953-struct dma_map_ops sba_dma_ops = {
954+const struct dma_map_ops sba_dma_ops = {
955 .alloc_coherent = sba_alloc_coherent,
956 .free_coherent = sba_free_coherent,
957 .map_page = sba_map_page,
958diff -urNp linux-2.6.32.43/arch/ia64/ia32/binfmt_elf32.c linux-2.6.32.43/arch/ia64/ia32/binfmt_elf32.c
959--- linux-2.6.32.43/arch/ia64/ia32/binfmt_elf32.c 2011-03-27 14:31:47.000000000 -0400
960+++ linux-2.6.32.43/arch/ia64/ia32/binfmt_elf32.c 2011-04-17 15:56:45.000000000 -0400
961@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
962
963 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
964
965+#ifdef CONFIG_PAX_ASLR
966+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
967+
968+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
969+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
970+#endif
971+
972 /* Ugly but avoids duplication */
973 #include "../../../fs/binfmt_elf.c"
974
975diff -urNp linux-2.6.32.43/arch/ia64/ia32/ia32priv.h linux-2.6.32.43/arch/ia64/ia32/ia32priv.h
976--- linux-2.6.32.43/arch/ia64/ia32/ia32priv.h 2011-03-27 14:31:47.000000000 -0400
977+++ linux-2.6.32.43/arch/ia64/ia32/ia32priv.h 2011-04-17 15:56:45.000000000 -0400
978@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
979 #define ELF_DATA ELFDATA2LSB
980 #define ELF_ARCH EM_386
981
982-#define IA32_STACK_TOP IA32_PAGE_OFFSET
983+#ifdef CONFIG_PAX_RANDUSTACK
984+#define __IA32_DELTA_STACK (current->mm->delta_stack)
985+#else
986+#define __IA32_DELTA_STACK 0UL
987+#endif
988+
989+#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
990+
991 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
992 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
993
994diff -urNp linux-2.6.32.43/arch/ia64/include/asm/dma-mapping.h linux-2.6.32.43/arch/ia64/include/asm/dma-mapping.h
995--- linux-2.6.32.43/arch/ia64/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
996+++ linux-2.6.32.43/arch/ia64/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
997@@ -12,7 +12,7 @@
998
999 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
1000
1001-extern struct dma_map_ops *dma_ops;
1002+extern const struct dma_map_ops *dma_ops;
1003 extern struct ia64_machine_vector ia64_mv;
1004 extern void set_iommu_machvec(void);
1005
1006@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
1007 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1008 dma_addr_t *daddr, gfp_t gfp)
1009 {
1010- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1011+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1012 void *caddr;
1013
1014 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
1015@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
1016 static inline void dma_free_coherent(struct device *dev, size_t size,
1017 void *caddr, dma_addr_t daddr)
1018 {
1019- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1020+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1021 debug_dma_free_coherent(dev, size, caddr, daddr);
1022 ops->free_coherent(dev, size, caddr, daddr);
1023 }
1024@@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
1025
1026 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
1027 {
1028- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1029+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1030 return ops->mapping_error(dev, daddr);
1031 }
1032
1033 static inline int dma_supported(struct device *dev, u64 mask)
1034 {
1035- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1036+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1037 return ops->dma_supported(dev, mask);
1038 }
1039
1040diff -urNp linux-2.6.32.43/arch/ia64/include/asm/elf.h linux-2.6.32.43/arch/ia64/include/asm/elf.h
1041--- linux-2.6.32.43/arch/ia64/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1042+++ linux-2.6.32.43/arch/ia64/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1043@@ -43,6 +43,13 @@
1044 */
1045 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1046
1047+#ifdef CONFIG_PAX_ASLR
1048+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1049+
1050+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1051+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1052+#endif
1053+
1054 #define PT_IA_64_UNWIND 0x70000001
1055
1056 /* IA-64 relocations: */
1057diff -urNp linux-2.6.32.43/arch/ia64/include/asm/machvec.h linux-2.6.32.43/arch/ia64/include/asm/machvec.h
1058--- linux-2.6.32.43/arch/ia64/include/asm/machvec.h 2011-03-27 14:31:47.000000000 -0400
1059+++ linux-2.6.32.43/arch/ia64/include/asm/machvec.h 2011-04-17 15:56:45.000000000 -0400
1060@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
1061 /* DMA-mapping interface: */
1062 typedef void ia64_mv_dma_init (void);
1063 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1064-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1065+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1066
1067 /*
1068 * WARNING: The legacy I/O space is _architected_. Platforms are
1069@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
1070 # endif /* CONFIG_IA64_GENERIC */
1071
1072 extern void swiotlb_dma_init(void);
1073-extern struct dma_map_ops *dma_get_ops(struct device *);
1074+extern const struct dma_map_ops *dma_get_ops(struct device *);
1075
1076 /*
1077 * Define default versions so we can extend machvec for new platforms without having
1078diff -urNp linux-2.6.32.43/arch/ia64/include/asm/pgtable.h linux-2.6.32.43/arch/ia64/include/asm/pgtable.h
1079--- linux-2.6.32.43/arch/ia64/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1080+++ linux-2.6.32.43/arch/ia64/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1081@@ -12,7 +12,7 @@
1082 * David Mosberger-Tang <davidm@hpl.hp.com>
1083 */
1084
1085-
1086+#include <linux/const.h>
1087 #include <asm/mman.h>
1088 #include <asm/page.h>
1089 #include <asm/processor.h>
1090@@ -143,6 +143,17 @@
1091 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1092 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1093 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1094+
1095+#ifdef CONFIG_PAX_PAGEEXEC
1096+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1097+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1098+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1099+#else
1100+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1101+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1102+# define PAGE_COPY_NOEXEC PAGE_COPY
1103+#endif
1104+
1105 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1106 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1107 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1108diff -urNp linux-2.6.32.43/arch/ia64/include/asm/spinlock.h linux-2.6.32.43/arch/ia64/include/asm/spinlock.h
1109--- linux-2.6.32.43/arch/ia64/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
1110+++ linux-2.6.32.43/arch/ia64/include/asm/spinlock.h 2011-04-17 15:56:45.000000000 -0400
1111@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
1112 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1113
1114 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1115- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1116+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1117 }
1118
1119 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1120diff -urNp linux-2.6.32.43/arch/ia64/include/asm/uaccess.h linux-2.6.32.43/arch/ia64/include/asm/uaccess.h
1121--- linux-2.6.32.43/arch/ia64/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
1122+++ linux-2.6.32.43/arch/ia64/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
1123@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
1124 const void *__cu_from = (from); \
1125 long __cu_len = (n); \
1126 \
1127- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1128+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1129 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1130 __cu_len; \
1131 })
1132@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
1133 long __cu_len = (n); \
1134 \
1135 __chk_user_ptr(__cu_from); \
1136- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1137+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1138 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1139 __cu_len; \
1140 })
1141diff -urNp linux-2.6.32.43/arch/ia64/kernel/dma-mapping.c linux-2.6.32.43/arch/ia64/kernel/dma-mapping.c
1142--- linux-2.6.32.43/arch/ia64/kernel/dma-mapping.c 2011-03-27 14:31:47.000000000 -0400
1143+++ linux-2.6.32.43/arch/ia64/kernel/dma-mapping.c 2011-04-17 15:56:45.000000000 -0400
1144@@ -3,7 +3,7 @@
1145 /* Set this to 1 if there is a HW IOMMU in the system */
1146 int iommu_detected __read_mostly;
1147
1148-struct dma_map_ops *dma_ops;
1149+const struct dma_map_ops *dma_ops;
1150 EXPORT_SYMBOL(dma_ops);
1151
1152 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1153@@ -16,7 +16,7 @@ static int __init dma_init(void)
1154 }
1155 fs_initcall(dma_init);
1156
1157-struct dma_map_ops *dma_get_ops(struct device *dev)
1158+const struct dma_map_ops *dma_get_ops(struct device *dev)
1159 {
1160 return dma_ops;
1161 }
1162diff -urNp linux-2.6.32.43/arch/ia64/kernel/module.c linux-2.6.32.43/arch/ia64/kernel/module.c
1163--- linux-2.6.32.43/arch/ia64/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1164+++ linux-2.6.32.43/arch/ia64/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1165@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1166 void
1167 module_free (struct module *mod, void *module_region)
1168 {
1169- if (mod && mod->arch.init_unw_table &&
1170- module_region == mod->module_init) {
1171+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1172 unw_remove_unwind_table(mod->arch.init_unw_table);
1173 mod->arch.init_unw_table = NULL;
1174 }
1175@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
1176 }
1177
1178 static inline int
1179+in_init_rx (const struct module *mod, uint64_t addr)
1180+{
1181+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1182+}
1183+
1184+static inline int
1185+in_init_rw (const struct module *mod, uint64_t addr)
1186+{
1187+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1188+}
1189+
1190+static inline int
1191 in_init (const struct module *mod, uint64_t addr)
1192 {
1193- return addr - (uint64_t) mod->module_init < mod->init_size;
1194+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1195+}
1196+
1197+static inline int
1198+in_core_rx (const struct module *mod, uint64_t addr)
1199+{
1200+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1201+}
1202+
1203+static inline int
1204+in_core_rw (const struct module *mod, uint64_t addr)
1205+{
1206+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1207 }
1208
1209 static inline int
1210 in_core (const struct module *mod, uint64_t addr)
1211 {
1212- return addr - (uint64_t) mod->module_core < mod->core_size;
1213+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1214 }
1215
1216 static inline int
1217@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
1218 break;
1219
1220 case RV_BDREL:
1221- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1222+ if (in_init_rx(mod, val))
1223+ val -= (uint64_t) mod->module_init_rx;
1224+ else if (in_init_rw(mod, val))
1225+ val -= (uint64_t) mod->module_init_rw;
1226+ else if (in_core_rx(mod, val))
1227+ val -= (uint64_t) mod->module_core_rx;
1228+ else if (in_core_rw(mod, val))
1229+ val -= (uint64_t) mod->module_core_rw;
1230 break;
1231
1232 case RV_LTV:
1233@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
1234 * addresses have been selected...
1235 */
1236 uint64_t gp;
1237- if (mod->core_size > MAX_LTOFF)
1238+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1239 /*
1240 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1241 * at the end of the module.
1242 */
1243- gp = mod->core_size - MAX_LTOFF / 2;
1244+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1245 else
1246- gp = mod->core_size / 2;
1247- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1248+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1249+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1250 mod->arch.gp = gp;
1251 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1252 }
1253diff -urNp linux-2.6.32.43/arch/ia64/kernel/pci-dma.c linux-2.6.32.43/arch/ia64/kernel/pci-dma.c
1254--- linux-2.6.32.43/arch/ia64/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
1255+++ linux-2.6.32.43/arch/ia64/kernel/pci-dma.c 2011-04-17 15:56:45.000000000 -0400
1256@@ -43,7 +43,7 @@ struct device fallback_dev = {
1257 .dma_mask = &fallback_dev.coherent_dma_mask,
1258 };
1259
1260-extern struct dma_map_ops intel_dma_ops;
1261+extern const struct dma_map_ops intel_dma_ops;
1262
1263 static int __init pci_iommu_init(void)
1264 {
1265@@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *d
1266 }
1267 EXPORT_SYMBOL(iommu_dma_supported);
1268
1269+extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1270+extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1271+extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1272+extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1273+extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1274+extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1275+extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1276+
1277+static const struct dma_map_ops intel_iommu_dma_ops = {
1278+ /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1279+ .alloc_coherent = intel_alloc_coherent,
1280+ .free_coherent = intel_free_coherent,
1281+ .map_sg = intel_map_sg,
1282+ .unmap_sg = intel_unmap_sg,
1283+ .map_page = intel_map_page,
1284+ .unmap_page = intel_unmap_page,
1285+ .mapping_error = intel_mapping_error,
1286+
1287+ .sync_single_for_cpu = machvec_dma_sync_single,
1288+ .sync_sg_for_cpu = machvec_dma_sync_sg,
1289+ .sync_single_for_device = machvec_dma_sync_single,
1290+ .sync_sg_for_device = machvec_dma_sync_sg,
1291+ .dma_supported = iommu_dma_supported,
1292+};
1293+
1294 void __init pci_iommu_alloc(void)
1295 {
1296- dma_ops = &intel_dma_ops;
1297-
1298- dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1299- dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1300- dma_ops->sync_single_for_device = machvec_dma_sync_single;
1301- dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1302- dma_ops->dma_supported = iommu_dma_supported;
1303+ dma_ops = &intel_iommu_dma_ops;
1304
1305 /*
1306 * The order of these functions is important for
1307diff -urNp linux-2.6.32.43/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.43/arch/ia64/kernel/pci-swiotlb.c
1308--- linux-2.6.32.43/arch/ia64/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
1309+++ linux-2.6.32.43/arch/ia64/kernel/pci-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
1310@@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent
1311 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1312 }
1313
1314-struct dma_map_ops swiotlb_dma_ops = {
1315+const struct dma_map_ops swiotlb_dma_ops = {
1316 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1317 .free_coherent = swiotlb_free_coherent,
1318 .map_page = swiotlb_map_page,
1319diff -urNp linux-2.6.32.43/arch/ia64/kernel/sys_ia64.c linux-2.6.32.43/arch/ia64/kernel/sys_ia64.c
1320--- linux-2.6.32.43/arch/ia64/kernel/sys_ia64.c 2011-03-27 14:31:47.000000000 -0400
1321+++ linux-2.6.32.43/arch/ia64/kernel/sys_ia64.c 2011-04-17 15:56:45.000000000 -0400
1322@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1323 if (REGION_NUMBER(addr) == RGN_HPAGE)
1324 addr = 0;
1325 #endif
1326+
1327+#ifdef CONFIG_PAX_RANDMMAP
1328+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1329+ addr = mm->free_area_cache;
1330+ else
1331+#endif
1332+
1333 if (!addr)
1334 addr = mm->free_area_cache;
1335
1336@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1337 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1338 /* At this point: (!vma || addr < vma->vm_end). */
1339 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1340- if (start_addr != TASK_UNMAPPED_BASE) {
1341+ if (start_addr != mm->mmap_base) {
1342 /* Start a new search --- just in case we missed some holes. */
1343- addr = TASK_UNMAPPED_BASE;
1344+ addr = mm->mmap_base;
1345 goto full_search;
1346 }
1347 return -ENOMEM;
1348 }
1349- if (!vma || addr + len <= vma->vm_start) {
1350+ if (check_heap_stack_gap(vma, addr, len)) {
1351 /* Remember the address where we stopped this search: */
1352 mm->free_area_cache = addr + len;
1353 return addr;
1354diff -urNp linux-2.6.32.43/arch/ia64/kernel/topology.c linux-2.6.32.43/arch/ia64/kernel/topology.c
1355--- linux-2.6.32.43/arch/ia64/kernel/topology.c 2011-03-27 14:31:47.000000000 -0400
1356+++ linux-2.6.32.43/arch/ia64/kernel/topology.c 2011-04-17 15:56:45.000000000 -0400
1357@@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
1358 return ret;
1359 }
1360
1361-static struct sysfs_ops cache_sysfs_ops = {
1362+static const struct sysfs_ops cache_sysfs_ops = {
1363 .show = cache_show
1364 };
1365
1366diff -urNp linux-2.6.32.43/arch/ia64/kernel/vmlinux.lds.S linux-2.6.32.43/arch/ia64/kernel/vmlinux.lds.S
1367--- linux-2.6.32.43/arch/ia64/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
1368+++ linux-2.6.32.43/arch/ia64/kernel/vmlinux.lds.S 2011-04-17 15:56:45.000000000 -0400
1369@@ -190,7 +190,7 @@ SECTIONS
1370 /* Per-cpu data: */
1371 . = ALIGN(PERCPU_PAGE_SIZE);
1372 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1373- __phys_per_cpu_start = __per_cpu_load;
1374+ __phys_per_cpu_start = per_cpu_load;
1375 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1376 * into percpu page size
1377 */
1378diff -urNp linux-2.6.32.43/arch/ia64/mm/fault.c linux-2.6.32.43/arch/ia64/mm/fault.c
1379--- linux-2.6.32.43/arch/ia64/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1380+++ linux-2.6.32.43/arch/ia64/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1381@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1382 return pte_present(pte);
1383 }
1384
1385+#ifdef CONFIG_PAX_PAGEEXEC
1386+void pax_report_insns(void *pc, void *sp)
1387+{
1388+ unsigned long i;
1389+
1390+ printk(KERN_ERR "PAX: bytes at PC: ");
1391+ for (i = 0; i < 8; i++) {
1392+ unsigned int c;
1393+ if (get_user(c, (unsigned int *)pc+i))
1394+ printk(KERN_CONT "???????? ");
1395+ else
1396+ printk(KERN_CONT "%08x ", c);
1397+ }
1398+ printk("\n");
1399+}
1400+#endif
1401+
1402 void __kprobes
1403 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1404 {
1405@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1406 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1407 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1408
1409- if ((vma->vm_flags & mask) != mask)
1410+ if ((vma->vm_flags & mask) != mask) {
1411+
1412+#ifdef CONFIG_PAX_PAGEEXEC
1413+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1414+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1415+ goto bad_area;
1416+
1417+ up_read(&mm->mmap_sem);
1418+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1419+ do_group_exit(SIGKILL);
1420+ }
1421+#endif
1422+
1423 goto bad_area;
1424
1425+ }
1426+
1427 survive:
1428 /*
1429 * If for any reason at all we couldn't handle the fault, make
1430diff -urNp linux-2.6.32.43/arch/ia64/mm/hugetlbpage.c linux-2.6.32.43/arch/ia64/mm/hugetlbpage.c
1431--- linux-2.6.32.43/arch/ia64/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
1432+++ linux-2.6.32.43/arch/ia64/mm/hugetlbpage.c 2011-04-17 15:56:45.000000000 -0400
1433@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
1434 /* At this point: (!vmm || addr < vmm->vm_end). */
1435 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1436 return -ENOMEM;
1437- if (!vmm || (addr + len) <= vmm->vm_start)
1438+ if (check_heap_stack_gap(vmm, addr, len))
1439 return addr;
1440 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1441 }
1442diff -urNp linux-2.6.32.43/arch/ia64/mm/init.c linux-2.6.32.43/arch/ia64/mm/init.c
1443--- linux-2.6.32.43/arch/ia64/mm/init.c 2011-03-27 14:31:47.000000000 -0400
1444+++ linux-2.6.32.43/arch/ia64/mm/init.c 2011-04-17 15:56:45.000000000 -0400
1445@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1446 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1447 vma->vm_end = vma->vm_start + PAGE_SIZE;
1448 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1449+
1450+#ifdef CONFIG_PAX_PAGEEXEC
1451+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1452+ vma->vm_flags &= ~VM_EXEC;
1453+
1454+#ifdef CONFIG_PAX_MPROTECT
1455+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1456+ vma->vm_flags &= ~VM_MAYEXEC;
1457+#endif
1458+
1459+ }
1460+#endif
1461+
1462 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1463 down_write(&current->mm->mmap_sem);
1464 if (insert_vm_struct(current->mm, vma)) {
1465diff -urNp linux-2.6.32.43/arch/ia64/sn/pci/pci_dma.c linux-2.6.32.43/arch/ia64/sn/pci/pci_dma.c
1466--- linux-2.6.32.43/arch/ia64/sn/pci/pci_dma.c 2011-03-27 14:31:47.000000000 -0400
1467+++ linux-2.6.32.43/arch/ia64/sn/pci/pci_dma.c 2011-04-17 15:56:45.000000000 -0400
1468@@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *
1469 return ret;
1470 }
1471
1472-static struct dma_map_ops sn_dma_ops = {
1473+static const struct dma_map_ops sn_dma_ops = {
1474 .alloc_coherent = sn_dma_alloc_coherent,
1475 .free_coherent = sn_dma_free_coherent,
1476 .map_page = sn_dma_map_page,
1477diff -urNp linux-2.6.32.43/arch/m32r/lib/usercopy.c linux-2.6.32.43/arch/m32r/lib/usercopy.c
1478--- linux-2.6.32.43/arch/m32r/lib/usercopy.c 2011-03-27 14:31:47.000000000 -0400
1479+++ linux-2.6.32.43/arch/m32r/lib/usercopy.c 2011-04-17 15:56:45.000000000 -0400
1480@@ -14,6 +14,9 @@
1481 unsigned long
1482 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1483 {
1484+ if ((long)n < 0)
1485+ return n;
1486+
1487 prefetch(from);
1488 if (access_ok(VERIFY_WRITE, to, n))
1489 __copy_user(to,from,n);
1490@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1491 unsigned long
1492 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1493 {
1494+ if ((long)n < 0)
1495+ return n;
1496+
1497 prefetchw(to);
1498 if (access_ok(VERIFY_READ, from, n))
1499 __copy_user_zeroing(to,from,n);
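[Editor's sketch] The "(long)n < 0" guard added to both m32r copy helpers above (and again in the powerpc copy_from_user/copy_to_user rewrite later in this patch) is a defence against length values that went negative in signed arithmetic and were then converted to a huge unsigned number. A minimal userspace illustration of the failure mode it catches, with the real copy machinery stubbed out:

#include <stdio.h>

/* Mimics the guard added to __generic_copy_{to,from}_user(): bail out
 * early when the unsigned length is really a converted negative value. */
static unsigned long guarded_copy(void *to, const void *from, unsigned long n)
{
    (void)to;
    (void)from;

    if ((long)n < 0)
        return n;   /* report "nothing copied" instead of running off the buffer */

    /* ... access_ok() and the real copy would go here ... */
    return 0;
}

int main(void)
{
    int user_len = -1;                  /* e.g. an unchecked length from userspace */
    unsigned long n = (unsigned long)user_len;

    printf("length as unsigned: %lu\n", n);   /* enormous value */
    printf("bytes left uncopied by the guarded helper: %lu\n",
           guarded_copy(NULL, NULL, n));
    return 0;
}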
1500diff -urNp linux-2.6.32.43/arch/mips/alchemy/devboards/pm.c linux-2.6.32.43/arch/mips/alchemy/devboards/pm.c
1501--- linux-2.6.32.43/arch/mips/alchemy/devboards/pm.c 2011-03-27 14:31:47.000000000 -0400
1502+++ linux-2.6.32.43/arch/mips/alchemy/devboards/pm.c 2011-04-17 15:56:45.000000000 -0400
1503@@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1504
1505 }
1506
1507-static struct platform_suspend_ops db1x_pm_ops = {
1508+static const struct platform_suspend_ops db1x_pm_ops = {
1509 .valid = suspend_valid_only_mem,
1510 .begin = db1x_pm_begin,
1511 .enter = db1x_pm_enter,
1512diff -urNp linux-2.6.32.43/arch/mips/include/asm/elf.h linux-2.6.32.43/arch/mips/include/asm/elf.h
1513--- linux-2.6.32.43/arch/mips/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1514+++ linux-2.6.32.43/arch/mips/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1515@@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
1516 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1517 #endif
1518
1519+#ifdef CONFIG_PAX_ASLR
1520+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1521+
1522+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1523+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1524+#endif
1525+
1526 #endif /* _ASM_ELF_H */
1527diff -urNp linux-2.6.32.43/arch/mips/include/asm/page.h linux-2.6.32.43/arch/mips/include/asm/page.h
1528--- linux-2.6.32.43/arch/mips/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1529+++ linux-2.6.32.43/arch/mips/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1530@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1531 #ifdef CONFIG_CPU_MIPS32
1532 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1533 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1534- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1535+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1536 #else
1537 typedef struct { unsigned long long pte; } pte_t;
1538 #define pte_val(x) ((x).pte)
1539diff -urNp linux-2.6.32.43/arch/mips/include/asm/system.h linux-2.6.32.43/arch/mips/include/asm/system.h
1540--- linux-2.6.32.43/arch/mips/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
1541+++ linux-2.6.32.43/arch/mips/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
1542@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1543 */
1544 #define __ARCH_WANT_UNLOCKED_CTXSW
1545
1546-extern unsigned long arch_align_stack(unsigned long sp);
1547+#define arch_align_stack(x) ((x) & ~0xfUL)
1548
1549 #endif /* _ASM_SYSTEM_H */
1550diff -urNp linux-2.6.32.43/arch/mips/kernel/binfmt_elfn32.c linux-2.6.32.43/arch/mips/kernel/binfmt_elfn32.c
1551--- linux-2.6.32.43/arch/mips/kernel/binfmt_elfn32.c 2011-03-27 14:31:47.000000000 -0400
1552+++ linux-2.6.32.43/arch/mips/kernel/binfmt_elfn32.c 2011-04-17 15:56:45.000000000 -0400
1553@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1554 #undef ELF_ET_DYN_BASE
1555 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1556
1557+#ifdef CONFIG_PAX_ASLR
1558+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1559+
1560+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1561+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1562+#endif
1563+
1564 #include <asm/processor.h>
1565 #include <linux/module.h>
1566 #include <linux/elfcore.h>
1567diff -urNp linux-2.6.32.43/arch/mips/kernel/binfmt_elfo32.c linux-2.6.32.43/arch/mips/kernel/binfmt_elfo32.c
1568--- linux-2.6.32.43/arch/mips/kernel/binfmt_elfo32.c 2011-03-27 14:31:47.000000000 -0400
1569+++ linux-2.6.32.43/arch/mips/kernel/binfmt_elfo32.c 2011-04-17 15:56:45.000000000 -0400
1570@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1571 #undef ELF_ET_DYN_BASE
1572 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1573
1574+#ifdef CONFIG_PAX_ASLR
1575+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1576+
1577+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1578+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1579+#endif
1580+
1581 #include <asm/processor.h>
1582
1583 /*
1584diff -urNp linux-2.6.32.43/arch/mips/kernel/kgdb.c linux-2.6.32.43/arch/mips/kernel/kgdb.c
1585--- linux-2.6.32.43/arch/mips/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
1586+++ linux-2.6.32.43/arch/mips/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
1587@@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1588 return -1;
1589 }
1590
1591+/* cannot be const */
1592 struct kgdb_arch arch_kgdb_ops;
1593
1594 /*
1595diff -urNp linux-2.6.32.43/arch/mips/kernel/process.c linux-2.6.32.43/arch/mips/kernel/process.c
1596--- linux-2.6.32.43/arch/mips/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
1597+++ linux-2.6.32.43/arch/mips/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
1598@@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
1599 out:
1600 return pc;
1601 }
1602-
1603-/*
1604- * Don't forget that the stack pointer must be aligned on a 8 bytes
1605- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1606- */
1607-unsigned long arch_align_stack(unsigned long sp)
1608-{
1609- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1610- sp -= get_random_int() & ~PAGE_MASK;
1611-
1612- return sp & ALMASK;
1613-}
1614diff -urNp linux-2.6.32.43/arch/mips/kernel/syscall.c linux-2.6.32.43/arch/mips/kernel/syscall.c
1615--- linux-2.6.32.43/arch/mips/kernel/syscall.c 2011-03-27 14:31:47.000000000 -0400
1616+++ linux-2.6.32.43/arch/mips/kernel/syscall.c 2011-04-17 15:56:45.000000000 -0400
1617@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
1618 do_color_align = 0;
1619 if (filp || (flags & MAP_SHARED))
1620 do_color_align = 1;
1621+
1622+#ifdef CONFIG_PAX_RANDMMAP
1623+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1624+#endif
1625+
1626 if (addr) {
1627 if (do_color_align)
1628 addr = COLOUR_ALIGN(addr, pgoff);
1629 else
1630 addr = PAGE_ALIGN(addr);
1631 vmm = find_vma(current->mm, addr);
1632- if (task_size - len >= addr &&
1633- (!vmm || addr + len <= vmm->vm_start))
1634+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1635 return addr;
1636 }
1637- addr = TASK_UNMAPPED_BASE;
1638+ addr = current->mm->mmap_base;
1639 if (do_color_align)
1640 addr = COLOUR_ALIGN(addr, pgoff);
1641 else
1642@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
1643 /* At this point: (!vmm || addr < vmm->vm_end). */
1644 if (task_size - len < addr)
1645 return -ENOMEM;
1646- if (!vmm || addr + len <= vmm->vm_start)
1647+ if (check_heap_stack_gap(vmm, addr, len))
1648 return addr;
1649 addr = vmm->vm_end;
1650 if (do_color_align)
1651diff -urNp linux-2.6.32.43/arch/mips/mm/fault.c linux-2.6.32.43/arch/mips/mm/fault.c
1652--- linux-2.6.32.43/arch/mips/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1653+++ linux-2.6.32.43/arch/mips/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1654@@ -26,6 +26,23 @@
1655 #include <asm/ptrace.h>
1656 #include <asm/highmem.h> /* For VMALLOC_END */
1657
1658+#ifdef CONFIG_PAX_PAGEEXEC
1659+void pax_report_insns(void *pc, void *sp)
1660+{
1661+ unsigned long i;
1662+
1663+ printk(KERN_ERR "PAX: bytes at PC: ");
1664+ for (i = 0; i < 5; i++) {
1665+ unsigned int c;
1666+ if (get_user(c, (unsigned int *)pc+i))
1667+ printk(KERN_CONT "???????? ");
1668+ else
1669+ printk(KERN_CONT "%08x ", c);
1670+ }
1671+ printk("\n");
1672+}
1673+#endif
1674+
1675 /*
1676 * This routine handles page faults. It determines the address,
1677 * and the problem, and then passes it off to one of the appropriate
1678diff -urNp linux-2.6.32.43/arch/parisc/include/asm/elf.h linux-2.6.32.43/arch/parisc/include/asm/elf.h
1679--- linux-2.6.32.43/arch/parisc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1680+++ linux-2.6.32.43/arch/parisc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1681@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
1682
1683 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1684
1685+#ifdef CONFIG_PAX_ASLR
1686+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1687+
1688+#define PAX_DELTA_MMAP_LEN 16
1689+#define PAX_DELTA_STACK_LEN 16
1690+#endif
1691+
1692 /* This yields a mask that user programs can use to figure out what
1693 instruction set this CPU supports. This could be done in user space,
1694 but it's not easy, and we've already done it here. */
1695diff -urNp linux-2.6.32.43/arch/parisc/include/asm/pgtable.h linux-2.6.32.43/arch/parisc/include/asm/pgtable.h
1696--- linux-2.6.32.43/arch/parisc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1697+++ linux-2.6.32.43/arch/parisc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1698@@ -207,6 +207,17 @@
1699 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1700 #define PAGE_COPY PAGE_EXECREAD
1701 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1702+
1703+#ifdef CONFIG_PAX_PAGEEXEC
1704+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1705+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1706+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1707+#else
1708+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1709+# define PAGE_COPY_NOEXEC PAGE_COPY
1710+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1711+#endif
1712+
1713 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1714 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1715 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1716diff -urNp linux-2.6.32.43/arch/parisc/kernel/module.c linux-2.6.32.43/arch/parisc/kernel/module.c
1717--- linux-2.6.32.43/arch/parisc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1718+++ linux-2.6.32.43/arch/parisc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1719@@ -95,16 +95,38 @@
1720
1721 /* three functions to determine where in the module core
1722 * or init pieces the location is */
1723+static inline int in_init_rx(struct module *me, void *loc)
1724+{
1725+ return (loc >= me->module_init_rx &&
1726+ loc < (me->module_init_rx + me->init_size_rx));
1727+}
1728+
1729+static inline int in_init_rw(struct module *me, void *loc)
1730+{
1731+ return (loc >= me->module_init_rw &&
1732+ loc < (me->module_init_rw + me->init_size_rw));
1733+}
1734+
1735 static inline int in_init(struct module *me, void *loc)
1736 {
1737- return (loc >= me->module_init &&
1738- loc <= (me->module_init + me->init_size));
1739+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1740+}
1741+
1742+static inline int in_core_rx(struct module *me, void *loc)
1743+{
1744+ return (loc >= me->module_core_rx &&
1745+ loc < (me->module_core_rx + me->core_size_rx));
1746+}
1747+
1748+static inline int in_core_rw(struct module *me, void *loc)
1749+{
1750+ return (loc >= me->module_core_rw &&
1751+ loc < (me->module_core_rw + me->core_size_rw));
1752 }
1753
1754 static inline int in_core(struct module *me, void *loc)
1755 {
1756- return (loc >= me->module_core &&
1757- loc <= (me->module_core + me->core_size));
1758+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1759 }
1760
1761 static inline int in_local(struct module *me, void *loc)
1762@@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
1763 }
1764
1765 /* align things a bit */
1766- me->core_size = ALIGN(me->core_size, 16);
1767- me->arch.got_offset = me->core_size;
1768- me->core_size += gots * sizeof(struct got_entry);
1769-
1770- me->core_size = ALIGN(me->core_size, 16);
1771- me->arch.fdesc_offset = me->core_size;
1772- me->core_size += fdescs * sizeof(Elf_Fdesc);
1773+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1774+ me->arch.got_offset = me->core_size_rw;
1775+ me->core_size_rw += gots * sizeof(struct got_entry);
1776+
1777+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1778+ me->arch.fdesc_offset = me->core_size_rw;
1779+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1780
1781 me->arch.got_max = gots;
1782 me->arch.fdesc_max = fdescs;
1783@@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
1784
1785 BUG_ON(value == 0);
1786
1787- got = me->module_core + me->arch.got_offset;
1788+ got = me->module_core_rw + me->arch.got_offset;
1789 for (i = 0; got[i].addr; i++)
1790 if (got[i].addr == value)
1791 goto out;
1792@@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
1793 #ifdef CONFIG_64BIT
1794 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1795 {
1796- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1797+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1798
1799 if (!value) {
1800 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1801@@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
1802
1803 /* Create new one */
1804 fdesc->addr = value;
1805- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1806+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1807 return (Elf_Addr)fdesc;
1808 }
1809 #endif /* CONFIG_64BIT */
1810@@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
1811
1812 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1813 end = table + sechdrs[me->arch.unwind_section].sh_size;
1814- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1815+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1816
1817 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1818 me->arch.unwind_section, table, end, gp);
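[Editor's sketch] This file's hunks are representative of how the patch splits every module's single core/init allocation into a read-execute half (module_core_rx / core_size_rx) and a read-write half (module_core_rw / core_size_rw), then points GOT and function-descriptor bookkeeping at the RW half. The new struct module fields are added elsewhere in the patch and are not shown in this excerpt; the reduced sketch below uses a stand-in structure to show the range-classification idea (note it also uses the strict "<" upper bound, fixing the "<=" off-by-one visible in the removed lines).

#include <stdio.h>

/* Stand-in for the split layout the patch adds to struct module. */
struct module_layout {
    void *module_core_rx;  unsigned long core_size_rx;   /* code: r-x */
    void *module_core_rw;  unsigned long core_size_rw;   /* data, GOT, fdescs: rw- */
};

static int in_range(const void *base, unsigned long size, const void *loc)
{
    const char *b = base, *l = loc;

    return base && l >= b && l < b + size;
}

static int in_core_rx(const struct module_layout *me, void *loc)
{
    return in_range(me->module_core_rx, me->core_size_rx, loc);
}

static int in_core_rw(const struct module_layout *me, void *loc)
{
    return in_range(me->module_core_rw, me->core_size_rw, loc);
}

/* The original in_core() survives as the union of the two halves. */
static int in_core(const struct module_layout *me, void *loc)
{
    return in_core_rx(me, loc) || in_core_rw(me, loc);
}

int main(void)
{
    char text[64], data[64];
    struct module_layout mod = { text, sizeof text, data, sizeof data };

    printf("text+8: core=%d rx=%d rw=%d\n",
           in_core(&mod, text + 8), in_core_rx(&mod, text + 8),
           in_core_rw(&mod, text + 8));
    printf("data+8: core=%d rx=%d rw=%d\n",
           in_core(&mod, data + 8), in_core_rx(&mod, data + 8),
           in_core_rw(&mod, data + 8));
    return 0;
}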
1819diff -urNp linux-2.6.32.43/arch/parisc/kernel/sys_parisc.c linux-2.6.32.43/arch/parisc/kernel/sys_parisc.c
1820--- linux-2.6.32.43/arch/parisc/kernel/sys_parisc.c 2011-03-27 14:31:47.000000000 -0400
1821+++ linux-2.6.32.43/arch/parisc/kernel/sys_parisc.c 2011-04-17 15:56:45.000000000 -0400
1822@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1823 /* At this point: (!vma || addr < vma->vm_end). */
1824 if (TASK_SIZE - len < addr)
1825 return -ENOMEM;
1826- if (!vma || addr + len <= vma->vm_start)
1827+ if (check_heap_stack_gap(vma, addr, len))
1828 return addr;
1829 addr = vma->vm_end;
1830 }
1831@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1832 /* At this point: (!vma || addr < vma->vm_end). */
1833 if (TASK_SIZE - len < addr)
1834 return -ENOMEM;
1835- if (!vma || addr + len <= vma->vm_start)
1836+ if (check_heap_stack_gap(vma, addr, len))
1837 return addr;
1838 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1839 if (addr < vma->vm_end) /* handle wraparound */
1840@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1841 if (flags & MAP_FIXED)
1842 return addr;
1843 if (!addr)
1844- addr = TASK_UNMAPPED_BASE;
1845+ addr = current->mm->mmap_base;
1846
1847 if (filp) {
1848 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1849diff -urNp linux-2.6.32.43/arch/parisc/kernel/traps.c linux-2.6.32.43/arch/parisc/kernel/traps.c
1850--- linux-2.6.32.43/arch/parisc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
1851+++ linux-2.6.32.43/arch/parisc/kernel/traps.c 2011-04-17 15:56:45.000000000 -0400
1852@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1853
1854 down_read(&current->mm->mmap_sem);
1855 vma = find_vma(current->mm,regs->iaoq[0]);
1856- if (vma && (regs->iaoq[0] >= vma->vm_start)
1857- && (vma->vm_flags & VM_EXEC)) {
1858-
1859+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1860 fault_address = regs->iaoq[0];
1861 fault_space = regs->iasq[0];
1862
1863diff -urNp linux-2.6.32.43/arch/parisc/mm/fault.c linux-2.6.32.43/arch/parisc/mm/fault.c
1864--- linux-2.6.32.43/arch/parisc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1865+++ linux-2.6.32.43/arch/parisc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1866@@ -15,6 +15,7 @@
1867 #include <linux/sched.h>
1868 #include <linux/interrupt.h>
1869 #include <linux/module.h>
1870+#include <linux/unistd.h>
1871
1872 #include <asm/uaccess.h>
1873 #include <asm/traps.h>
1874@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1875 static unsigned long
1876 parisc_acctyp(unsigned long code, unsigned int inst)
1877 {
1878- if (code == 6 || code == 16)
1879+ if (code == 6 || code == 7 || code == 16)
1880 return VM_EXEC;
1881
1882 switch (inst & 0xf0000000) {
1883@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1884 }
1885 #endif
1886
1887+#ifdef CONFIG_PAX_PAGEEXEC
1888+/*
1889+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1890+ *
1891+ * returns 1 when task should be killed
1892+ * 2 when rt_sigreturn trampoline was detected
1893+ * 3 when unpatched PLT trampoline was detected
1894+ */
1895+static int pax_handle_fetch_fault(struct pt_regs *regs)
1896+{
1897+
1898+#ifdef CONFIG_PAX_EMUPLT
1899+ int err;
1900+
1901+ do { /* PaX: unpatched PLT emulation */
1902+ unsigned int bl, depwi;
1903+
1904+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1905+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1906+
1907+ if (err)
1908+ break;
1909+
1910+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1911+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1912+
1913+ err = get_user(ldw, (unsigned int *)addr);
1914+ err |= get_user(bv, (unsigned int *)(addr+4));
1915+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1916+
1917+ if (err)
1918+ break;
1919+
1920+ if (ldw == 0x0E801096U &&
1921+ bv == 0xEAC0C000U &&
1922+ ldw2 == 0x0E881095U)
1923+ {
1924+ unsigned int resolver, map;
1925+
1926+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1927+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1928+ if (err)
1929+ break;
1930+
1931+ regs->gr[20] = instruction_pointer(regs)+8;
1932+ regs->gr[21] = map;
1933+ regs->gr[22] = resolver;
1934+ regs->iaoq[0] = resolver | 3UL;
1935+ regs->iaoq[1] = regs->iaoq[0] + 4;
1936+ return 3;
1937+ }
1938+ }
1939+ } while (0);
1940+#endif
1941+
1942+#ifdef CONFIG_PAX_EMUTRAMP
1943+
1944+#ifndef CONFIG_PAX_EMUSIGRT
1945+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1946+ return 1;
1947+#endif
1948+
1949+ do { /* PaX: rt_sigreturn emulation */
1950+ unsigned int ldi1, ldi2, bel, nop;
1951+
1952+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1953+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1954+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1955+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1956+
1957+ if (err)
1958+ break;
1959+
1960+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1961+ ldi2 == 0x3414015AU &&
1962+ bel == 0xE4008200U &&
1963+ nop == 0x08000240U)
1964+ {
1965+ regs->gr[25] = (ldi1 & 2) >> 1;
1966+ regs->gr[20] = __NR_rt_sigreturn;
1967+ regs->gr[31] = regs->iaoq[1] + 16;
1968+ regs->sr[0] = regs->iasq[1];
1969+ regs->iaoq[0] = 0x100UL;
1970+ regs->iaoq[1] = regs->iaoq[0] + 4;
1971+ regs->iasq[0] = regs->sr[2];
1972+ regs->iasq[1] = regs->sr[2];
1973+ return 2;
1974+ }
1975+ } while (0);
1976+#endif
1977+
1978+ return 1;
1979+}
1980+
1981+void pax_report_insns(void *pc, void *sp)
1982+{
1983+ unsigned long i;
1984+
1985+ printk(KERN_ERR "PAX: bytes at PC: ");
1986+ for (i = 0; i < 5; i++) {
1987+ unsigned int c;
1988+ if (get_user(c, (unsigned int *)pc+i))
1989+ printk(KERN_CONT "???????? ");
1990+ else
1991+ printk(KERN_CONT "%08x ", c);
1992+ }
1993+ printk("\n");
1994+}
1995+#endif
1996+
1997 int fixup_exception(struct pt_regs *regs)
1998 {
1999 const struct exception_table_entry *fix;
2000@@ -192,8 +303,33 @@ good_area:
2001
2002 acc_type = parisc_acctyp(code,regs->iir);
2003
2004- if ((vma->vm_flags & acc_type) != acc_type)
2005+ if ((vma->vm_flags & acc_type) != acc_type) {
2006+
2007+#ifdef CONFIG_PAX_PAGEEXEC
2008+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2009+ (address & ~3UL) == instruction_pointer(regs))
2010+ {
2011+ up_read(&mm->mmap_sem);
2012+ switch (pax_handle_fetch_fault(regs)) {
2013+
2014+#ifdef CONFIG_PAX_EMUPLT
2015+ case 3:
2016+ return;
2017+#endif
2018+
2019+#ifdef CONFIG_PAX_EMUTRAMP
2020+ case 2:
2021+ return;
2022+#endif
2023+
2024+ }
2025+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2026+ do_group_exit(SIGKILL);
2027+ }
2028+#endif
2029+
2030 goto bad_area;
2031+ }
2032
2033 /*
2034 * If for any reason at all we couldn't handle the fault, make
2035diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/device.h linux-2.6.32.43/arch/powerpc/include/asm/device.h
2036--- linux-2.6.32.43/arch/powerpc/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
2037+++ linux-2.6.32.43/arch/powerpc/include/asm/device.h 2011-04-17 15:56:45.000000000 -0400
2038@@ -14,7 +14,7 @@ struct dev_archdata {
2039 struct device_node *of_node;
2040
2041 /* DMA operations on that device */
2042- struct dma_map_ops *dma_ops;
2043+ const struct dma_map_ops *dma_ops;
2044
2045 /*
2046 * When an iommu is in use, dma_data is used as a ptr to the base of the
2047diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/dma-mapping.h linux-2.6.32.43/arch/powerpc/include/asm/dma-mapping.h
2048--- linux-2.6.32.43/arch/powerpc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
2049+++ linux-2.6.32.43/arch/powerpc/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
2050@@ -69,9 +69,9 @@ static inline unsigned long device_to_ma
2051 #ifdef CONFIG_PPC64
2052 extern struct dma_map_ops dma_iommu_ops;
2053 #endif
2054-extern struct dma_map_ops dma_direct_ops;
2055+extern const struct dma_map_ops dma_direct_ops;
2056
2057-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2058+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2059 {
2060 /* We don't handle the NULL dev case for ISA for now. We could
2061 * do it via an out of line call but it is not needed for now. The
2062@@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
2063 return dev->archdata.dma_ops;
2064 }
2065
2066-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2067+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2068 {
2069 dev->archdata.dma_ops = ops;
2070 }
2071@@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
2072
2073 static inline int dma_supported(struct device *dev, u64 mask)
2074 {
2075- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2076+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2077
2078 if (unlikely(dma_ops == NULL))
2079 return 0;
2080@@ -132,7 +132,7 @@ static inline int dma_supported(struct d
2081
2082 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2083 {
2084- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2085+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2086
2087 if (unlikely(dma_ops == NULL))
2088 return -EIO;
2089@@ -147,7 +147,7 @@ static inline int dma_set_mask(struct de
2090 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2091 dma_addr_t *dma_handle, gfp_t flag)
2092 {
2093- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2094+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2095 void *cpu_addr;
2096
2097 BUG_ON(!dma_ops);
2098@@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(s
2099 static inline void dma_free_coherent(struct device *dev, size_t size,
2100 void *cpu_addr, dma_addr_t dma_handle)
2101 {
2102- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2103+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2104
2105 BUG_ON(!dma_ops);
2106
2107@@ -173,7 +173,7 @@ static inline void dma_free_coherent(str
2108
2109 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2110 {
2111- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2112+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2113
2114 if (dma_ops->mapping_error)
2115 return dma_ops->mapping_error(dev, dma_addr);
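[Editor's sketch] Most of the one-line powerpc hunks in this region are the same mechanical change: operation tables full of function pointers (sysfs_ops, dma_map_ops, platform_suspend_ops, kgdb_arch and friends) gain a "const" qualifier so the instances land in read-only data and their function pointers stay out of reach of stray or attacker-controlled writes. The pattern in miniature, with made-up ops purely for illustration:

#include <stdio.h>

/* A vtable-style ops structure, as used throughout the kernel. */
struct demo_ops {
    int  (*probe)(int id);
    void (*report)(const char *msg);
};

static int demo_probe(int id)            { return id > 0; }
static void demo_report(const char *msg) { printf("demo: %s\n", msg); }

/* Declaring the instance const means the table is placed in .rodata and
 * any attempt to overwrite a member (demo_ops_table.probe = ...) is
 * rejected at compile time. */
static const struct demo_ops demo_ops_table = {
    .probe  = demo_probe,
    .report = demo_report,
};

int main(void)
{
    if (demo_ops_table.probe(1))
        demo_ops_table.report("probe succeeded");
    return 0;
}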
2116diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/elf.h linux-2.6.32.43/arch/powerpc/include/asm/elf.h
2117--- linux-2.6.32.43/arch/powerpc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
2118+++ linux-2.6.32.43/arch/powerpc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
2119@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
2120 the loader. We need to make sure that it is out of the way of the program
2121 that it will "exec", and that there is sufficient room for the brk. */
2122
2123-extern unsigned long randomize_et_dyn(unsigned long base);
2124-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2125+#define ELF_ET_DYN_BASE (0x20000000)
2126+
2127+#ifdef CONFIG_PAX_ASLR
2128+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2129+
2130+#ifdef __powerpc64__
2131+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2132+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2133+#else
2134+#define PAX_DELTA_MMAP_LEN 15
2135+#define PAX_DELTA_STACK_LEN 15
2136+#endif
2137+#endif
2138
2139 /*
2140 * Our registers are always unsigned longs, whether we're a 32 bit
2141@@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(s
2142 (0x7ff >> (PAGE_SHIFT - 12)) : \
2143 (0x3ffff >> (PAGE_SHIFT - 12)))
2144
2145-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2146-#define arch_randomize_brk arch_randomize_brk
2147-
2148 #endif /* __KERNEL__ */
2149
2150 /*
2151diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/iommu.h linux-2.6.32.43/arch/powerpc/include/asm/iommu.h
2152--- linux-2.6.32.43/arch/powerpc/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
2153+++ linux-2.6.32.43/arch/powerpc/include/asm/iommu.h 2011-04-17 15:56:45.000000000 -0400
2154@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
2155 extern void iommu_init_early_dart(void);
2156 extern void iommu_init_early_pasemi(void);
2157
2158+/* dma-iommu.c */
2159+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2160+
2161 #ifdef CONFIG_PCI
2162 extern void pci_iommu_init(void);
2163 extern void pci_direct_iommu_init(void);
2164diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/kmap_types.h linux-2.6.32.43/arch/powerpc/include/asm/kmap_types.h
2165--- linux-2.6.32.43/arch/powerpc/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
2166+++ linux-2.6.32.43/arch/powerpc/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
2167@@ -26,6 +26,7 @@ enum km_type {
2168 KM_SOFTIRQ1,
2169 KM_PPC_SYNC_PAGE,
2170 KM_PPC_SYNC_ICACHE,
2171+ KM_CLEARPAGE,
2172 KM_TYPE_NR
2173 };
2174
2175diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/page_64.h linux-2.6.32.43/arch/powerpc/include/asm/page_64.h
2176--- linux-2.6.32.43/arch/powerpc/include/asm/page_64.h 2011-03-27 14:31:47.000000000 -0400
2177+++ linux-2.6.32.43/arch/powerpc/include/asm/page_64.h 2011-04-17 15:56:45.000000000 -0400
2178@@ -180,15 +180,18 @@ do { \
2179 * stack by default, so in the absense of a PT_GNU_STACK program header
2180 * we turn execute permission off.
2181 */
2182-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2183- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2184+#define VM_STACK_DEFAULT_FLAGS32 \
2185+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2186+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2187
2188 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2189 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2190
2191+#ifndef CONFIG_PAX_PAGEEXEC
2192 #define VM_STACK_DEFAULT_FLAGS \
2193 (test_thread_flag(TIF_32BIT) ? \
2194 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2195+#endif
2196
2197 #include <asm-generic/getorder.h>
2198
2199diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/page.h linux-2.6.32.43/arch/powerpc/include/asm/page.h
2200--- linux-2.6.32.43/arch/powerpc/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
2201+++ linux-2.6.32.43/arch/powerpc/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
2202@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2203 * and needs to be executable. This means the whole heap ends
2204 * up being executable.
2205 */
2206-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2207- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2208+#define VM_DATA_DEFAULT_FLAGS32 \
2209+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2210+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2211
2212 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2213 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2214@@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2215 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2216 #endif
2217
2218+#define ktla_ktva(addr) (addr)
2219+#define ktva_ktla(addr) (addr)
2220+
2221 #ifndef __ASSEMBLY__
2222
2223 #undef STRICT_MM_TYPECHECKS
2224diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/pci.h linux-2.6.32.43/arch/powerpc/include/asm/pci.h
2225--- linux-2.6.32.43/arch/powerpc/include/asm/pci.h 2011-03-27 14:31:47.000000000 -0400
2226+++ linux-2.6.32.43/arch/powerpc/include/asm/pci.h 2011-04-17 15:56:45.000000000 -0400
2227@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
2228 }
2229
2230 #ifdef CONFIG_PCI
2231-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2232-extern struct dma_map_ops *get_pci_dma_ops(void);
2233+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2234+extern const struct dma_map_ops *get_pci_dma_ops(void);
2235 #else /* CONFIG_PCI */
2236 #define set_pci_dma_ops(d)
2237 #define get_pci_dma_ops() NULL
2238diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/pgtable.h linux-2.6.32.43/arch/powerpc/include/asm/pgtable.h
2239--- linux-2.6.32.43/arch/powerpc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
2240+++ linux-2.6.32.43/arch/powerpc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
2241@@ -2,6 +2,7 @@
2242 #define _ASM_POWERPC_PGTABLE_H
2243 #ifdef __KERNEL__
2244
2245+#include <linux/const.h>
2246 #ifndef __ASSEMBLY__
2247 #include <asm/processor.h> /* For TASK_SIZE */
2248 #include <asm/mmu.h>
2249diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/pte-hash32.h linux-2.6.32.43/arch/powerpc/include/asm/pte-hash32.h
2250--- linux-2.6.32.43/arch/powerpc/include/asm/pte-hash32.h 2011-03-27 14:31:47.000000000 -0400
2251+++ linux-2.6.32.43/arch/powerpc/include/asm/pte-hash32.h 2011-04-17 15:56:45.000000000 -0400
2252@@ -21,6 +21,7 @@
2253 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2254 #define _PAGE_USER 0x004 /* usermode access allowed */
2255 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2256+#define _PAGE_EXEC _PAGE_GUARDED
2257 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2258 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2259 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2260diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/reg.h linux-2.6.32.43/arch/powerpc/include/asm/reg.h
2261--- linux-2.6.32.43/arch/powerpc/include/asm/reg.h 2011-03-27 14:31:47.000000000 -0400
2262+++ linux-2.6.32.43/arch/powerpc/include/asm/reg.h 2011-04-17 15:56:45.000000000 -0400
2263@@ -191,6 +191,7 @@
2264 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2265 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2266 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2267+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2268 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2269 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2270 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2271diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/swiotlb.h linux-2.6.32.43/arch/powerpc/include/asm/swiotlb.h
2272--- linux-2.6.32.43/arch/powerpc/include/asm/swiotlb.h 2011-03-27 14:31:47.000000000 -0400
2273+++ linux-2.6.32.43/arch/powerpc/include/asm/swiotlb.h 2011-04-17 15:56:45.000000000 -0400
2274@@ -13,7 +13,7 @@
2275
2276 #include <linux/swiotlb.h>
2277
2278-extern struct dma_map_ops swiotlb_dma_ops;
2279+extern const struct dma_map_ops swiotlb_dma_ops;
2280
2281 static inline void dma_mark_clean(void *addr, size_t size) {}
2282
2283diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/system.h linux-2.6.32.43/arch/powerpc/include/asm/system.h
2284--- linux-2.6.32.43/arch/powerpc/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
2285+++ linux-2.6.32.43/arch/powerpc/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
2286@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2287 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2288 #endif
2289
2290-extern unsigned long arch_align_stack(unsigned long sp);
2291+#define arch_align_stack(x) ((x) & ~0xfUL)
2292
2293 /* Used in very early kernel initialization. */
2294 extern unsigned long reloc_offset(void);
2295diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/uaccess.h linux-2.6.32.43/arch/powerpc/include/asm/uaccess.h
2296--- linux-2.6.32.43/arch/powerpc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2297+++ linux-2.6.32.43/arch/powerpc/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2298@@ -13,6 +13,8 @@
2299 #define VERIFY_READ 0
2300 #define VERIFY_WRITE 1
2301
2302+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2303+
2304 /*
2305 * The fs value determines whether argument validity checking should be
2306 * performed or not. If get_fs() == USER_DS, checking is performed, with
2307@@ -327,52 +329,6 @@ do { \
2308 extern unsigned long __copy_tofrom_user(void __user *to,
2309 const void __user *from, unsigned long size);
2310
2311-#ifndef __powerpc64__
2312-
2313-static inline unsigned long copy_from_user(void *to,
2314- const void __user *from, unsigned long n)
2315-{
2316- unsigned long over;
2317-
2318- if (access_ok(VERIFY_READ, from, n))
2319- return __copy_tofrom_user((__force void __user *)to, from, n);
2320- if ((unsigned long)from < TASK_SIZE) {
2321- over = (unsigned long)from + n - TASK_SIZE;
2322- return __copy_tofrom_user((__force void __user *)to, from,
2323- n - over) + over;
2324- }
2325- return n;
2326-}
2327-
2328-static inline unsigned long copy_to_user(void __user *to,
2329- const void *from, unsigned long n)
2330-{
2331- unsigned long over;
2332-
2333- if (access_ok(VERIFY_WRITE, to, n))
2334- return __copy_tofrom_user(to, (__force void __user *)from, n);
2335- if ((unsigned long)to < TASK_SIZE) {
2336- over = (unsigned long)to + n - TASK_SIZE;
2337- return __copy_tofrom_user(to, (__force void __user *)from,
2338- n - over) + over;
2339- }
2340- return n;
2341-}
2342-
2343-#else /* __powerpc64__ */
2344-
2345-#define __copy_in_user(to, from, size) \
2346- __copy_tofrom_user((to), (from), (size))
2347-
2348-extern unsigned long copy_from_user(void *to, const void __user *from,
2349- unsigned long n);
2350-extern unsigned long copy_to_user(void __user *to, const void *from,
2351- unsigned long n);
2352-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2353- unsigned long n);
2354-
2355-#endif /* __powerpc64__ */
2356-
2357 static inline unsigned long __copy_from_user_inatomic(void *to,
2358 const void __user *from, unsigned long n)
2359 {
2360@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2361 if (ret == 0)
2362 return 0;
2363 }
2364+
2365+ if (!__builtin_constant_p(n))
2366+ check_object_size(to, n, false);
2367+
2368 return __copy_tofrom_user((__force void __user *)to, from, n);
2369 }
2370
2371@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2372 if (ret == 0)
2373 return 0;
2374 }
2375+
2376+ if (!__builtin_constant_p(n))
2377+ check_object_size(from, n, true);
2378+
2379 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2380 }
2381
2382@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2383 return __copy_to_user_inatomic(to, from, size);
2384 }
2385
2386+#ifndef __powerpc64__
2387+
2388+static inline unsigned long __must_check copy_from_user(void *to,
2389+ const void __user *from, unsigned long n)
2390+{
2391+ unsigned long over;
2392+
2393+ if ((long)n < 0)
2394+ return n;
2395+
2396+ if (access_ok(VERIFY_READ, from, n)) {
2397+ if (!__builtin_constant_p(n))
2398+ check_object_size(to, n, false);
2399+ return __copy_tofrom_user((__force void __user *)to, from, n);
2400+ }
2401+ if ((unsigned long)from < TASK_SIZE) {
2402+ over = (unsigned long)from + n - TASK_SIZE;
2403+ if (!__builtin_constant_p(n - over))
2404+ check_object_size(to, n - over, false);
2405+ return __copy_tofrom_user((__force void __user *)to, from,
2406+ n - over) + over;
2407+ }
2408+ return n;
2409+}
2410+
2411+static inline unsigned long __must_check copy_to_user(void __user *to,
2412+ const void *from, unsigned long n)
2413+{
2414+ unsigned long over;
2415+
2416+ if ((long)n < 0)
2417+ return n;
2418+
2419+ if (access_ok(VERIFY_WRITE, to, n)) {
2420+ if (!__builtin_constant_p(n))
2421+ check_object_size(from, n, true);
2422+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2423+ }
2424+ if ((unsigned long)to < TASK_SIZE) {
2425+ over = (unsigned long)to + n - TASK_SIZE;
2426+ if (!__builtin_constant_p(n))
2427+ check_object_size(from, n - over, true);
2428+ return __copy_tofrom_user(to, (__force void __user *)from,
2429+ n - over) + over;
2430+ }
2431+ return n;
2432+}
2433+
2434+#else /* __powerpc64__ */
2435+
2436+#define __copy_in_user(to, from, size) \
2437+ __copy_tofrom_user((to), (from), (size))
2438+
2439+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2440+{
2441+ if ((long)n < 0 || n > INT_MAX)
2442+ return n;
2443+
2444+ if (!__builtin_constant_p(n))
2445+ check_object_size(to, n, false);
2446+
2447+ if (likely(access_ok(VERIFY_READ, from, n)))
2448+ n = __copy_from_user(to, from, n);
2449+ else
2450+ memset(to, 0, n);
2451+ return n;
2452+}
2453+
2454+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2455+{
2456+ if ((long)n < 0 || n > INT_MAX)
2457+ return n;
2458+
2459+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2460+ if (!__builtin_constant_p(n))
2461+ check_object_size(from, n, true);
2462+ n = __copy_to_user(to, from, n);
2463+ }
2464+ return n;
2465+}
2466+
2467+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2468+ unsigned long n);
2469+
2470+#endif /* __powerpc64__ */
2471+
2472 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2473
2474 static inline unsigned long clear_user(void __user *addr, unsigned long size)
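[Editor's sketch] The rewritten 64-bit copy_from_user above layers three checks onto the raw copy: reject lengths that are negative or above INT_MAX, pass non-constant lengths to check_object_size() (only declared in this hunk; the implementation lives elsewhere in the patch and validates the kernel-side buffer), and zero the destination when the user range fails access_ok() so uninitialised kernel memory is never left behind. A userspace sketch of that control flow, with the kernel helpers replaced by obvious stubs:

#include <limits.h>
#include <stdio.h>
#include <string.h>

/* Stubs standing in for the kernel-side helpers named in the patch. */
static int access_ok_stub(const void *from, unsigned long n)
{
    (void)from;
    return n <= 4096;
}

static void check_object_size_stub(const void *ptr, unsigned long n)
{
    (void)ptr;
    (void)n;   /* the real helper would verify 'ptr' fits its slab/stack object */
}

static unsigned long raw_copy(void *to, const void *from, unsigned long n)
{
    memcpy(to, from, n);
    return 0;                       /* 0 bytes left uncopied */
}

/* Mirrors the shape of the patched 64-bit copy_from_user(). */
static unsigned long copy_from_user_sketch(void *to, const void *from,
                                           unsigned long n)
{
    if ((long)n < 0 || n > INT_MAX) /* bogus length: refuse to copy anything */
        return n;

    check_object_size_stub(to, n);

    if (access_ok_stub(from, n))
        n = raw_copy(to, from, n);
    else
        memset(to, 0, n);           /* never leave the destination uninitialised */
    return n;
}

int main(void)
{
    char src[16] = "hello", dst[16];

    printf("good copy, bytes left: %lu -> '%s'\n",
           copy_from_user_sketch(dst, src, sizeof src), dst);
    printf("negative length, bytes left: %lu\n",
           copy_from_user_sketch(dst, src, (unsigned long)-1));
    return 0;
}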
2475diff -urNp linux-2.6.32.43/arch/powerpc/kernel/cacheinfo.c linux-2.6.32.43/arch/powerpc/kernel/cacheinfo.c
2476--- linux-2.6.32.43/arch/powerpc/kernel/cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
2477+++ linux-2.6.32.43/arch/powerpc/kernel/cacheinfo.c 2011-04-17 15:56:45.000000000 -0400
2478@@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
2479 &cache_assoc_attr,
2480 };
2481
2482-static struct sysfs_ops cache_index_ops = {
2483+static const struct sysfs_ops cache_index_ops = {
2484 .show = cache_index_show,
2485 };
2486
2487diff -urNp linux-2.6.32.43/arch/powerpc/kernel/dma.c linux-2.6.32.43/arch/powerpc/kernel/dma.c
2488--- linux-2.6.32.43/arch/powerpc/kernel/dma.c 2011-03-27 14:31:47.000000000 -0400
2489+++ linux-2.6.32.43/arch/powerpc/kernel/dma.c 2011-04-17 15:56:45.000000000 -0400
2490@@ -134,7 +134,7 @@ static inline void dma_direct_sync_singl
2491 }
2492 #endif
2493
2494-struct dma_map_ops dma_direct_ops = {
2495+const struct dma_map_ops dma_direct_ops = {
2496 .alloc_coherent = dma_direct_alloc_coherent,
2497 .free_coherent = dma_direct_free_coherent,
2498 .map_sg = dma_direct_map_sg,
2499diff -urNp linux-2.6.32.43/arch/powerpc/kernel/dma-iommu.c linux-2.6.32.43/arch/powerpc/kernel/dma-iommu.c
2500--- linux-2.6.32.43/arch/powerpc/kernel/dma-iommu.c 2011-03-27 14:31:47.000000000 -0400
2501+++ linux-2.6.32.43/arch/powerpc/kernel/dma-iommu.c 2011-04-17 15:56:45.000000000 -0400
2502@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2503 }
2504
2505 /* We support DMA to/from any memory page via the iommu */
2506-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2507+int dma_iommu_dma_supported(struct device *dev, u64 mask)
2508 {
2509 struct iommu_table *tbl = get_iommu_table_base(dev);
2510
2511diff -urNp linux-2.6.32.43/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.32.43/arch/powerpc/kernel/dma-swiotlb.c
2512--- linux-2.6.32.43/arch/powerpc/kernel/dma-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
2513+++ linux-2.6.32.43/arch/powerpc/kernel/dma-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
2514@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2515 * map_page, and unmap_page on highmem, use normal dma_ops
2516 * for everything else.
2517 */
2518-struct dma_map_ops swiotlb_dma_ops = {
2519+const struct dma_map_ops swiotlb_dma_ops = {
2520 .alloc_coherent = dma_direct_alloc_coherent,
2521 .free_coherent = dma_direct_free_coherent,
2522 .map_sg = swiotlb_map_sg_attrs,
2523diff -urNp linux-2.6.32.43/arch/powerpc/kernel/exceptions-64e.S linux-2.6.32.43/arch/powerpc/kernel/exceptions-64e.S
2524--- linux-2.6.32.43/arch/powerpc/kernel/exceptions-64e.S 2011-03-27 14:31:47.000000000 -0400
2525+++ linux-2.6.32.43/arch/powerpc/kernel/exceptions-64e.S 2011-04-17 15:56:45.000000000 -0400
2526@@ -455,6 +455,7 @@ storage_fault_common:
2527 std r14,_DAR(r1)
2528 std r15,_DSISR(r1)
2529 addi r3,r1,STACK_FRAME_OVERHEAD
2530+ bl .save_nvgprs
2531 mr r4,r14
2532 mr r5,r15
2533 ld r14,PACA_EXGEN+EX_R14(r13)
2534@@ -464,8 +465,7 @@ storage_fault_common:
2535 cmpdi r3,0
2536 bne- 1f
2537 b .ret_from_except_lite
2538-1: bl .save_nvgprs
2539- mr r5,r3
2540+1: mr r5,r3
2541 addi r3,r1,STACK_FRAME_OVERHEAD
2542 ld r4,_DAR(r1)
2543 bl .bad_page_fault
2544diff -urNp linux-2.6.32.43/arch/powerpc/kernel/exceptions-64s.S linux-2.6.32.43/arch/powerpc/kernel/exceptions-64s.S
2545--- linux-2.6.32.43/arch/powerpc/kernel/exceptions-64s.S 2011-03-27 14:31:47.000000000 -0400
2546+++ linux-2.6.32.43/arch/powerpc/kernel/exceptions-64s.S 2011-04-17 15:56:45.000000000 -0400
2547@@ -818,10 +818,10 @@ handle_page_fault:
2548 11: ld r4,_DAR(r1)
2549 ld r5,_DSISR(r1)
2550 addi r3,r1,STACK_FRAME_OVERHEAD
2551+ bl .save_nvgprs
2552 bl .do_page_fault
2553 cmpdi r3,0
2554 beq+ 13f
2555- bl .save_nvgprs
2556 mr r5,r3
2557 addi r3,r1,STACK_FRAME_OVERHEAD
2558 lwz r4,_DAR(r1)
2559diff -urNp linux-2.6.32.43/arch/powerpc/kernel/ibmebus.c linux-2.6.32.43/arch/powerpc/kernel/ibmebus.c
2560--- linux-2.6.32.43/arch/powerpc/kernel/ibmebus.c 2011-03-27 14:31:47.000000000 -0400
2561+++ linux-2.6.32.43/arch/powerpc/kernel/ibmebus.c 2011-04-17 15:56:45.000000000 -0400
2562@@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct
2563 return 1;
2564 }
2565
2566-static struct dma_map_ops ibmebus_dma_ops = {
2567+static const struct dma_map_ops ibmebus_dma_ops = {
2568 .alloc_coherent = ibmebus_alloc_coherent,
2569 .free_coherent = ibmebus_free_coherent,
2570 .map_sg = ibmebus_map_sg,
2571diff -urNp linux-2.6.32.43/arch/powerpc/kernel/kgdb.c linux-2.6.32.43/arch/powerpc/kernel/kgdb.c
2572--- linux-2.6.32.43/arch/powerpc/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
2573+++ linux-2.6.32.43/arch/powerpc/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
2574@@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2575 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2576 return 0;
2577
2578- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2579+ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2580 regs->nip += 4;
2581
2582 return 1;
2583@@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2584 /*
2585 * Global data
2586 */
2587-struct kgdb_arch arch_kgdb_ops = {
2588+const struct kgdb_arch arch_kgdb_ops = {
2589 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2590 };
2591
2592diff -urNp linux-2.6.32.43/arch/powerpc/kernel/module_32.c linux-2.6.32.43/arch/powerpc/kernel/module_32.c
2593--- linux-2.6.32.43/arch/powerpc/kernel/module_32.c 2011-03-27 14:31:47.000000000 -0400
2594+++ linux-2.6.32.43/arch/powerpc/kernel/module_32.c 2011-04-17 15:56:45.000000000 -0400
2595@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2596 me->arch.core_plt_section = i;
2597 }
2598 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2599- printk("Module doesn't contain .plt or .init.plt sections.\n");
2600+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2601 return -ENOEXEC;
2602 }
2603
2604@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2605
2606 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2607 /* Init, or core PLT? */
2608- if (location >= mod->module_core
2609- && location < mod->module_core + mod->core_size)
2610+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2611+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2612 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2613- else
2614+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2615+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2616 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2617+ else {
2618+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2619+ return ~0UL;
2620+ }
2621
2622 /* Find this entry, or if that fails, the next avail. entry */
2623 while (entry->jump[0]) {
2624diff -urNp linux-2.6.32.43/arch/powerpc/kernel/module.c linux-2.6.32.43/arch/powerpc/kernel/module.c
2625--- linux-2.6.32.43/arch/powerpc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
2626+++ linux-2.6.32.43/arch/powerpc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
2627@@ -31,11 +31,24 @@
2628
2629 LIST_HEAD(module_bug_list);
2630
2631+#ifdef CONFIG_PAX_KERNEXEC
2632 void *module_alloc(unsigned long size)
2633 {
2634 if (size == 0)
2635 return NULL;
2636
2637+ return vmalloc(size);
2638+}
2639+
2640+void *module_alloc_exec(unsigned long size)
2641+#else
2642+void *module_alloc(unsigned long size)
2643+#endif
2644+
2645+{
2646+ if (size == 0)
2647+ return NULL;
2648+
2649 return vmalloc_exec(size);
2650 }
2651
2652@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2653 vfree(module_region);
2654 }
2655
2656+#ifdef CONFIG_PAX_KERNEXEC
2657+void module_free_exec(struct module *mod, void *module_region)
2658+{
2659+ module_free(mod, module_region);
2660+}
2661+#endif
2662+
2663 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2664 const Elf_Shdr *sechdrs,
2665 const char *name)
2666diff -urNp linux-2.6.32.43/arch/powerpc/kernel/pci-common.c linux-2.6.32.43/arch/powerpc/kernel/pci-common.c
2667--- linux-2.6.32.43/arch/powerpc/kernel/pci-common.c 2011-03-27 14:31:47.000000000 -0400
2668+++ linux-2.6.32.43/arch/powerpc/kernel/pci-common.c 2011-04-17 15:56:45.000000000 -0400
2669@@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
2670 unsigned int ppc_pci_flags = 0;
2671
2672
2673-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2674+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2675
2676-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2677+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2678 {
2679 pci_dma_ops = dma_ops;
2680 }
2681
2682-struct dma_map_ops *get_pci_dma_ops(void)
2683+const struct dma_map_ops *get_pci_dma_ops(void)
2684 {
2685 return pci_dma_ops;
2686 }
2687diff -urNp linux-2.6.32.43/arch/powerpc/kernel/process.c linux-2.6.32.43/arch/powerpc/kernel/process.c
2688--- linux-2.6.32.43/arch/powerpc/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
2689+++ linux-2.6.32.43/arch/powerpc/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
2690@@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
2691 * Lookup NIP late so we have the best change of getting the
2692 * above info out without failing
2693 */
2694- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2695- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2696+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2697+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2698 #endif
2699 show_stack(current, (unsigned long *) regs->gpr[1]);
2700 if (!user_mode(regs))
2701@@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk,
2702 newsp = stack[0];
2703 ip = stack[STACK_FRAME_LR_SAVE];
2704 if (!firstframe || ip != lr) {
2705- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2706+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2707 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2708 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2709- printk(" (%pS)",
2710+ printk(" (%pA)",
2711 (void *)current->ret_stack[curr_frame].ret);
2712 curr_frame--;
2713 }
2714@@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk,
2715 struct pt_regs *regs = (struct pt_regs *)
2716 (sp + STACK_FRAME_OVERHEAD);
2717 lr = regs->link;
2718- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2719+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2720 regs->trap, (void *)regs->nip, (void *)lr);
2721 firstframe = 1;
2722 }
2723@@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
2724 }
2725
2726 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2727-
2728-unsigned long arch_align_stack(unsigned long sp)
2729-{
2730- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2731- sp -= get_random_int() & ~PAGE_MASK;
2732- return sp & ~0xf;
2733-}
2734-
2735-static inline unsigned long brk_rnd(void)
2736-{
2737- unsigned long rnd = 0;
2738-
2739- /* 8MB for 32bit, 1GB for 64bit */
2740- if (is_32bit_task())
2741- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2742- else
2743- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2744-
2745- return rnd << PAGE_SHIFT;
2746-}
2747-
2748-unsigned long arch_randomize_brk(struct mm_struct *mm)
2749-{
2750- unsigned long base = mm->brk;
2751- unsigned long ret;
2752-
2753-#ifdef CONFIG_PPC_STD_MMU_64
2754- /*
2755- * If we are using 1TB segments and we are allowed to randomise
2756- * the heap, we can put it above 1TB so it is backed by a 1TB
2757- * segment. Otherwise the heap will be in the bottom 1TB
2758- * which always uses 256MB segments and this may result in a
2759- * performance penalty.
2760- */
2761- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2762- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2763-#endif
2764-
2765- ret = PAGE_ALIGN(base + brk_rnd());
2766-
2767- if (ret < mm->brk)
2768- return mm->brk;
2769-
2770- return ret;
2771-}
2772-
2773-unsigned long randomize_et_dyn(unsigned long base)
2774-{
2775- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2776-
2777- if (ret < base)
2778- return base;
2779-
2780- return ret;
2781-}
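[Editor's sketch] With the randomising arch_align_stack(), brk_rnd(), arch_randomize_brk() and randomize_et_dyn() implementations deleted here (and the MIPS arch_align_stack removed earlier in this excerpt), the asm/system.h hunks fall back to a plain 16-byte round-down; the patch presumably relies on PaX's own ASLR for the randomisation these helpers used to provide. What the replacement macro computes:

#include <stdio.h>

/* The patch replaces the randomising helper with this fixed alignment. */
#define arch_align_stack(x) ((x) & ~0xfUL)

int main(void)
{
    unsigned long sp = 0x7ffffffdeadbUL;

    /* Rounds down to the previous 16-byte boundary, no per-exec jitter. */
    printf("%#lx -> %#lx\n", sp, arch_align_stack(sp));
    return 0;
}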
2782diff -urNp linux-2.6.32.43/arch/powerpc/kernel/signal_32.c linux-2.6.32.43/arch/powerpc/kernel/signal_32.c
2783--- linux-2.6.32.43/arch/powerpc/kernel/signal_32.c 2011-03-27 14:31:47.000000000 -0400
2784+++ linux-2.6.32.43/arch/powerpc/kernel/signal_32.c 2011-04-17 15:56:45.000000000 -0400
2785@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2786 /* Save user registers on the stack */
2787 frame = &rt_sf->uc.uc_mcontext;
2788 addr = frame;
2789- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2790+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2791 if (save_user_regs(regs, frame, 0, 1))
2792 goto badframe;
2793 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2794diff -urNp linux-2.6.32.43/arch/powerpc/kernel/signal_64.c linux-2.6.32.43/arch/powerpc/kernel/signal_64.c
2795--- linux-2.6.32.43/arch/powerpc/kernel/signal_64.c 2011-03-27 14:31:47.000000000 -0400
2796+++ linux-2.6.32.43/arch/powerpc/kernel/signal_64.c 2011-04-17 15:56:45.000000000 -0400
2797@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2798 current->thread.fpscr.val = 0;
2799
2800 /* Set up to return from userspace. */
2801- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2802+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2803 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2804 } else {
2805 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2806diff -urNp linux-2.6.32.43/arch/powerpc/kernel/sys_ppc32.c linux-2.6.32.43/arch/powerpc/kernel/sys_ppc32.c
2807--- linux-2.6.32.43/arch/powerpc/kernel/sys_ppc32.c 2011-03-27 14:31:47.000000000 -0400
2808+++ linux-2.6.32.43/arch/powerpc/kernel/sys_ppc32.c 2011-04-17 15:56:45.000000000 -0400
2809@@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct
2810 if (oldlenp) {
2811 if (!error) {
2812 if (get_user(oldlen, oldlenp) ||
2813- put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
2814+ put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
2815+ copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
2816 error = -EFAULT;
2817 }
2818- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
2819 }
2820 return error;
2821 }
2822diff -urNp linux-2.6.32.43/arch/powerpc/kernel/traps.c linux-2.6.32.43/arch/powerpc/kernel/traps.c
2823--- linux-2.6.32.43/arch/powerpc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
2824+++ linux-2.6.32.43/arch/powerpc/kernel/traps.c 2011-06-13 21:33:37.000000000 -0400
2825@@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
2826 static inline void pmac_backlight_unblank(void) { }
2827 #endif
2828
2829+extern void gr_handle_kernel_exploit(void);
2830+
2831 int die(const char *str, struct pt_regs *regs, long err)
2832 {
2833 static struct {
2834@@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs
2835 if (panic_on_oops)
2836 panic("Fatal exception");
2837
2838+ gr_handle_kernel_exploit();
2839+
2840 oops_exit();
2841 do_exit(err);
2842
2843diff -urNp linux-2.6.32.43/arch/powerpc/kernel/vdso.c linux-2.6.32.43/arch/powerpc/kernel/vdso.c
2844--- linux-2.6.32.43/arch/powerpc/kernel/vdso.c 2011-03-27 14:31:47.000000000 -0400
2845+++ linux-2.6.32.43/arch/powerpc/kernel/vdso.c 2011-04-17 15:56:45.000000000 -0400
2846@@ -36,6 +36,7 @@
2847 #include <asm/firmware.h>
2848 #include <asm/vdso.h>
2849 #include <asm/vdso_datapage.h>
2850+#include <asm/mman.h>
2851
2852 #include "setup.h"
2853
2854@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2855 vdso_base = VDSO32_MBASE;
2856 #endif
2857
2858- current->mm->context.vdso_base = 0;
2859+ current->mm->context.vdso_base = ~0UL;
2860
2861 /* vDSO has a problem and was disabled, just don't "enable" it for the
2862 * process
2863@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2864 vdso_base = get_unmapped_area(NULL, vdso_base,
2865 (vdso_pages << PAGE_SHIFT) +
2866 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2867- 0, 0);
2868+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2869 if (IS_ERR_VALUE(vdso_base)) {
2870 rc = vdso_base;
2871 goto fail_mmapsem;
2872diff -urNp linux-2.6.32.43/arch/powerpc/kernel/vio.c linux-2.6.32.43/arch/powerpc/kernel/vio.c
2873--- linux-2.6.32.43/arch/powerpc/kernel/vio.c 2011-03-27 14:31:47.000000000 -0400
2874+++ linux-2.6.32.43/arch/powerpc/kernel/vio.c 2011-04-17 15:56:45.000000000 -0400
2875@@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struc
2876 vio_cmo_dealloc(viodev, alloc_size);
2877 }
2878
2879-struct dma_map_ops vio_dma_mapping_ops = {
2880+static const struct dma_map_ops vio_dma_mapping_ops = {
2881 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2882 .free_coherent = vio_dma_iommu_free_coherent,
2883 .map_sg = vio_dma_iommu_map_sg,
2884 .unmap_sg = vio_dma_iommu_unmap_sg,
2885+ .dma_supported = dma_iommu_dma_supported,
2886 .map_page = vio_dma_iommu_map_page,
2887 .unmap_page = vio_dma_iommu_unmap_page,
2888
2889@@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vi
2890
2891 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2892 {
2893- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2894 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2895 }
2896
2897diff -urNp linux-2.6.32.43/arch/powerpc/lib/usercopy_64.c linux-2.6.32.43/arch/powerpc/lib/usercopy_64.c
2898--- linux-2.6.32.43/arch/powerpc/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
2899+++ linux-2.6.32.43/arch/powerpc/lib/usercopy_64.c 2011-04-17 15:56:45.000000000 -0400
2900@@ -9,22 +9,6 @@
2901 #include <linux/module.h>
2902 #include <asm/uaccess.h>
2903
2904-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2905-{
2906- if (likely(access_ok(VERIFY_READ, from, n)))
2907- n = __copy_from_user(to, from, n);
2908- else
2909- memset(to, 0, n);
2910- return n;
2911-}
2912-
2913-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2914-{
2915- if (likely(access_ok(VERIFY_WRITE, to, n)))
2916- n = __copy_to_user(to, from, n);
2917- return n;
2918-}
2919-
2920 unsigned long copy_in_user(void __user *to, const void __user *from,
2921 unsigned long n)
2922 {
2923@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2924 return n;
2925 }
2926
2927-EXPORT_SYMBOL(copy_from_user);
2928-EXPORT_SYMBOL(copy_to_user);
2929 EXPORT_SYMBOL(copy_in_user);
2930
2931diff -urNp linux-2.6.32.43/arch/powerpc/mm/fault.c linux-2.6.32.43/arch/powerpc/mm/fault.c
2932--- linux-2.6.32.43/arch/powerpc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
2933+++ linux-2.6.32.43/arch/powerpc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
2934@@ -30,6 +30,10 @@
2935 #include <linux/kprobes.h>
2936 #include <linux/kdebug.h>
2937 #include <linux/perf_event.h>
2938+#include <linux/slab.h>
2939+#include <linux/pagemap.h>
2940+#include <linux/compiler.h>
2941+#include <linux/unistd.h>
2942
2943 #include <asm/firmware.h>
2944 #include <asm/page.h>
2945@@ -40,6 +44,7 @@
2946 #include <asm/uaccess.h>
2947 #include <asm/tlbflush.h>
2948 #include <asm/siginfo.h>
2949+#include <asm/ptrace.h>
2950
2951
2952 #ifdef CONFIG_KPROBES
2953@@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
2954 }
2955 #endif
2956
2957+#ifdef CONFIG_PAX_PAGEEXEC
2958+/*
2959+ * PaX: decide what to do with offenders (regs->nip = fault address)
2960+ *
2961+ * returns 1 when task should be killed
2962+ */
2963+static int pax_handle_fetch_fault(struct pt_regs *regs)
2964+{
2965+ return 1;
2966+}
2967+
2968+void pax_report_insns(void *pc, void *sp)
2969+{
2970+ unsigned long i;
2971+
2972+ printk(KERN_ERR "PAX: bytes at PC: ");
2973+ for (i = 0; i < 5; i++) {
2974+ unsigned int c;
2975+ if (get_user(c, (unsigned int __user *)pc+i))
2976+ printk(KERN_CONT "???????? ");
2977+ else
2978+ printk(KERN_CONT "%08x ", c);
2979+ }
2980+ printk("\n");
2981+}
2982+#endif
2983+
2984 /*
2985 * Check whether the instruction at regs->nip is a store using
2986 * an update addressing form which will update r1.
2987@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
2988 * indicate errors in DSISR but can validly be set in SRR1.
2989 */
2990 if (trap == 0x400)
2991- error_code &= 0x48200000;
2992+ error_code &= 0x58200000;
2993 else
2994 is_write = error_code & DSISR_ISSTORE;
2995 #else
2996@@ -250,7 +282,7 @@ good_area:
2997 * "undefined". Of those that can be set, this is the only
2998 * one which seems bad.
2999 */
3000- if (error_code & 0x10000000)
3001+ if (error_code & DSISR_GUARDED)
3002 /* Guarded storage error. */
3003 goto bad_area;
3004 #endif /* CONFIG_8xx */
3005@@ -265,7 +297,7 @@ good_area:
3006 * processors use the same I/D cache coherency mechanism
3007 * as embedded.
3008 */
3009- if (error_code & DSISR_PROTFAULT)
3010+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3011 goto bad_area;
3012 #endif /* CONFIG_PPC_STD_MMU */
3013
3014@@ -335,6 +367,23 @@ bad_area:
3015 bad_area_nosemaphore:
3016 /* User mode accesses cause a SIGSEGV */
3017 if (user_mode(regs)) {
3018+
3019+#ifdef CONFIG_PAX_PAGEEXEC
3020+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3021+#ifdef CONFIG_PPC_STD_MMU
3022+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3023+#else
3024+ if (is_exec && regs->nip == address) {
3025+#endif
3026+ switch (pax_handle_fetch_fault(regs)) {
3027+ }
3028+
3029+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3030+ do_group_exit(SIGKILL);
3031+ }
3032+ }
3033+#endif
3034+
3035 _exception(SIGSEGV, regs, code, address);
3036 return 0;
3037 }
3038diff -urNp linux-2.6.32.43/arch/powerpc/mm/mmap_64.c linux-2.6.32.43/arch/powerpc/mm/mmap_64.c
3039--- linux-2.6.32.43/arch/powerpc/mm/mmap_64.c 2011-03-27 14:31:47.000000000 -0400
3040+++ linux-2.6.32.43/arch/powerpc/mm/mmap_64.c 2011-04-17 15:56:45.000000000 -0400
3041@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
3042 */
3043 if (mmap_is_legacy()) {
3044 mm->mmap_base = TASK_UNMAPPED_BASE;
3045+
3046+#ifdef CONFIG_PAX_RANDMMAP
3047+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3048+ mm->mmap_base += mm->delta_mmap;
3049+#endif
3050+
3051 mm->get_unmapped_area = arch_get_unmapped_area;
3052 mm->unmap_area = arch_unmap_area;
3053 } else {
3054 mm->mmap_base = mmap_base();
3055+
3056+#ifdef CONFIG_PAX_RANDMMAP
3057+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3058+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3059+#endif
3060+
3061 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3062 mm->unmap_area = arch_unmap_area_topdown;
3063 }
3064diff -urNp linux-2.6.32.43/arch/powerpc/mm/slice.c linux-2.6.32.43/arch/powerpc/mm/slice.c
3065--- linux-2.6.32.43/arch/powerpc/mm/slice.c 2011-03-27 14:31:47.000000000 -0400
3066+++ linux-2.6.32.43/arch/powerpc/mm/slice.c 2011-04-17 15:56:45.000000000 -0400
3067@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
3068 if ((mm->task_size - len) < addr)
3069 return 0;
3070 vma = find_vma(mm, addr);
3071- return (!vma || (addr + len) <= vma->vm_start);
3072+ return check_heap_stack_gap(vma, addr, len);
3073 }
3074
3075 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3076@@ -256,7 +256,7 @@ full_search:
3077 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3078 continue;
3079 }
3080- if (!vma || addr + len <= vma->vm_start) {
3081+ if (check_heap_stack_gap(vma, addr, len)) {
3082 /*
3083 * Remember the place where we stopped the search:
3084 */
3085@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
3086 }
3087 }
3088
3089- addr = mm->mmap_base;
3090- while (addr > len) {
3091+ if (mm->mmap_base < len)
3092+ addr = -ENOMEM;
3093+ else
3094+ addr = mm->mmap_base - len;
3095+
3096+ while (!IS_ERR_VALUE(addr)) {
3097 /* Go down by chunk size */
3098- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3099+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
3100
3101 /* Check for hit with different page size */
3102 mask = slice_range_to_mask(addr, len);
3103@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
3104 * return with success:
3105 */
3106 vma = find_vma(mm, addr);
3107- if (!vma || (addr + len) <= vma->vm_start) {
3108+ if (check_heap_stack_gap(vma, addr, len)) {
3109 /* remember the address as a hint for next time */
3110 if (use_cache)
3111 mm->free_area_cache = addr;
3112@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
3113 mm->cached_hole_size = vma->vm_start - addr;
3114
3115 /* try just below the current vma->vm_start */
3116- addr = vma->vm_start;
3117+ addr = skip_heap_stack_gap(vma, len);
3118 }
3119
3120 /*
3121@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
3122 if (fixed && addr > (mm->task_size - len))
3123 return -EINVAL;
3124
3125+#ifdef CONFIG_PAX_RANDMMAP
3126+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3127+ addr = 0;
3128+#endif
3129+
3130 /* If hint, make sure it matches our alignment restrictions */
3131 if (!fixed && addr) {
3132 addr = _ALIGN_UP(addr, 1ul << pshift);
3133diff -urNp linux-2.6.32.43/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.32.43/arch/powerpc/platforms/52xx/lite5200_pm.c
3134--- linux-2.6.32.43/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-03-27 14:31:47.000000000 -0400
3135+++ linux-2.6.32.43/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-04-17 15:56:45.000000000 -0400
3136@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3137 lite5200_pm_target_state = PM_SUSPEND_ON;
3138 }
3139
3140-static struct platform_suspend_ops lite5200_pm_ops = {
3141+static const struct platform_suspend_ops lite5200_pm_ops = {
3142 .valid = lite5200_pm_valid,
3143 .begin = lite5200_pm_begin,
3144 .prepare = lite5200_pm_prepare,
3145diff -urNp linux-2.6.32.43/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.32.43/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3146--- linux-2.6.32.43/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-03-27 14:31:47.000000000 -0400
3147+++ linux-2.6.32.43/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-04-17 15:56:45.000000000 -0400
3148@@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3149 iounmap(mbar);
3150 }
3151
3152-static struct platform_suspend_ops mpc52xx_pm_ops = {
3153+static const struct platform_suspend_ops mpc52xx_pm_ops = {
3154 .valid = mpc52xx_pm_valid,
3155 .prepare = mpc52xx_pm_prepare,
3156 .enter = mpc52xx_pm_enter,
3157diff -urNp linux-2.6.32.43/arch/powerpc/platforms/83xx/suspend.c linux-2.6.32.43/arch/powerpc/platforms/83xx/suspend.c
3158--- linux-2.6.32.43/arch/powerpc/platforms/83xx/suspend.c 2011-03-27 14:31:47.000000000 -0400
3159+++ linux-2.6.32.43/arch/powerpc/platforms/83xx/suspend.c 2011-04-17 15:56:45.000000000 -0400
3160@@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3161 return ret;
3162 }
3163
3164-static struct platform_suspend_ops mpc83xx_suspend_ops = {
3165+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3166 .valid = mpc83xx_suspend_valid,
3167 .begin = mpc83xx_suspend_begin,
3168 .enter = mpc83xx_suspend_enter,
3169diff -urNp linux-2.6.32.43/arch/powerpc/platforms/cell/iommu.c linux-2.6.32.43/arch/powerpc/platforms/cell/iommu.c
3170--- linux-2.6.32.43/arch/powerpc/platforms/cell/iommu.c 2011-03-27 14:31:47.000000000 -0400
3171+++ linux-2.6.32.43/arch/powerpc/platforms/cell/iommu.c 2011-04-17 15:56:45.000000000 -0400
3172@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
3173
3174 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3175
3176-struct dma_map_ops dma_iommu_fixed_ops = {
3177+const struct dma_map_ops dma_iommu_fixed_ops = {
3178 .alloc_coherent = dma_fixed_alloc_coherent,
3179 .free_coherent = dma_fixed_free_coherent,
3180 .map_sg = dma_fixed_map_sg,
3181diff -urNp linux-2.6.32.43/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.32.43/arch/powerpc/platforms/ps3/system-bus.c
3182--- linux-2.6.32.43/arch/powerpc/platforms/ps3/system-bus.c 2011-03-27 14:31:47.000000000 -0400
3183+++ linux-2.6.32.43/arch/powerpc/platforms/ps3/system-bus.c 2011-04-17 15:56:45.000000000 -0400
3184@@ -694,7 +694,7 @@ static int ps3_dma_supported(struct devi
3185 return mask >= DMA_BIT_MASK(32);
3186 }
3187
3188-static struct dma_map_ops ps3_sb_dma_ops = {
3189+static const struct dma_map_ops ps3_sb_dma_ops = {
3190 .alloc_coherent = ps3_alloc_coherent,
3191 .free_coherent = ps3_free_coherent,
3192 .map_sg = ps3_sb_map_sg,
3193@@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops
3194 .unmap_page = ps3_unmap_page,
3195 };
3196
3197-static struct dma_map_ops ps3_ioc0_dma_ops = {
3198+static const struct dma_map_ops ps3_ioc0_dma_ops = {
3199 .alloc_coherent = ps3_alloc_coherent,
3200 .free_coherent = ps3_free_coherent,
3201 .map_sg = ps3_ioc0_map_sg,
3202diff -urNp linux-2.6.32.43/arch/powerpc/platforms/pseries/Kconfig linux-2.6.32.43/arch/powerpc/platforms/pseries/Kconfig
3203--- linux-2.6.32.43/arch/powerpc/platforms/pseries/Kconfig 2011-03-27 14:31:47.000000000 -0400
3204+++ linux-2.6.32.43/arch/powerpc/platforms/pseries/Kconfig 2011-04-17 15:56:45.000000000 -0400
3205@@ -2,6 +2,8 @@ config PPC_PSERIES
3206 depends on PPC64 && PPC_BOOK3S
3207 bool "IBM pSeries & new (POWER5-based) iSeries"
3208 select MPIC
3209+ select PCI_MSI
3210+ select XICS
3211 select PPC_I8259
3212 select PPC_RTAS
3213 select RTAS_ERROR_LOGGING
3214diff -urNp linux-2.6.32.43/arch/s390/include/asm/elf.h linux-2.6.32.43/arch/s390/include/asm/elf.h
3215--- linux-2.6.32.43/arch/s390/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3216+++ linux-2.6.32.43/arch/s390/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
3217@@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3218 that it will "exec", and that there is sufficient room for the brk. */
3219 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3220
3221+#ifdef CONFIG_PAX_ASLR
3222+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3223+
3224+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3225+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3226+#endif
3227+
3228 /* This yields a mask that user programs can use to figure out what
3229 instruction set this CPU supports. */
3230
3231diff -urNp linux-2.6.32.43/arch/s390/include/asm/setup.h linux-2.6.32.43/arch/s390/include/asm/setup.h
3232--- linux-2.6.32.43/arch/s390/include/asm/setup.h 2011-03-27 14:31:47.000000000 -0400
3233+++ linux-2.6.32.43/arch/s390/include/asm/setup.h 2011-04-17 15:56:45.000000000 -0400
3234@@ -50,13 +50,13 @@ extern unsigned long memory_end;
3235 void detect_memory_layout(struct mem_chunk chunk[]);
3236
3237 #ifdef CONFIG_S390_SWITCH_AMODE
3238-extern unsigned int switch_amode;
3239+#define switch_amode (1)
3240 #else
3241 #define switch_amode (0)
3242 #endif
3243
3244 #ifdef CONFIG_S390_EXEC_PROTECT
3245-extern unsigned int s390_noexec;
3246+#define s390_noexec (1)
3247 #else
3248 #define s390_noexec (0)
3249 #endif
3250diff -urNp linux-2.6.32.43/arch/s390/include/asm/uaccess.h linux-2.6.32.43/arch/s390/include/asm/uaccess.h
3251--- linux-2.6.32.43/arch/s390/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3252+++ linux-2.6.32.43/arch/s390/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
3253@@ -232,6 +232,10 @@ static inline unsigned long __must_check
3254 copy_to_user(void __user *to, const void *from, unsigned long n)
3255 {
3256 might_fault();
3257+
3258+ if ((long)n < 0)
3259+ return n;
3260+
3261 if (access_ok(VERIFY_WRITE, to, n))
3262 n = __copy_to_user(to, from, n);
3263 return n;
3264@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
3265 static inline unsigned long __must_check
3266 __copy_from_user(void *to, const void __user *from, unsigned long n)
3267 {
3268+ if ((long)n < 0)
3269+ return n;
3270+
3271 if (__builtin_constant_p(n) && (n <= 256))
3272 return uaccess.copy_from_user_small(n, from, to);
3273 else
3274@@ -283,6 +290,10 @@ static inline unsigned long __must_check
3275 copy_from_user(void *to, const void __user *from, unsigned long n)
3276 {
3277 might_fault();
3278+
3279+ if ((long)n < 0)
3280+ return n;
3281+
3282 if (access_ok(VERIFY_READ, from, n))
3283 n = __copy_from_user(to, from, n);
3284 else
3285diff -urNp linux-2.6.32.43/arch/s390/Kconfig linux-2.6.32.43/arch/s390/Kconfig
3286--- linux-2.6.32.43/arch/s390/Kconfig 2011-03-27 14:31:47.000000000 -0400
3287+++ linux-2.6.32.43/arch/s390/Kconfig 2011-04-17 15:56:45.000000000 -0400
3288@@ -194,28 +194,26 @@ config AUDIT_ARCH
3289
3290 config S390_SWITCH_AMODE
3291 bool "Switch kernel/user addressing modes"
3292+ default y
3293 help
3294 This option allows to switch the addressing modes of kernel and user
3295- space. The kernel parameter switch_amode=on will enable this feature,
3296- default is disabled. Enabling this (via kernel parameter) on machines
3297- earlier than IBM System z9-109 EC/BC will reduce system performance.
3298+ space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3299+ will reduce system performance.
3300
3301 Note that this option will also be selected by selecting the execute
3302- protection option below. Enabling the execute protection via the
3303- noexec kernel parameter will also switch the addressing modes,
3304- independent of the switch_amode kernel parameter.
3305+ protection option below. Enabling the execute protection will also
3306+ switch the addressing modes, independent of this option.
3307
3308
3309 config S390_EXEC_PROTECT
3310 bool "Data execute protection"
3311+ default y
3312 select S390_SWITCH_AMODE
3313 help
3314 This option allows to enable a buffer overflow protection for user
3315 space programs and it also selects the addressing mode option above.
3316- The kernel parameter noexec=on will enable this feature and also
3317- switch the addressing modes, default is disabled. Enabling this (via
3318- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3319- will reduce system performance.
3320+ Enabling this on machines earlier than IBM System z9-109 EC/BC will
3321+ reduce system performance.
3322
3323 comment "Code generation options"
3324
3325diff -urNp linux-2.6.32.43/arch/s390/kernel/module.c linux-2.6.32.43/arch/s390/kernel/module.c
3326--- linux-2.6.32.43/arch/s390/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
3327+++ linux-2.6.32.43/arch/s390/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
3328@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3329
3330 /* Increase core size by size of got & plt and set start
3331 offsets for got and plt. */
3332- me->core_size = ALIGN(me->core_size, 4);
3333- me->arch.got_offset = me->core_size;
3334- me->core_size += me->arch.got_size;
3335- me->arch.plt_offset = me->core_size;
3336- me->core_size += me->arch.plt_size;
3337+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
3338+ me->arch.got_offset = me->core_size_rw;
3339+ me->core_size_rw += me->arch.got_size;
3340+ me->arch.plt_offset = me->core_size_rx;
3341+ me->core_size_rx += me->arch.plt_size;
3342 return 0;
3343 }
3344
3345@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3346 if (info->got_initialized == 0) {
3347 Elf_Addr *gotent;
3348
3349- gotent = me->module_core + me->arch.got_offset +
3350+ gotent = me->module_core_rw + me->arch.got_offset +
3351 info->got_offset;
3352 *gotent = val;
3353 info->got_initialized = 1;
3354@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3355 else if (r_type == R_390_GOTENT ||
3356 r_type == R_390_GOTPLTENT)
3357 *(unsigned int *) loc =
3358- (val + (Elf_Addr) me->module_core - loc) >> 1;
3359+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3360 else if (r_type == R_390_GOT64 ||
3361 r_type == R_390_GOTPLT64)
3362 *(unsigned long *) loc = val;
3363@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3364 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3365 if (info->plt_initialized == 0) {
3366 unsigned int *ip;
3367- ip = me->module_core + me->arch.plt_offset +
3368+ ip = me->module_core_rx + me->arch.plt_offset +
3369 info->plt_offset;
3370 #ifndef CONFIG_64BIT
3371 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3372@@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3373 val - loc + 0xffffUL < 0x1ffffeUL) ||
3374 (r_type == R_390_PLT32DBL &&
3375 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3376- val = (Elf_Addr) me->module_core +
3377+ val = (Elf_Addr) me->module_core_rx +
3378 me->arch.plt_offset +
3379 info->plt_offset;
3380 val += rela->r_addend - loc;
3381@@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3382 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3383 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3384 val = val + rela->r_addend -
3385- ((Elf_Addr) me->module_core + me->arch.got_offset);
3386+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3387 if (r_type == R_390_GOTOFF16)
3388 *(unsigned short *) loc = val;
3389 else if (r_type == R_390_GOTOFF32)
3390@@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3391 break;
3392 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3393 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3394- val = (Elf_Addr) me->module_core + me->arch.got_offset +
3395+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3396 rela->r_addend - loc;
3397 if (r_type == R_390_GOTPC)
3398 *(unsigned int *) loc = val;
3399diff -urNp linux-2.6.32.43/arch/s390/kernel/setup.c linux-2.6.32.43/arch/s390/kernel/setup.c
3400--- linux-2.6.32.43/arch/s390/kernel/setup.c 2011-03-27 14:31:47.000000000 -0400
3401+++ linux-2.6.32.43/arch/s390/kernel/setup.c 2011-04-17 15:56:45.000000000 -0400
3402@@ -306,9 +306,6 @@ static int __init early_parse_mem(char *
3403 early_param("mem", early_parse_mem);
3404
3405 #ifdef CONFIG_S390_SWITCH_AMODE
3406-unsigned int switch_amode = 0;
3407-EXPORT_SYMBOL_GPL(switch_amode);
3408-
3409 static int set_amode_and_uaccess(unsigned long user_amode,
3410 unsigned long user32_amode)
3411 {
3412@@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigne
3413 return 0;
3414 }
3415 }
3416-
3417-/*
3418- * Switch kernel/user addressing modes?
3419- */
3420-static int __init early_parse_switch_amode(char *p)
3421-{
3422- switch_amode = 1;
3423- return 0;
3424-}
3425-early_param("switch_amode", early_parse_switch_amode);
3426-
3427 #else /* CONFIG_S390_SWITCH_AMODE */
3428 static inline int set_amode_and_uaccess(unsigned long user_amode,
3429 unsigned long user32_amode)
3430@@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(
3431 }
3432 #endif /* CONFIG_S390_SWITCH_AMODE */
3433
3434-#ifdef CONFIG_S390_EXEC_PROTECT
3435-unsigned int s390_noexec = 0;
3436-EXPORT_SYMBOL_GPL(s390_noexec);
3437-
3438-/*
3439- * Enable execute protection?
3440- */
3441-static int __init early_parse_noexec(char *p)
3442-{
3443- if (!strncmp(p, "off", 3))
3444- return 0;
3445- switch_amode = 1;
3446- s390_noexec = 1;
3447- return 0;
3448-}
3449-early_param("noexec", early_parse_noexec);
3450-#endif /* CONFIG_S390_EXEC_PROTECT */
3451-
3452 static void setup_addressing_mode(void)
3453 {
3454 if (s390_noexec) {
3455diff -urNp linux-2.6.32.43/arch/s390/mm/mmap.c linux-2.6.32.43/arch/s390/mm/mmap.c
3456--- linux-2.6.32.43/arch/s390/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3457+++ linux-2.6.32.43/arch/s390/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
3458@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3459 */
3460 if (mmap_is_legacy()) {
3461 mm->mmap_base = TASK_UNMAPPED_BASE;
3462+
3463+#ifdef CONFIG_PAX_RANDMMAP
3464+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3465+ mm->mmap_base += mm->delta_mmap;
3466+#endif
3467+
3468 mm->get_unmapped_area = arch_get_unmapped_area;
3469 mm->unmap_area = arch_unmap_area;
3470 } else {
3471 mm->mmap_base = mmap_base();
3472+
3473+#ifdef CONFIG_PAX_RANDMMAP
3474+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3475+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3476+#endif
3477+
3478 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3479 mm->unmap_area = arch_unmap_area_topdown;
3480 }
3481@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3482 */
3483 if (mmap_is_legacy()) {
3484 mm->mmap_base = TASK_UNMAPPED_BASE;
3485+
3486+#ifdef CONFIG_PAX_RANDMMAP
3487+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3488+ mm->mmap_base += mm->delta_mmap;
3489+#endif
3490+
3491 mm->get_unmapped_area = s390_get_unmapped_area;
3492 mm->unmap_area = arch_unmap_area;
3493 } else {
3494 mm->mmap_base = mmap_base();
3495+
3496+#ifdef CONFIG_PAX_RANDMMAP
3497+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3498+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3499+#endif
3500+
3501 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3502 mm->unmap_area = arch_unmap_area_topdown;
3503 }
3504diff -urNp linux-2.6.32.43/arch/score/include/asm/system.h linux-2.6.32.43/arch/score/include/asm/system.h
3505--- linux-2.6.32.43/arch/score/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
3506+++ linux-2.6.32.43/arch/score/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
3507@@ -17,7 +17,7 @@ do { \
3508 #define finish_arch_switch(prev) do {} while (0)
3509
3510 typedef void (*vi_handler_t)(void);
3511-extern unsigned long arch_align_stack(unsigned long sp);
3512+#define arch_align_stack(x) (x)
3513
3514 #define mb() barrier()
3515 #define rmb() barrier()
3516diff -urNp linux-2.6.32.43/arch/score/kernel/process.c linux-2.6.32.43/arch/score/kernel/process.c
3517--- linux-2.6.32.43/arch/score/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
3518+++ linux-2.6.32.43/arch/score/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
3519@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3520
3521 return task_pt_regs(task)->cp0_epc;
3522 }
3523-
3524-unsigned long arch_align_stack(unsigned long sp)
3525-{
3526- return sp;
3527-}
3528diff -urNp linux-2.6.32.43/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.32.43/arch/sh/boards/mach-hp6xx/pm.c
3529--- linux-2.6.32.43/arch/sh/boards/mach-hp6xx/pm.c 2011-03-27 14:31:47.000000000 -0400
3530+++ linux-2.6.32.43/arch/sh/boards/mach-hp6xx/pm.c 2011-04-17 15:56:45.000000000 -0400
3531@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3532 return 0;
3533 }
3534
3535-static struct platform_suspend_ops hp6x0_pm_ops = {
3536+static const struct platform_suspend_ops hp6x0_pm_ops = {
3537 .enter = hp6x0_pm_enter,
3538 .valid = suspend_valid_only_mem,
3539 };
3540diff -urNp linux-2.6.32.43/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.32.43/arch/sh/kernel/cpu/sh4/sq.c
3541--- linux-2.6.32.43/arch/sh/kernel/cpu/sh4/sq.c 2011-03-27 14:31:47.000000000 -0400
3542+++ linux-2.6.32.43/arch/sh/kernel/cpu/sh4/sq.c 2011-04-17 15:56:46.000000000 -0400
3543@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
3544 NULL,
3545 };
3546
3547-static struct sysfs_ops sq_sysfs_ops = {
3548+static const struct sysfs_ops sq_sysfs_ops = {
3549 .show = sq_sysfs_show,
3550 .store = sq_sysfs_store,
3551 };
3552diff -urNp linux-2.6.32.43/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.32.43/arch/sh/kernel/cpu/shmobile/pm.c
3553--- linux-2.6.32.43/arch/sh/kernel/cpu/shmobile/pm.c 2011-03-27 14:31:47.000000000 -0400
3554+++ linux-2.6.32.43/arch/sh/kernel/cpu/shmobile/pm.c 2011-04-17 15:56:46.000000000 -0400
3555@@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t s
3556 return 0;
3557 }
3558
3559-static struct platform_suspend_ops sh_pm_ops = {
3560+static const struct platform_suspend_ops sh_pm_ops = {
3561 .enter = sh_pm_enter,
3562 .valid = suspend_valid_only_mem,
3563 };
3564diff -urNp linux-2.6.32.43/arch/sh/kernel/kgdb.c linux-2.6.32.43/arch/sh/kernel/kgdb.c
3565--- linux-2.6.32.43/arch/sh/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
3566+++ linux-2.6.32.43/arch/sh/kernel/kgdb.c 2011-04-17 15:56:46.000000000 -0400
3567@@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
3568 {
3569 }
3570
3571-struct kgdb_arch arch_kgdb_ops = {
3572+const struct kgdb_arch arch_kgdb_ops = {
3573 /* Breakpoint instruction: trapa #0x3c */
3574 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3575 .gdb_bpt_instr = { 0x3c, 0xc3 },
3576diff -urNp linux-2.6.32.43/arch/sh/mm/mmap.c linux-2.6.32.43/arch/sh/mm/mmap.c
3577--- linux-2.6.32.43/arch/sh/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3578+++ linux-2.6.32.43/arch/sh/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
3579@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3580 addr = PAGE_ALIGN(addr);
3581
3582 vma = find_vma(mm, addr);
3583- if (TASK_SIZE - len >= addr &&
3584- (!vma || addr + len <= vma->vm_start))
3585+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3586 return addr;
3587 }
3588
3589@@ -106,7 +105,7 @@ full_search:
3590 }
3591 return -ENOMEM;
3592 }
3593- if (likely(!vma || addr + len <= vma->vm_start)) {
3594+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3595 /*
3596 * Remember the place where we stopped the search:
3597 */
3598@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3599 addr = PAGE_ALIGN(addr);
3600
3601 vma = find_vma(mm, addr);
3602- if (TASK_SIZE - len >= addr &&
3603- (!vma || addr + len <= vma->vm_start))
3604+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3605 return addr;
3606 }
3607
3608@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3609 /* make sure it can fit in the remaining address space */
3610 if (likely(addr > len)) {
3611 vma = find_vma(mm, addr-len);
3612- if (!vma || addr <= vma->vm_start) {
3613+ if (check_heap_stack_gap(vma, addr - len, len)) {
3614 /* remember the address as a hint for next time */
3615 return (mm->free_area_cache = addr-len);
3616 }
3617@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3618 if (unlikely(mm->mmap_base < len))
3619 goto bottomup;
3620
3621- addr = mm->mmap_base-len;
3622- if (do_colour_align)
3623- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3624+ addr = mm->mmap_base - len;
3625
3626 do {
3627+ if (do_colour_align)
3628+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3629 /*
3630 * Lookup failure means no vma is above this address,
3631 * else if new region fits below vma->vm_start,
3632 * return with success:
3633 */
3634 vma = find_vma(mm, addr);
3635- if (likely(!vma || addr+len <= vma->vm_start)) {
3636+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3637 /* remember the address as a hint for next time */
3638 return (mm->free_area_cache = addr);
3639 }
3640@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3641 mm->cached_hole_size = vma->vm_start - addr;
3642
3643 /* try just below the current vma->vm_start */
3644- addr = vma->vm_start-len;
3645- if (do_colour_align)
3646- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3647- } while (likely(len < vma->vm_start));
3648+ addr = skip_heap_stack_gap(vma, len);
3649+ } while (!IS_ERR_VALUE(addr));
3650
3651 bottomup:
3652 /*
3653diff -urNp linux-2.6.32.43/arch/sparc/include/asm/atomic_64.h linux-2.6.32.43/arch/sparc/include/asm/atomic_64.h
3654--- linux-2.6.32.43/arch/sparc/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
3655+++ linux-2.6.32.43/arch/sparc/include/asm/atomic_64.h 2011-07-13 22:22:56.000000000 -0400
3656@@ -14,18 +14,40 @@
3657 #define ATOMIC64_INIT(i) { (i) }
3658
3659 #define atomic_read(v) ((v)->counter)
3660+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3661+{
3662+ return v->counter;
3663+}
3664 #define atomic64_read(v) ((v)->counter)
3665+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3666+{
3667+ return v->counter;
3668+}
3669
3670 #define atomic_set(v, i) (((v)->counter) = i)
3671+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3672+{
3673+ v->counter = i;
3674+}
3675 #define atomic64_set(v, i) (((v)->counter) = i)
3676+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3677+{
3678+ v->counter = i;
3679+}
3680
3681 extern void atomic_add(int, atomic_t *);
3682+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3683 extern void atomic64_add(long, atomic64_t *);
3684+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3685 extern void atomic_sub(int, atomic_t *);
3686+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3687 extern void atomic64_sub(long, atomic64_t *);
3688+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3689
3690 extern int atomic_add_ret(int, atomic_t *);
3691+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3692 extern long atomic64_add_ret(long, atomic64_t *);
3693+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3694 extern int atomic_sub_ret(int, atomic_t *);
3695 extern long atomic64_sub_ret(long, atomic64_t *);
3696
3697@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
3698 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3699
3700 #define atomic_inc_return(v) atomic_add_ret(1, v)
3701+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3702+{
3703+ return atomic_add_ret_unchecked(1, v);
3704+}
3705 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3706+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3707+{
3708+ return atomic64_add_ret_unchecked(1, v);
3709+}
3710
3711 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3712 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3713
3714 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3715+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3716+{
3717+ return atomic_add_ret_unchecked(i, v);
3718+}
3719 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3720+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3721+{
3722+ return atomic64_add_ret_unchecked(i, v);
3723+}
3724
3725 /*
3726 * atomic_inc_and_test - increment and test
3727@@ -50,6 +88,7 @@ extern long atomic64_sub_ret(long, atomi
3728 * other cases.
3729 */
3730 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3731+#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0)
3732 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3733
3734 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3735@@ -59,30 +98,59 @@ extern long atomic64_sub_ret(long, atomi
3736 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3737
3738 #define atomic_inc(v) atomic_add(1, v)
3739+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3740+{
3741+ atomic_add_unchecked(1, v);
3742+}
3743 #define atomic64_inc(v) atomic64_add(1, v)
3744+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3745+{
3746+ atomic64_add_unchecked(1, v);
3747+}
3748
3749 #define atomic_dec(v) atomic_sub(1, v)
3750+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3751+{
3752+ atomic_sub_unchecked(1, v);
3753+}
3754 #define atomic64_dec(v) atomic64_sub(1, v)
3755+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3756+{
3757+ atomic64_sub_unchecked(1, v);
3758+}
3759
3760 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3761 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3762
3763 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3764+#define atomic_cmpxchg_unchecked(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3765 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3766+#define atomic_xchg_unchecked(v, new) (xchg(&((v)->counter), new))
3767
3768 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3769 {
3770- int c, old;
3771+ int c, old, new;
3772 c = atomic_read(v);
3773 for (;;) {
3774- if (unlikely(c == (u)))
3775+ if (unlikely(c == u))
3776 break;
3777- old = atomic_cmpxchg((v), c, c + (a));
3778+
3779+ asm volatile("addcc %2, %0, %0\n"
3780+
3781+#ifdef CONFIG_PAX_REFCOUNT
3782+ "tvs %%icc, 6\n"
3783+#endif
3784+
3785+ : "=r" (new)
3786+ : "0" (c), "ir" (a)
3787+ : "cc");
3788+
3789+ old = atomic_cmpxchg(v, c, new);
3790 if (likely(old == c))
3791 break;
3792 c = old;
3793 }
3794- return c != (u);
3795+ return c != u;
3796 }
3797
3798 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3799@@ -93,17 +161,28 @@ static inline int atomic_add_unless(atom
3800
3801 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3802 {
3803- long c, old;
3804+ long c, old, new;
3805 c = atomic64_read(v);
3806 for (;;) {
3807- if (unlikely(c == (u)))
3808+ if (unlikely(c == u))
3809 break;
3810- old = atomic64_cmpxchg((v), c, c + (a));
3811+
3812+ asm volatile("addcc %2, %0, %0\n"
3813+
3814+#ifdef CONFIG_PAX_REFCOUNT
3815+ "tvs %%xcc, 6\n"
3816+#endif
3817+
3818+ : "=r" (new)
3819+ : "0" (c), "ir" (a)
3820+ : "cc");
3821+
3822+ old = atomic64_cmpxchg(v, c, new);
3823 if (likely(old == c))
3824 break;
3825 c = old;
3826 }
3827- return c != (u);
3828+ return c != u;
3829 }
3830
3831 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3832diff -urNp linux-2.6.32.43/arch/sparc/include/asm/cache.h linux-2.6.32.43/arch/sparc/include/asm/cache.h
3833--- linux-2.6.32.43/arch/sparc/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
3834+++ linux-2.6.32.43/arch/sparc/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
3835@@ -8,7 +8,7 @@
3836 #define _SPARC_CACHE_H
3837
3838 #define L1_CACHE_SHIFT 5
3839-#define L1_CACHE_BYTES 32
3840+#define L1_CACHE_BYTES 32UL
3841 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
3842
3843 #ifdef CONFIG_SPARC32
3844diff -urNp linux-2.6.32.43/arch/sparc/include/asm/dma-mapping.h linux-2.6.32.43/arch/sparc/include/asm/dma-mapping.h
3845--- linux-2.6.32.43/arch/sparc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
3846+++ linux-2.6.32.43/arch/sparc/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
3847@@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *d
3848 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3849 #define dma_is_consistent(d, h) (1)
3850
3851-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3852+extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3853 extern struct bus_type pci_bus_type;
3854
3855-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3856+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3857 {
3858 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3859 if (dev->bus == &pci_bus_type)
3860@@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dm
3861 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3862 dma_addr_t *dma_handle, gfp_t flag)
3863 {
3864- struct dma_map_ops *ops = get_dma_ops(dev);
3865+ const struct dma_map_ops *ops = get_dma_ops(dev);
3866 void *cpu_addr;
3867
3868 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3869@@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(s
3870 static inline void dma_free_coherent(struct device *dev, size_t size,
3871 void *cpu_addr, dma_addr_t dma_handle)
3872 {
3873- struct dma_map_ops *ops = get_dma_ops(dev);
3874+ const struct dma_map_ops *ops = get_dma_ops(dev);
3875
3876 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3877 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3878diff -urNp linux-2.6.32.43/arch/sparc/include/asm/elf_32.h linux-2.6.32.43/arch/sparc/include/asm/elf_32.h
3879--- linux-2.6.32.43/arch/sparc/include/asm/elf_32.h 2011-03-27 14:31:47.000000000 -0400
3880+++ linux-2.6.32.43/arch/sparc/include/asm/elf_32.h 2011-04-17 15:56:46.000000000 -0400
3881@@ -116,6 +116,13 @@ typedef struct {
3882
3883 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3884
3885+#ifdef CONFIG_PAX_ASLR
3886+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3887+
3888+#define PAX_DELTA_MMAP_LEN 16
3889+#define PAX_DELTA_STACK_LEN 16
3890+#endif
3891+
3892 /* This yields a mask that user programs can use to figure out what
3893 instruction set this cpu supports. This can NOT be done in userspace
3894 on Sparc. */
3895diff -urNp linux-2.6.32.43/arch/sparc/include/asm/elf_64.h linux-2.6.32.43/arch/sparc/include/asm/elf_64.h
3896--- linux-2.6.32.43/arch/sparc/include/asm/elf_64.h 2011-03-27 14:31:47.000000000 -0400
3897+++ linux-2.6.32.43/arch/sparc/include/asm/elf_64.h 2011-04-17 15:56:46.000000000 -0400
3898@@ -163,6 +163,12 @@ typedef struct {
3899 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3900 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3901
3902+#ifdef CONFIG_PAX_ASLR
3903+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3904+
3905+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3906+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3907+#endif
3908
3909 /* This yields a mask that user programs can use to figure out what
3910 instruction set this cpu supports. */
3911diff -urNp linux-2.6.32.43/arch/sparc/include/asm/pgtable_32.h linux-2.6.32.43/arch/sparc/include/asm/pgtable_32.h
3912--- linux-2.6.32.43/arch/sparc/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
3913+++ linux-2.6.32.43/arch/sparc/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
3914@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3915 BTFIXUPDEF_INT(page_none)
3916 BTFIXUPDEF_INT(page_copy)
3917 BTFIXUPDEF_INT(page_readonly)
3918+
3919+#ifdef CONFIG_PAX_PAGEEXEC
3920+BTFIXUPDEF_INT(page_shared_noexec)
3921+BTFIXUPDEF_INT(page_copy_noexec)
3922+BTFIXUPDEF_INT(page_readonly_noexec)
3923+#endif
3924+
3925 BTFIXUPDEF_INT(page_kernel)
3926
3927 #define PMD_SHIFT SUN4C_PMD_SHIFT
3928@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3929 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3930 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3931
3932+#ifdef CONFIG_PAX_PAGEEXEC
3933+extern pgprot_t PAGE_SHARED_NOEXEC;
3934+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3935+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3936+#else
3937+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3938+# define PAGE_COPY_NOEXEC PAGE_COPY
3939+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3940+#endif
3941+
3942 extern unsigned long page_kernel;
3943
3944 #ifdef MODULE
3945diff -urNp linux-2.6.32.43/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.32.43/arch/sparc/include/asm/pgtsrmmu.h
3946--- linux-2.6.32.43/arch/sparc/include/asm/pgtsrmmu.h 2011-03-27 14:31:47.000000000 -0400
3947+++ linux-2.6.32.43/arch/sparc/include/asm/pgtsrmmu.h 2011-04-17 15:56:46.000000000 -0400
3948@@ -115,6 +115,13 @@
3949 SRMMU_EXEC | SRMMU_REF)
3950 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3951 SRMMU_EXEC | SRMMU_REF)
3952+
3953+#ifdef CONFIG_PAX_PAGEEXEC
3954+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3955+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3956+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3957+#endif
3958+
3959 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3960 SRMMU_DIRTY | SRMMU_REF)
3961
3962diff -urNp linux-2.6.32.43/arch/sparc/include/asm/spinlock_64.h linux-2.6.32.43/arch/sparc/include/asm/spinlock_64.h
3963--- linux-2.6.32.43/arch/sparc/include/asm/spinlock_64.h 2011-03-27 14:31:47.000000000 -0400
3964+++ linux-2.6.32.43/arch/sparc/include/asm/spinlock_64.h 2011-05-04 17:56:20.000000000 -0400
3965@@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags
3966
3967 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3968
3969-static void inline arch_read_lock(raw_rwlock_t *lock)
3970+static inline void arch_read_lock(raw_rwlock_t *lock)
3971 {
3972 unsigned long tmp1, tmp2;
3973
3974 __asm__ __volatile__ (
3975 "1: ldsw [%2], %0\n"
3976 " brlz,pn %0, 2f\n"
3977-"4: add %0, 1, %1\n"
3978+"4: addcc %0, 1, %1\n"
3979+
3980+#ifdef CONFIG_PAX_REFCOUNT
3981+" tvs %%icc, 6\n"
3982+#endif
3983+
3984 " cas [%2], %0, %1\n"
3985 " cmp %0, %1\n"
3986 " bne,pn %%icc, 1b\n"
3987@@ -112,7 +117,7 @@ static void inline arch_read_lock(raw_rw
3988 " .previous"
3989 : "=&r" (tmp1), "=&r" (tmp2)
3990 : "r" (lock)
3991- : "memory");
3992+ : "memory", "cc");
3993 }
3994
3995 static int inline arch_read_trylock(raw_rwlock_t *lock)
3996@@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_
3997 "1: ldsw [%2], %0\n"
3998 " brlz,a,pn %0, 2f\n"
3999 " mov 0, %0\n"
4000-" add %0, 1, %1\n"
4001+" addcc %0, 1, %1\n"
4002+
4003+#ifdef CONFIG_PAX_REFCOUNT
4004+" tvs %%icc, 6\n"
4005+#endif
4006+
4007 " cas [%2], %0, %1\n"
4008 " cmp %0, %1\n"
4009 " bne,pn %%icc, 1b\n"
4010@@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_
4011 return tmp1;
4012 }
4013
4014-static void inline arch_read_unlock(raw_rwlock_t *lock)
4015+static inline void arch_read_unlock(raw_rwlock_t *lock)
4016 {
4017 unsigned long tmp1, tmp2;
4018
4019 __asm__ __volatile__(
4020 "1: lduw [%2], %0\n"
4021-" sub %0, 1, %1\n"
4022+" subcc %0, 1, %1\n"
4023+
4024+#ifdef CONFIG_PAX_REFCOUNT
4025+" tvs %%icc, 6\n"
4026+#endif
4027+
4028 " cas [%2], %0, %1\n"
4029 " cmp %0, %1\n"
4030 " bne,pn %%xcc, 1b\n"
4031@@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_
4032 : "memory");
4033 }
4034
4035-static void inline arch_write_lock(raw_rwlock_t *lock)
4036+static inline void arch_write_lock(raw_rwlock_t *lock)
4037 {
4038 unsigned long mask, tmp1, tmp2;
4039
4040@@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r
4041 : "memory");
4042 }
4043
4044-static void inline arch_write_unlock(raw_rwlock_t *lock)
4045+static inline void arch_write_unlock(raw_rwlock_t *lock)
4046 {
4047 __asm__ __volatile__(
4048 " stw %%g0, [%0]"
4049diff -urNp linux-2.6.32.43/arch/sparc/include/asm/thread_info_32.h linux-2.6.32.43/arch/sparc/include/asm/thread_info_32.h
4050--- linux-2.6.32.43/arch/sparc/include/asm/thread_info_32.h 2011-03-27 14:31:47.000000000 -0400
4051+++ linux-2.6.32.43/arch/sparc/include/asm/thread_info_32.h 2011-06-04 20:46:01.000000000 -0400
4052@@ -50,6 +50,8 @@ struct thread_info {
4053 unsigned long w_saved;
4054
4055 struct restart_block restart_block;
4056+
4057+ unsigned long lowest_stack;
4058 };
4059
4060 /*
4061diff -urNp linux-2.6.32.43/arch/sparc/include/asm/thread_info_64.h linux-2.6.32.43/arch/sparc/include/asm/thread_info_64.h
4062--- linux-2.6.32.43/arch/sparc/include/asm/thread_info_64.h 2011-03-27 14:31:47.000000000 -0400
4063+++ linux-2.6.32.43/arch/sparc/include/asm/thread_info_64.h 2011-06-04 20:46:21.000000000 -0400
4064@@ -68,6 +68,8 @@ struct thread_info {
4065 struct pt_regs *kern_una_regs;
4066 unsigned int kern_una_insn;
4067
4068+ unsigned long lowest_stack;
4069+
4070 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4071 };
4072
4073diff -urNp linux-2.6.32.43/arch/sparc/include/asm/uaccess_32.h linux-2.6.32.43/arch/sparc/include/asm/uaccess_32.h
4074--- linux-2.6.32.43/arch/sparc/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
4075+++ linux-2.6.32.43/arch/sparc/include/asm/uaccess_32.h 2011-04-17 15:56:46.000000000 -0400
4076@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
4077
4078 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4079 {
4080- if (n && __access_ok((unsigned long) to, n))
4081+ if ((long)n < 0)
4082+ return n;
4083+
4084+ if (n && __access_ok((unsigned long) to, n)) {
4085+ if (!__builtin_constant_p(n))
4086+ check_object_size(from, n, true);
4087 return __copy_user(to, (__force void __user *) from, n);
4088- else
4089+ } else
4090 return n;
4091 }
4092
4093 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4094 {
4095+ if ((long)n < 0)
4096+ return n;
4097+
4098+ if (!__builtin_constant_p(n))
4099+ check_object_size(from, n, true);
4100+
4101 return __copy_user(to, (__force void __user *) from, n);
4102 }
4103
4104 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4105 {
4106- if (n && __access_ok((unsigned long) from, n))
4107+ if ((long)n < 0)
4108+ return n;
4109+
4110+ if (n && __access_ok((unsigned long) from, n)) {
4111+ if (!__builtin_constant_p(n))
4112+ check_object_size(to, n, false);
4113 return __copy_user((__force void __user *) to, from, n);
4114- else
4115+ } else
4116 return n;
4117 }
4118
4119 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4120 {
4121+ if ((long)n < 0)
4122+ return n;
4123+
4124 return __copy_user((__force void __user *) to, from, n);
4125 }
4126
4127diff -urNp linux-2.6.32.43/arch/sparc/include/asm/uaccess_64.h linux-2.6.32.43/arch/sparc/include/asm/uaccess_64.h
4128--- linux-2.6.32.43/arch/sparc/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
4129+++ linux-2.6.32.43/arch/sparc/include/asm/uaccess_64.h 2011-04-17 15:56:46.000000000 -0400
4130@@ -9,6 +9,7 @@
4131 #include <linux/compiler.h>
4132 #include <linux/string.h>
4133 #include <linux/thread_info.h>
4134+#include <linux/kernel.h>
4135 #include <asm/asi.h>
4136 #include <asm/system.h>
4137 #include <asm/spitfire.h>
4138@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
4139 static inline unsigned long __must_check
4140 copy_from_user(void *to, const void __user *from, unsigned long size)
4141 {
4142- unsigned long ret = ___copy_from_user(to, from, size);
4143+ unsigned long ret;
4144
4145+ if ((long)size < 0 || size > INT_MAX)
4146+ return size;
4147+
4148+ if (!__builtin_constant_p(size))
4149+ check_object_size(to, size, false);
4150+
4151+ ret = ___copy_from_user(to, from, size);
4152 if (unlikely(ret))
4153 ret = copy_from_user_fixup(to, from, size);
4154 return ret;
4155@@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(
4156 static inline unsigned long __must_check
4157 copy_to_user(void __user *to, const void *from, unsigned long size)
4158 {
4159- unsigned long ret = ___copy_to_user(to, from, size);
4160+ unsigned long ret;
4161+
4162+ if ((long)size < 0 || size > INT_MAX)
4163+ return size;
4164+
4165+ if (!__builtin_constant_p(size))
4166+ check_object_size(from, size, true);
4167
4168+ ret = ___copy_to_user(to, from, size);
4169 if (unlikely(ret))
4170 ret = copy_to_user_fixup(to, from, size);
4171 return ret;
4172diff -urNp linux-2.6.32.43/arch/sparc/include/asm/uaccess.h linux-2.6.32.43/arch/sparc/include/asm/uaccess.h
4173--- linux-2.6.32.43/arch/sparc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
4174+++ linux-2.6.32.43/arch/sparc/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
4175@@ -1,5 +1,13 @@
4176 #ifndef ___ASM_SPARC_UACCESS_H
4177 #define ___ASM_SPARC_UACCESS_H
4178+
4179+#ifdef __KERNEL__
4180+#ifndef __ASSEMBLY__
4181+#include <linux/types.h>
4182+extern void check_object_size(const void *ptr, unsigned long n, bool to);
4183+#endif
4184+#endif
4185+
4186 #if defined(__sparc__) && defined(__arch64__)
4187 #include <asm/uaccess_64.h>
4188 #else
4189diff -urNp linux-2.6.32.43/arch/sparc/kernel/iommu.c linux-2.6.32.43/arch/sparc/kernel/iommu.c
4190--- linux-2.6.32.43/arch/sparc/kernel/iommu.c 2011-03-27 14:31:47.000000000 -0400
4191+++ linux-2.6.32.43/arch/sparc/kernel/iommu.c 2011-04-17 15:56:46.000000000 -0400
4192@@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struc
4193 spin_unlock_irqrestore(&iommu->lock, flags);
4194 }
4195
4196-static struct dma_map_ops sun4u_dma_ops = {
4197+static const struct dma_map_ops sun4u_dma_ops = {
4198 .alloc_coherent = dma_4u_alloc_coherent,
4199 .free_coherent = dma_4u_free_coherent,
4200 .map_page = dma_4u_map_page,
4201@@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops
4202 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4203 };
4204
4205-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4206+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4207 EXPORT_SYMBOL(dma_ops);
4208
4209 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4210diff -urNp linux-2.6.32.43/arch/sparc/kernel/ioport.c linux-2.6.32.43/arch/sparc/kernel/ioport.c
4211--- linux-2.6.32.43/arch/sparc/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
4212+++ linux-2.6.32.43/arch/sparc/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
4213@@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(stru
4214 BUG();
4215 }
4216
4217-struct dma_map_ops sbus_dma_ops = {
4218+const struct dma_map_ops sbus_dma_ops = {
4219 .alloc_coherent = sbus_alloc_coherent,
4220 .free_coherent = sbus_free_coherent,
4221 .map_page = sbus_map_page,
4222@@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4223 .sync_sg_for_device = sbus_sync_sg_for_device,
4224 };
4225
4226-struct dma_map_ops *dma_ops = &sbus_dma_ops;
4227+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4228 EXPORT_SYMBOL(dma_ops);
4229
4230 static int __init sparc_register_ioport(void)
4231@@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(str
4232 }
4233 }
4234
4235-struct dma_map_ops pci32_dma_ops = {
4236+const struct dma_map_ops pci32_dma_ops = {
4237 .alloc_coherent = pci32_alloc_coherent,
4238 .free_coherent = pci32_free_coherent,
4239 .map_page = pci32_map_page,
4240diff -urNp linux-2.6.32.43/arch/sparc/kernel/kgdb_32.c linux-2.6.32.43/arch/sparc/kernel/kgdb_32.c
4241--- linux-2.6.32.43/arch/sparc/kernel/kgdb_32.c 2011-03-27 14:31:47.000000000 -0400
4242+++ linux-2.6.32.43/arch/sparc/kernel/kgdb_32.c 2011-04-17 15:56:46.000000000 -0400
4243@@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4244 {
4245 }
4246
4247-struct kgdb_arch arch_kgdb_ops = {
4248+const struct kgdb_arch arch_kgdb_ops = {
4249 /* Breakpoint instruction: ta 0x7d */
4250 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4251 };
4252diff -urNp linux-2.6.32.43/arch/sparc/kernel/kgdb_64.c linux-2.6.32.43/arch/sparc/kernel/kgdb_64.c
4253--- linux-2.6.32.43/arch/sparc/kernel/kgdb_64.c 2011-03-27 14:31:47.000000000 -0400
4254+++ linux-2.6.32.43/arch/sparc/kernel/kgdb_64.c 2011-04-17 15:56:46.000000000 -0400
4255@@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4256 {
4257 }
4258
4259-struct kgdb_arch arch_kgdb_ops = {
4260+const struct kgdb_arch arch_kgdb_ops = {
4261 /* Breakpoint instruction: ta 0x72 */
4262 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4263 };
4264diff -urNp linux-2.6.32.43/arch/sparc/kernel/Makefile linux-2.6.32.43/arch/sparc/kernel/Makefile
4265--- linux-2.6.32.43/arch/sparc/kernel/Makefile 2011-03-27 14:31:47.000000000 -0400
4266+++ linux-2.6.32.43/arch/sparc/kernel/Makefile 2011-04-17 15:56:46.000000000 -0400
4267@@ -3,7 +3,7 @@
4268 #
4269
4270 asflags-y := -ansi
4271-ccflags-y := -Werror
4272+#ccflags-y := -Werror
4273
4274 extra-y := head_$(BITS).o
4275 extra-y += init_task.o
4276diff -urNp linux-2.6.32.43/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.43/arch/sparc/kernel/pci_sun4v.c
4277--- linux-2.6.32.43/arch/sparc/kernel/pci_sun4v.c 2011-03-27 14:31:47.000000000 -0400
4278+++ linux-2.6.32.43/arch/sparc/kernel/pci_sun4v.c 2011-04-17 15:56:46.000000000 -0400
4279@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
4280 spin_unlock_irqrestore(&iommu->lock, flags);
4281 }
4282
4283-static struct dma_map_ops sun4v_dma_ops = {
4284+static const struct dma_map_ops sun4v_dma_ops = {
4285 .alloc_coherent = dma_4v_alloc_coherent,
4286 .free_coherent = dma_4v_free_coherent,
4287 .map_page = dma_4v_map_page,
4288diff -urNp linux-2.6.32.43/arch/sparc/kernel/process_32.c linux-2.6.32.43/arch/sparc/kernel/process_32.c
4289--- linux-2.6.32.43/arch/sparc/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
4290+++ linux-2.6.32.43/arch/sparc/kernel/process_32.c 2011-04-17 15:56:46.000000000 -0400
4291@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4292 rw->ins[4], rw->ins[5],
4293 rw->ins[6],
4294 rw->ins[7]);
4295- printk("%pS\n", (void *) rw->ins[7]);
4296+ printk("%pA\n", (void *) rw->ins[7]);
4297 rw = (struct reg_window32 *) rw->ins[6];
4298 }
4299 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4300@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4301
4302 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4303 r->psr, r->pc, r->npc, r->y, print_tainted());
4304- printk("PC: <%pS>\n", (void *) r->pc);
4305+ printk("PC: <%pA>\n", (void *) r->pc);
4306 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4307 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4308 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4309 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4310 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4311 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4312- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4313+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4314
4315 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4316 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4317@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
4318 rw = (struct reg_window32 *) fp;
4319 pc = rw->ins[7];
4320 printk("[%08lx : ", pc);
4321- printk("%pS ] ", (void *) pc);
4322+ printk("%pA ] ", (void *) pc);
4323 fp = rw->ins[6];
4324 } while (++count < 16);
4325 printk("\n");
4326diff -urNp linux-2.6.32.43/arch/sparc/kernel/process_64.c linux-2.6.32.43/arch/sparc/kernel/process_64.c
4327--- linux-2.6.32.43/arch/sparc/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
4328+++ linux-2.6.32.43/arch/sparc/kernel/process_64.c 2011-04-17 15:56:46.000000000 -0400
4329@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
4330 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4331 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4332 if (regs->tstate & TSTATE_PRIV)
4333- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4334+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4335 }
4336
4337 void show_regs(struct pt_regs *regs)
4338 {
4339 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4340 regs->tpc, regs->tnpc, regs->y, print_tainted());
4341- printk("TPC: <%pS>\n", (void *) regs->tpc);
4342+ printk("TPC: <%pA>\n", (void *) regs->tpc);
4343 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4344 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4345 regs->u_regs[3]);
4346@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4347 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4348 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4349 regs->u_regs[15]);
4350- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4351+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4352 show_regwindow(regs);
4353 }
4354
4355@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void
4356 ((tp && tp->task) ? tp->task->pid : -1));
4357
4358 if (gp->tstate & TSTATE_PRIV) {
4359- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4360+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4361 (void *) gp->tpc,
4362 (void *) gp->o7,
4363 (void *) gp->i7,
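The repeated %pS to %pA substitutions in process_32.c/process_64.c (and in the trap and fault handlers further down) switch backtrace and oops printks to a printk format extension that the full patch adds outside this excerpt, presumably so the address is translated (ktla_ktva()) before symbol lookup and symbolized output stays correct when KERNEXEC relocates kernel text; the arch/um page.h hunk further down adds identity ktla_ktva()/ktva_ktla() macros for the no-relocation case. A hypothetical "translate, then symbolize" illustration, with all names and the translation assumed:

	/* pA_sketch.c - hypothetical illustration only; names and the
	 * translation are assumptions, not code from this patch */
	#include <stdio.h>

	static unsigned long text_delta = 0x1000;	/* assumed text relocation */

	static unsigned long ktla_ktva_sketch(unsigned long addr)
	{
		return addr + text_delta;		/* undo the relocation */
	}

	static const char *lookup(unsigned long addr)	/* stand-in for sprint_symbol() */
	{
		return addr == 0x401000UL ? "do_something+0x0/0x40" : "(unknown)";
	}

	int main(void)
	{
		unsigned long pc = 0x400000UL;		/* address seen at run time */

		printf("%%pS-style: %s\n", lookup(pc));			/* wrong symbol */
		printf("%%pA-style: %s\n", lookup(ktla_ktva_sketch(pc)));	/* right one */
		return 0;
	}
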
4364diff -urNp linux-2.6.32.43/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.43/arch/sparc/kernel/sys_sparc_32.c
4365--- linux-2.6.32.43/arch/sparc/kernel/sys_sparc_32.c 2011-03-27 14:31:47.000000000 -0400
4366+++ linux-2.6.32.43/arch/sparc/kernel/sys_sparc_32.c 2011-04-17 15:56:46.000000000 -0400
4367@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4368 if (ARCH_SUN4C && len > 0x20000000)
4369 return -ENOMEM;
4370 if (!addr)
4371- addr = TASK_UNMAPPED_BASE;
4372+ addr = current->mm->mmap_base;
4373
4374 if (flags & MAP_SHARED)
4375 addr = COLOUR_ALIGN(addr);
4376@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4377 }
4378 if (TASK_SIZE - PAGE_SIZE - len < addr)
4379 return -ENOMEM;
4380- if (!vmm || addr + len <= vmm->vm_start)
4381+ if (check_heap_stack_gap(vmm, addr, len))
4382 return addr;
4383 addr = vmm->vm_end;
4384 if (flags & MAP_SHARED)
4385diff -urNp linux-2.6.32.43/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.43/arch/sparc/kernel/sys_sparc_64.c
4386--- linux-2.6.32.43/arch/sparc/kernel/sys_sparc_64.c 2011-03-27 14:31:47.000000000 -0400
4387+++ linux-2.6.32.43/arch/sparc/kernel/sys_sparc_64.c 2011-04-17 15:56:46.000000000 -0400
4388@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
4389 /* We do not accept a shared mapping if it would violate
4390 * cache aliasing constraints.
4391 */
4392- if ((flags & MAP_SHARED) &&
4393+ if ((filp || (flags & MAP_SHARED)) &&
4394 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4395 return -EINVAL;
4396 return addr;
4397@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
4398 if (filp || (flags & MAP_SHARED))
4399 do_color_align = 1;
4400
4401+#ifdef CONFIG_PAX_RANDMMAP
4402+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4403+#endif
4404+
4405 if (addr) {
4406 if (do_color_align)
4407 addr = COLOUR_ALIGN(addr, pgoff);
4408@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
4409 addr = PAGE_ALIGN(addr);
4410
4411 vma = find_vma(mm, addr);
4412- if (task_size - len >= addr &&
4413- (!vma || addr + len <= vma->vm_start))
4414+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4415 return addr;
4416 }
4417
4418 if (len > mm->cached_hole_size) {
4419- start_addr = addr = mm->free_area_cache;
4420+ start_addr = addr = mm->free_area_cache;
4421 } else {
4422- start_addr = addr = TASK_UNMAPPED_BASE;
4423+ start_addr = addr = mm->mmap_base;
4424 mm->cached_hole_size = 0;
4425 }
4426
4427@@ -175,14 +178,14 @@ full_search:
4428 vma = find_vma(mm, VA_EXCLUDE_END);
4429 }
4430 if (unlikely(task_size < addr)) {
4431- if (start_addr != TASK_UNMAPPED_BASE) {
4432- start_addr = addr = TASK_UNMAPPED_BASE;
4433+ if (start_addr != mm->mmap_base) {
4434+ start_addr = addr = mm->mmap_base;
4435 mm->cached_hole_size = 0;
4436 goto full_search;
4437 }
4438 return -ENOMEM;
4439 }
4440- if (likely(!vma || addr + len <= vma->vm_start)) {
4441+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4442 /*
4443 * Remember the place where we stopped the search:
4444 */
4445@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
4446 /* We do not accept a shared mapping if it would violate
4447 * cache aliasing constraints.
4448 */
4449- if ((flags & MAP_SHARED) &&
4450+ if ((filp || (flags & MAP_SHARED)) &&
4451 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4452 return -EINVAL;
4453 return addr;
4454@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
4455 addr = PAGE_ALIGN(addr);
4456
4457 vma = find_vma(mm, addr);
4458- if (task_size - len >= addr &&
4459- (!vma || addr + len <= vma->vm_start))
4460+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4461 return addr;
4462 }
4463
4464@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
4465 /* make sure it can fit in the remaining address space */
4466 if (likely(addr > len)) {
4467 vma = find_vma(mm, addr-len);
4468- if (!vma || addr <= vma->vm_start) {
4469+ if (check_heap_stack_gap(vma, addr - len, len)) {
4470 /* remember the address as a hint for next time */
4471 return (mm->free_area_cache = addr-len);
4472 }
4473@@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct fi
4474 if (unlikely(mm->mmap_base < len))
4475 goto bottomup;
4476
4477- addr = mm->mmap_base-len;
4478- if (do_color_align)
4479- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4480+ addr = mm->mmap_base - len;
4481
4482 do {
4483+ if (do_color_align)
4484+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4485 /*
4486 * Lookup failure means no vma is above this address,
4487 * else if new region fits below vma->vm_start,
4488 * return with success:
4489 */
4490 vma = find_vma(mm, addr);
4491- if (likely(!vma || addr+len <= vma->vm_start)) {
4492+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4493 /* remember the address as a hint for next time */
4494 return (mm->free_area_cache = addr);
4495 }
4496@@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct fi
4497 mm->cached_hole_size = vma->vm_start - addr;
4498
4499 /* try just below the current vma->vm_start */
4500- addr = vma->vm_start-len;
4501- if (do_color_align)
4502- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4503- } while (likely(len < vma->vm_start));
4504+ addr = skip_heap_stack_gap(vma, len);
4505+ } while (!IS_ERR_VALUE(addr));
4506
4507 bottomup:
4508 /*
4509@@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
4510 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
4511 sysctl_legacy_va_layout) {
4512 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4513+
4514+#ifdef CONFIG_PAX_RANDMMAP
4515+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4516+ mm->mmap_base += mm->delta_mmap;
4517+#endif
4518+
4519 mm->get_unmapped_area = arch_get_unmapped_area;
4520 mm->unmap_area = arch_unmap_area;
4521 } else {
4522@@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
4523 gap = (task_size / 6 * 5);
4524
4525 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4526+
4527+#ifdef CONFIG_PAX_RANDMMAP
4528+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4529+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4530+#endif
4531+
4532 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4533 mm->unmap_area = arch_unmap_area_topdown;
4534 }
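The two sys_sparc hunks above adjust the mmap search paths for PaX: the open-coded `!vma || addr + len <= vma->vm_start` tests become check_heap_stack_gap(), a helper defined elsewhere in the full patch that also rejects candidate ranges ending too close to a downward-growing stack VMA; the fixed TASK_UNMAPPED_BASE bases become mm->mmap_base so the PAX_RANDMMAP deltas applied in arch_pick_mmap_layout() take effect; the top-down walk steps via skip_heap_stack_gap() instead of recomputing vma->vm_start - len by hand; and the SHMLBA colour-aliasing check is extended from shared mappings to all file-backed mappings. A self-contained sketch of the gap test, with the gap constant and exact policy as assumptions:

	/* gap_check_sketch.c - illustrative only; the real check_heap_stack_gap()
	 * is defined elsewhere in the full patch and its gap policy/constant
	 * are assumptions here. */
	#include <stdbool.h>
	#include <stdio.h>

	struct vma_sketch {			/* stand-in for vm_area_struct */
		unsigned long vm_start;
		bool grows_down;		/* VM_GROWSDOWN */
	};

	static const unsigned long heap_stack_gap = 64UL * 1024;	/* assumed */

	static bool check_gap(const struct vma_sketch *next,
			      unsigned long addr, unsigned long len)
	{
		if (!next)				/* no vma above the hole */
			return true;
		if (next->grows_down)			/* keep a guard below a stack */
			return addr + len + heap_stack_gap <= next->vm_start;
		return addr + len <= next->vm_start;	/* classic overlap test */
	}

	int main(void)
	{
		struct vma_sketch stack = { .vm_start = 0x7f0000000000UL, .grows_down = true };

		/* fits by the classic test, but rejected because it abuts the stack */
		printf("%d\n", check_gap(&stack, 0x7effffff0000UL, 0x10000));	/* 0 */
		return 0;
	}
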
4535diff -urNp linux-2.6.32.43/arch/sparc/kernel/traps_32.c linux-2.6.32.43/arch/sparc/kernel/traps_32.c
4536--- linux-2.6.32.43/arch/sparc/kernel/traps_32.c 2011-03-27 14:31:47.000000000 -0400
4537+++ linux-2.6.32.43/arch/sparc/kernel/traps_32.c 2011-06-13 21:25:39.000000000 -0400
4538@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
4539 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4540 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4541
4542+extern void gr_handle_kernel_exploit(void);
4543+
4544 void die_if_kernel(char *str, struct pt_regs *regs)
4545 {
4546 static int die_counter;
4547@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
4548 count++ < 30 &&
4549 (((unsigned long) rw) >= PAGE_OFFSET) &&
4550 !(((unsigned long) rw) & 0x7)) {
4551- printk("Caller[%08lx]: %pS\n", rw->ins[7],
4552+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
4553 (void *) rw->ins[7]);
4554 rw = (struct reg_window32 *)rw->ins[6];
4555 }
4556 }
4557 printk("Instruction DUMP:");
4558 instruction_dump ((unsigned long *) regs->pc);
4559- if(regs->psr & PSR_PS)
4560+ if(regs->psr & PSR_PS) {
4561+ gr_handle_kernel_exploit();
4562 do_exit(SIGKILL);
4563+ }
4564 do_exit(SIGSEGV);
4565 }
4566
4567diff -urNp linux-2.6.32.43/arch/sparc/kernel/traps_64.c linux-2.6.32.43/arch/sparc/kernel/traps_64.c
4568--- linux-2.6.32.43/arch/sparc/kernel/traps_64.c 2011-03-27 14:31:47.000000000 -0400
4569+++ linux-2.6.32.43/arch/sparc/kernel/traps_64.c 2011-06-13 21:24:11.000000000 -0400
4570@@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_
4571 i + 1,
4572 p->trapstack[i].tstate, p->trapstack[i].tpc,
4573 p->trapstack[i].tnpc, p->trapstack[i].tt);
4574- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4575+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4576 }
4577 }
4578
4579@@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
4580
4581 lvl -= 0x100;
4582 if (regs->tstate & TSTATE_PRIV) {
4583+
4584+#ifdef CONFIG_PAX_REFCOUNT
4585+ if (lvl == 6)
4586+ pax_report_refcount_overflow(regs);
4587+#endif
4588+
4589 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4590 die_if_kernel(buffer, regs);
4591 }
4592@@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
4593 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4594 {
4595 char buffer[32];
4596-
4597+
4598 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4599 0, lvl, SIGTRAP) == NOTIFY_STOP)
4600 return;
4601
4602+#ifdef CONFIG_PAX_REFCOUNT
4603+ if (lvl == 6)
4604+ pax_report_refcount_overflow(regs);
4605+#endif
4606+
4607 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4608
4609 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4610@@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt
4611 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4612 printk("%s" "ERROR(%d): ",
4613 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4614- printk("TPC<%pS>\n", (void *) regs->tpc);
4615+ printk("TPC<%pA>\n", (void *) regs->tpc);
4616 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4617 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4618 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4619@@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type,
4620 smp_processor_id(),
4621 (type & 0x1) ? 'I' : 'D',
4622 regs->tpc);
4623- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4624+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4625 panic("Irrecoverable Cheetah+ parity error.");
4626 }
4627
4628@@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type,
4629 smp_processor_id(),
4630 (type & 0x1) ? 'I' : 'D',
4631 regs->tpc);
4632- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4633+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4634 }
4635
4636 struct sun4v_error_entry {
4637@@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_r
4638
4639 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4640 regs->tpc, tl);
4641- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4642+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4643 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4644- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4645+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4646 (void *) regs->u_regs[UREG_I7]);
4647 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4648 "pte[%lx] error[%lx]\n",
4649@@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_r
4650
4651 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4652 regs->tpc, tl);
4653- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4654+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4655 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4656- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4657+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4658 (void *) regs->u_regs[UREG_I7]);
4659 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4660 "pte[%lx] error[%lx]\n",
4661@@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk,
4662 fp = (unsigned long)sf->fp + STACK_BIAS;
4663 }
4664
4665- printk(" [%016lx] %pS\n", pc, (void *) pc);
4666+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4667 } while (++count < 16);
4668 }
4669
4670@@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_
4671 return (struct reg_window *) (fp + STACK_BIAS);
4672 }
4673
4674+extern void gr_handle_kernel_exploit(void);
4675+
4676 void die_if_kernel(char *str, struct pt_regs *regs)
4677 {
4678 static int die_counter;
4679@@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_
4680 while (rw &&
4681 count++ < 30&&
4682 is_kernel_stack(current, rw)) {
4683- printk("Caller[%016lx]: %pS\n", rw->ins[7],
4684+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
4685 (void *) rw->ins[7]);
4686
4687 rw = kernel_stack_up(rw);
4688@@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_
4689 }
4690 user_instruction_dump ((unsigned int __user *) regs->tpc);
4691 }
4692- if (regs->tstate & TSTATE_PRIV)
4693+ if (regs->tstate & TSTATE_PRIV) {
4694+ gr_handle_kernel_exploit();
4695 do_exit(SIGKILL);
4696+ }
4697+
4698 do_exit(SIGSEGV);
4699 }
4700 EXPORT_SYMBOL(die_if_kernel);
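Both die_if_kernel() variants now invoke gr_handle_kernel_exploit() before killing a task that oopsed in privileged context; the hook lives in the grsecurity/ directory that the arch Makefile hunk below adds to the sparc build, and in grsecurity it is used to deter oops-based exploit bruteforcing (the exact policy, such as acting against the offending user rather than only the current task, is configured elsewhere in the full patch). The bad_trap()/bad_trap_tl1() hunks route software trap 6 to pax_report_refcount_overflow(), pairing with the trap-on-overflow instructions added to the atomic and rwsem assembly below. A trivial sketch of the "hook before teardown" pattern, with the hook body assumed:

	/* oops_hook_sketch.c - illustrative pattern only */
	#include <stdio.h>
	#include <stdlib.h>

	/* assumed stand-in for gr_handle_kernel_exploit(): a policy hook that
	 * runs before the faulting privileged context is torn down */
	static void exploit_deterrence_hook(void)
	{
		fprintf(stderr, "policy hook: penalize the offender here\n");
	}

	static void die_if_privileged(int was_privileged)
	{
		if (was_privileged) {
			exploit_deterrence_hook();
			exit(1);	/* analogous to do_exit(SIGKILL) */
		}
		exit(2);		/* analogous to do_exit(SIGSEGV) */
	}

	int main(void)
	{
		die_if_privileged(1);
	}
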
4701diff -urNp linux-2.6.32.43/arch/sparc/kernel/una_asm_64.S linux-2.6.32.43/arch/sparc/kernel/una_asm_64.S
4702--- linux-2.6.32.43/arch/sparc/kernel/una_asm_64.S 2011-03-27 14:31:47.000000000 -0400
4703+++ linux-2.6.32.43/arch/sparc/kernel/una_asm_64.S 2011-07-13 22:20:05.000000000 -0400
4704@@ -127,7 +127,7 @@ do_int_load:
4705 wr %o5, 0x0, %asi
4706 retl
4707 mov 0, %o0
4708- .size __do_int_load, .-__do_int_load
4709+ .size do_int_load, .-do_int_load
4710
4711 .section __ex_table,"a"
4712 .word 4b, __retl_efault
4713diff -urNp linux-2.6.32.43/arch/sparc/kernel/unaligned_64.c linux-2.6.32.43/arch/sparc/kernel/unaligned_64.c
4714--- linux-2.6.32.43/arch/sparc/kernel/unaligned_64.c 2011-03-27 14:31:47.000000000 -0400
4715+++ linux-2.6.32.43/arch/sparc/kernel/unaligned_64.c 2011-04-17 15:56:46.000000000 -0400
4716@@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs
4717 if (count < 5) {
4718 last_time = jiffies;
4719 count++;
4720- printk("Kernel unaligned access at TPC[%lx] %pS\n",
4721+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
4722 regs->tpc, (void *) regs->tpc);
4723 }
4724 }
4725diff -urNp linux-2.6.32.43/arch/sparc/lib/atomic_64.S linux-2.6.32.43/arch/sparc/lib/atomic_64.S
4726--- linux-2.6.32.43/arch/sparc/lib/atomic_64.S 2011-03-27 14:31:47.000000000 -0400
4727+++ linux-2.6.32.43/arch/sparc/lib/atomic_64.S 2011-04-17 15:56:46.000000000 -0400
4728@@ -18,7 +18,12 @@
4729 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4730 BACKOFF_SETUP(%o2)
4731 1: lduw [%o1], %g1
4732- add %g1, %o0, %g7
4733+ addcc %g1, %o0, %g7
4734+
4735+#ifdef CONFIG_PAX_REFCOUNT
4736+ tvs %icc, 6
4737+#endif
4738+
4739 cas [%o1], %g1, %g7
4740 cmp %g1, %g7
4741 bne,pn %icc, 2f
4742@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4743 2: BACKOFF_SPIN(%o2, %o3, 1b)
4744 .size atomic_add, .-atomic_add
4745
4746+ .globl atomic_add_unchecked
4747+ .type atomic_add_unchecked,#function
4748+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4749+ BACKOFF_SETUP(%o2)
4750+1: lduw [%o1], %g1
4751+ add %g1, %o0, %g7
4752+ cas [%o1], %g1, %g7
4753+ cmp %g1, %g7
4754+ bne,pn %icc, 2f
4755+ nop
4756+ retl
4757+ nop
4758+2: BACKOFF_SPIN(%o2, %o3, 1b)
4759+ .size atomic_add_unchecked, .-atomic_add_unchecked
4760+
4761 .globl atomic_sub
4762 .type atomic_sub,#function
4763 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4764 BACKOFF_SETUP(%o2)
4765 1: lduw [%o1], %g1
4766- sub %g1, %o0, %g7
4767+ subcc %g1, %o0, %g7
4768+
4769+#ifdef CONFIG_PAX_REFCOUNT
4770+ tvs %icc, 6
4771+#endif
4772+
4773 cas [%o1], %g1, %g7
4774 cmp %g1, %g7
4775 bne,pn %icc, 2f
4776@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4777 2: BACKOFF_SPIN(%o2, %o3, 1b)
4778 .size atomic_sub, .-atomic_sub
4779
4780+ .globl atomic_sub_unchecked
4781+ .type atomic_sub_unchecked,#function
4782+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4783+ BACKOFF_SETUP(%o2)
4784+1: lduw [%o1], %g1
4785+ sub %g1, %o0, %g7
4786+ cas [%o1], %g1, %g7
4787+ cmp %g1, %g7
4788+ bne,pn %icc, 2f
4789+ nop
4790+ retl
4791+ nop
4792+2: BACKOFF_SPIN(%o2, %o3, 1b)
4793+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
4794+
4795 .globl atomic_add_ret
4796 .type atomic_add_ret,#function
4797 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4798 BACKOFF_SETUP(%o2)
4799 1: lduw [%o1], %g1
4800- add %g1, %o0, %g7
4801+ addcc %g1, %o0, %g7
4802+
4803+#ifdef CONFIG_PAX_REFCOUNT
4804+ tvs %icc, 6
4805+#endif
4806+
4807 cas [%o1], %g1, %g7
4808 cmp %g1, %g7
4809 bne,pn %icc, 2f
4810@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4811 2: BACKOFF_SPIN(%o2, %o3, 1b)
4812 .size atomic_add_ret, .-atomic_add_ret
4813
4814+ .globl atomic_add_ret_unchecked
4815+ .type atomic_add_ret_unchecked,#function
4816+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4817+ BACKOFF_SETUP(%o2)
4818+1: lduw [%o1], %g1
4819+ addcc %g1, %o0, %g7
4820+ cas [%o1], %g1, %g7
4821+ cmp %g1, %g7
4822+ bne,pn %icc, 2f
4823+ add %g7, %o0, %g7
4824+ sra %g7, 0, %o0
4825+ retl
4826+ nop
4827+2: BACKOFF_SPIN(%o2, %o3, 1b)
4828+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4829+
4830 .globl atomic_sub_ret
4831 .type atomic_sub_ret,#function
4832 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4833 BACKOFF_SETUP(%o2)
4834 1: lduw [%o1], %g1
4835- sub %g1, %o0, %g7
4836+ subcc %g1, %o0, %g7
4837+
4838+#ifdef CONFIG_PAX_REFCOUNT
4839+ tvs %icc, 6
4840+#endif
4841+
4842 cas [%o1], %g1, %g7
4843 cmp %g1, %g7
4844 bne,pn %icc, 2f
4845@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4846 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4847 BACKOFF_SETUP(%o2)
4848 1: ldx [%o1], %g1
4849- add %g1, %o0, %g7
4850+ addcc %g1, %o0, %g7
4851+
4852+#ifdef CONFIG_PAX_REFCOUNT
4853+ tvs %xcc, 6
4854+#endif
4855+
4856 casx [%o1], %g1, %g7
4857 cmp %g1, %g7
4858 bne,pn %xcc, 2f
4859@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4860 2: BACKOFF_SPIN(%o2, %o3, 1b)
4861 .size atomic64_add, .-atomic64_add
4862
4863+ .globl atomic64_add_unchecked
4864+ .type atomic64_add_unchecked,#function
4865+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4866+ BACKOFF_SETUP(%o2)
4867+1: ldx [%o1], %g1
4868+ addcc %g1, %o0, %g7
4869+ casx [%o1], %g1, %g7
4870+ cmp %g1, %g7
4871+ bne,pn %xcc, 2f
4872+ nop
4873+ retl
4874+ nop
4875+2: BACKOFF_SPIN(%o2, %o3, 1b)
4876+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
4877+
4878 .globl atomic64_sub
4879 .type atomic64_sub,#function
4880 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4881 BACKOFF_SETUP(%o2)
4882 1: ldx [%o1], %g1
4883- sub %g1, %o0, %g7
4884+ subcc %g1, %o0, %g7
4885+
4886+#ifdef CONFIG_PAX_REFCOUNT
4887+ tvs %xcc, 6
4888+#endif
4889+
4890 casx [%o1], %g1, %g7
4891 cmp %g1, %g7
4892 bne,pn %xcc, 2f
4893@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4894 2: BACKOFF_SPIN(%o2, %o3, 1b)
4895 .size atomic64_sub, .-atomic64_sub
4896
4897+ .globl atomic64_sub_unchecked
4898+ .type atomic64_sub_unchecked,#function
4899+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4900+ BACKOFF_SETUP(%o2)
4901+1: ldx [%o1], %g1
4902+ subcc %g1, %o0, %g7
4903+ casx [%o1], %g1, %g7
4904+ cmp %g1, %g7
4905+ bne,pn %xcc, 2f
4906+ nop
4907+ retl
4908+ nop
4909+2: BACKOFF_SPIN(%o2, %o3, 1b)
4910+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4911+
4912 .globl atomic64_add_ret
4913 .type atomic64_add_ret,#function
4914 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4915 BACKOFF_SETUP(%o2)
4916 1: ldx [%o1], %g1
4917- add %g1, %o0, %g7
4918+ addcc %g1, %o0, %g7
4919+
4920+#ifdef CONFIG_PAX_REFCOUNT
4921+ tvs %xcc, 6
4922+#endif
4923+
4924 casx [%o1], %g1, %g7
4925 cmp %g1, %g7
4926 bne,pn %xcc, 2f
4927@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4928 2: BACKOFF_SPIN(%o2, %o3, 1b)
4929 .size atomic64_add_ret, .-atomic64_add_ret
4930
4931+ .globl atomic64_add_ret_unchecked
4932+ .type atomic64_add_ret_unchecked,#function
4933+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4934+ BACKOFF_SETUP(%o2)
4935+1: ldx [%o1], %g1
4936+ addcc %g1, %o0, %g7
4937+ casx [%o1], %g1, %g7
4938+ cmp %g1, %g7
4939+ bne,pn %xcc, 2f
4940+ add %g7, %o0, %g7
4941+ mov %g7, %o0
4942+ retl
4943+ nop
4944+2: BACKOFF_SPIN(%o2, %o3, 1b)
4945+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4946+
4947 .globl atomic64_sub_ret
4948 .type atomic64_sub_ret,#function
4949 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4950 BACKOFF_SETUP(%o2)
4951 1: ldx [%o1], %g1
4952- sub %g1, %o0, %g7
4953+ subcc %g1, %o0, %g7
4954+
4955+#ifdef CONFIG_PAX_REFCOUNT
4956+ tvs %xcc, 6
4957+#endif
4958+
4959 casx [%o1], %g1, %g7
4960 cmp %g1, %g7
4961 bne,pn %xcc, 2f
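The atomic_64.S changes implement PAX_REFCOUNT on sparc64: each checked primitive computes the new value with addcc/subcc so the condition codes are set, then executes tvs %icc,6 (or %xcc,6 for the 64-bit ops), i.e. trap-on-overflow into software trap 6, which bad_trap() above turns into pax_report_refcount_overflow(). Counters that may legitimately wrap get *_unchecked twins keeping the plain add/sub (exported in the ksyms.c hunk below), and the rwsem fast paths receive the same addcc/tvs treatment further down. A user-space sketch of the checked/unchecked split using a compiler overflow builtin (the kernel does this in assembly, as above):

	/* refcount_sketch.c - illustrative only (uses the GCC/Clang
	 * __builtin_sadd_overflow builtin) */
	#include <stdio.h>
	#include <stdlib.h>

	static int checked_add(int *v, int inc)
	{
		int newval;

		if (__builtin_sadd_overflow(*v, inc, &newval)) {
			/* analogous to tvs -> pax_report_refcount_overflow() */
			fprintf(stderr, "refcount overflow detected\n");
			abort();
		}
		return *v = newval;
	}

	static int unchecked_add(int *v, int inc)	/* the *_unchecked variant */
	{
		return *v += inc;			/* wrapping is acceptable here */
	}

	int main(void)
	{
		int counter = 0x7ffffffe;

		printf("%d\n", unchecked_add(&counter, 1));	/* 2147483647 */
		checked_add(&counter, 1);			/* overflow: aborts */
		return 0;
	}
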
4962diff -urNp linux-2.6.32.43/arch/sparc/lib/ksyms.c linux-2.6.32.43/arch/sparc/lib/ksyms.c
4963--- linux-2.6.32.43/arch/sparc/lib/ksyms.c 2011-03-27 14:31:47.000000000 -0400
4964+++ linux-2.6.32.43/arch/sparc/lib/ksyms.c 2011-04-17 15:56:46.000000000 -0400
4965@@ -144,12 +144,17 @@ EXPORT_SYMBOL(__downgrade_write);
4966
4967 /* Atomic counter implementation. */
4968 EXPORT_SYMBOL(atomic_add);
4969+EXPORT_SYMBOL(atomic_add_unchecked);
4970 EXPORT_SYMBOL(atomic_add_ret);
4971 EXPORT_SYMBOL(atomic_sub);
4972+EXPORT_SYMBOL(atomic_sub_unchecked);
4973 EXPORT_SYMBOL(atomic_sub_ret);
4974 EXPORT_SYMBOL(atomic64_add);
4975+EXPORT_SYMBOL(atomic64_add_unchecked);
4976 EXPORT_SYMBOL(atomic64_add_ret);
4977+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4978 EXPORT_SYMBOL(atomic64_sub);
4979+EXPORT_SYMBOL(atomic64_sub_unchecked);
4980 EXPORT_SYMBOL(atomic64_sub_ret);
4981
4982 /* Atomic bit operations. */
4983diff -urNp linux-2.6.32.43/arch/sparc/lib/Makefile linux-2.6.32.43/arch/sparc/lib/Makefile
4984--- linux-2.6.32.43/arch/sparc/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
4985+++ linux-2.6.32.43/arch/sparc/lib/Makefile 2011-05-17 19:26:34.000000000 -0400
4986@@ -2,7 +2,7 @@
4987 #
4988
4989 asflags-y := -ansi -DST_DIV0=0x02
4990-ccflags-y := -Werror
4991+#ccflags-y := -Werror
4992
4993 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4994 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4995diff -urNp linux-2.6.32.43/arch/sparc/lib/rwsem_64.S linux-2.6.32.43/arch/sparc/lib/rwsem_64.S
4996--- linux-2.6.32.43/arch/sparc/lib/rwsem_64.S 2011-03-27 14:31:47.000000000 -0400
4997+++ linux-2.6.32.43/arch/sparc/lib/rwsem_64.S 2011-04-17 15:56:46.000000000 -0400
4998@@ -11,7 +11,12 @@
4999 .globl __down_read
5000 __down_read:
5001 1: lduw [%o0], %g1
5002- add %g1, 1, %g7
5003+ addcc %g1, 1, %g7
5004+
5005+#ifdef CONFIG_PAX_REFCOUNT
5006+ tvs %icc, 6
5007+#endif
5008+
5009 cas [%o0], %g1, %g7
5010 cmp %g1, %g7
5011 bne,pn %icc, 1b
5012@@ -33,7 +38,12 @@ __down_read:
5013 .globl __down_read_trylock
5014 __down_read_trylock:
5015 1: lduw [%o0], %g1
5016- add %g1, 1, %g7
5017+ addcc %g1, 1, %g7
5018+
5019+#ifdef CONFIG_PAX_REFCOUNT
5020+ tvs %icc, 6
5021+#endif
5022+
5023 cmp %g7, 0
5024 bl,pn %icc, 2f
5025 mov 0, %o1
5026@@ -51,7 +61,12 @@ __down_write:
5027 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5028 1:
5029 lduw [%o0], %g3
5030- add %g3, %g1, %g7
5031+ addcc %g3, %g1, %g7
5032+
5033+#ifdef CONFIG_PAX_REFCOUNT
5034+ tvs %icc, 6
5035+#endif
5036+
5037 cas [%o0], %g3, %g7
5038 cmp %g3, %g7
5039 bne,pn %icc, 1b
5040@@ -77,7 +92,12 @@ __down_write_trylock:
5041 cmp %g3, 0
5042 bne,pn %icc, 2f
5043 mov 0, %o1
5044- add %g3, %g1, %g7
5045+ addcc %g3, %g1, %g7
5046+
5047+#ifdef CONFIG_PAX_REFCOUNT
5048+ tvs %icc, 6
5049+#endif
5050+
5051 cas [%o0], %g3, %g7
5052 cmp %g3, %g7
5053 bne,pn %icc, 1b
5054@@ -90,7 +110,12 @@ __down_write_trylock:
5055 __up_read:
5056 1:
5057 lduw [%o0], %g1
5058- sub %g1, 1, %g7
5059+ subcc %g1, 1, %g7
5060+
5061+#ifdef CONFIG_PAX_REFCOUNT
5062+ tvs %icc, 6
5063+#endif
5064+
5065 cas [%o0], %g1, %g7
5066 cmp %g1, %g7
5067 bne,pn %icc, 1b
5068@@ -118,7 +143,12 @@ __up_write:
5069 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5070 1:
5071 lduw [%o0], %g3
5072- sub %g3, %g1, %g7
5073+ subcc %g3, %g1, %g7
5074+
5075+#ifdef CONFIG_PAX_REFCOUNT
5076+ tvs %icc, 6
5077+#endif
5078+
5079 cas [%o0], %g3, %g7
5080 cmp %g3, %g7
5081 bne,pn %icc, 1b
5082@@ -143,7 +173,12 @@ __downgrade_write:
5083 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5084 1:
5085 lduw [%o0], %g3
5086- sub %g3, %g1, %g7
5087+ subcc %g3, %g1, %g7
5088+
5089+#ifdef CONFIG_PAX_REFCOUNT
5090+ tvs %icc, 6
5091+#endif
5092+
5093 cas [%o0], %g3, %g7
5094 cmp %g3, %g7
5095 bne,pn %icc, 1b
5096diff -urNp linux-2.6.32.43/arch/sparc/Makefile linux-2.6.32.43/arch/sparc/Makefile
5097--- linux-2.6.32.43/arch/sparc/Makefile 2011-03-27 14:31:47.000000000 -0400
5098+++ linux-2.6.32.43/arch/sparc/Makefile 2011-04-17 15:56:46.000000000 -0400
5099@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
5100 # Export what is needed by arch/sparc/boot/Makefile
5101 export VMLINUX_INIT VMLINUX_MAIN
5102 VMLINUX_INIT := $(head-y) $(init-y)
5103-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5104+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5105 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5106 VMLINUX_MAIN += $(drivers-y) $(net-y)
5107
5108diff -urNp linux-2.6.32.43/arch/sparc/mm/fault_32.c linux-2.6.32.43/arch/sparc/mm/fault_32.c
5109--- linux-2.6.32.43/arch/sparc/mm/fault_32.c 2011-03-27 14:31:47.000000000 -0400
5110+++ linux-2.6.32.43/arch/sparc/mm/fault_32.c 2011-04-17 15:56:46.000000000 -0400
5111@@ -21,6 +21,9 @@
5112 #include <linux/interrupt.h>
5113 #include <linux/module.h>
5114 #include <linux/kdebug.h>
5115+#include <linux/slab.h>
5116+#include <linux/pagemap.h>
5117+#include <linux/compiler.h>
5118
5119 #include <asm/system.h>
5120 #include <asm/page.h>
5121@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
5122 return safe_compute_effective_address(regs, insn);
5123 }
5124
5125+#ifdef CONFIG_PAX_PAGEEXEC
5126+#ifdef CONFIG_PAX_DLRESOLVE
5127+static void pax_emuplt_close(struct vm_area_struct *vma)
5128+{
5129+ vma->vm_mm->call_dl_resolve = 0UL;
5130+}
5131+
5132+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5133+{
5134+ unsigned int *kaddr;
5135+
5136+ vmf->page = alloc_page(GFP_HIGHUSER);
5137+ if (!vmf->page)
5138+ return VM_FAULT_OOM;
5139+
5140+ kaddr = kmap(vmf->page);
5141+ memset(kaddr, 0, PAGE_SIZE);
5142+ kaddr[0] = 0x9DE3BFA8U; /* save */
5143+ flush_dcache_page(vmf->page);
5144+ kunmap(vmf->page);
5145+ return VM_FAULT_MAJOR;
5146+}
5147+
5148+static const struct vm_operations_struct pax_vm_ops = {
5149+ .close = pax_emuplt_close,
5150+ .fault = pax_emuplt_fault
5151+};
5152+
5153+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5154+{
5155+ int ret;
5156+
5157+ vma->vm_mm = current->mm;
5158+ vma->vm_start = addr;
5159+ vma->vm_end = addr + PAGE_SIZE;
5160+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5161+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5162+ vma->vm_ops = &pax_vm_ops;
5163+
5164+ ret = insert_vm_struct(current->mm, vma);
5165+ if (ret)
5166+ return ret;
5167+
5168+ ++current->mm->total_vm;
5169+ return 0;
5170+}
5171+#endif
5172+
5173+/*
5174+ * PaX: decide what to do with offenders (regs->pc = fault address)
5175+ *
5176+ * returns 1 when task should be killed
5177+ * 2 when patched PLT trampoline was detected
5178+ * 3 when unpatched PLT trampoline was detected
5179+ */
5180+static int pax_handle_fetch_fault(struct pt_regs *regs)
5181+{
5182+
5183+#ifdef CONFIG_PAX_EMUPLT
5184+ int err;
5185+
5186+ do { /* PaX: patched PLT emulation #1 */
5187+ unsigned int sethi1, sethi2, jmpl;
5188+
5189+ err = get_user(sethi1, (unsigned int *)regs->pc);
5190+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5191+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5192+
5193+ if (err)
5194+ break;
5195+
5196+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5197+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5198+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5199+ {
5200+ unsigned int addr;
5201+
5202+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5203+ addr = regs->u_regs[UREG_G1];
5204+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5205+ regs->pc = addr;
5206+ regs->npc = addr+4;
5207+ return 2;
5208+ }
5209+ } while (0);
5210+
5211+ { /* PaX: patched PLT emulation #2 */
5212+ unsigned int ba;
5213+
5214+ err = get_user(ba, (unsigned int *)regs->pc);
5215+
5216+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5217+ unsigned int addr;
5218+
5219+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5220+ regs->pc = addr;
5221+ regs->npc = addr+4;
5222+ return 2;
5223+ }
5224+ }
5225+
5226+ do { /* PaX: patched PLT emulation #3 */
5227+ unsigned int sethi, jmpl, nop;
5228+
5229+ err = get_user(sethi, (unsigned int *)regs->pc);
5230+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5231+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5232+
5233+ if (err)
5234+ break;
5235+
5236+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5237+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5238+ nop == 0x01000000U)
5239+ {
5240+ unsigned int addr;
5241+
5242+ addr = (sethi & 0x003FFFFFU) << 10;
5243+ regs->u_regs[UREG_G1] = addr;
5244+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5245+ regs->pc = addr;
5246+ regs->npc = addr+4;
5247+ return 2;
5248+ }
5249+ } while (0);
5250+
5251+ do { /* PaX: unpatched PLT emulation step 1 */
5252+ unsigned int sethi, ba, nop;
5253+
5254+ err = get_user(sethi, (unsigned int *)regs->pc);
5255+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
5256+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5257+
5258+ if (err)
5259+ break;
5260+
5261+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5262+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5263+ nop == 0x01000000U)
5264+ {
5265+ unsigned int addr, save, call;
5266+
5267+ if ((ba & 0xFFC00000U) == 0x30800000U)
5268+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5269+ else
5270+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5271+
5272+ err = get_user(save, (unsigned int *)addr);
5273+ err |= get_user(call, (unsigned int *)(addr+4));
5274+ err |= get_user(nop, (unsigned int *)(addr+8));
5275+ if (err)
5276+ break;
5277+
5278+#ifdef CONFIG_PAX_DLRESOLVE
5279+ if (save == 0x9DE3BFA8U &&
5280+ (call & 0xC0000000U) == 0x40000000U &&
5281+ nop == 0x01000000U)
5282+ {
5283+ struct vm_area_struct *vma;
5284+ unsigned long call_dl_resolve;
5285+
5286+ down_read(&current->mm->mmap_sem);
5287+ call_dl_resolve = current->mm->call_dl_resolve;
5288+ up_read(&current->mm->mmap_sem);
5289+ if (likely(call_dl_resolve))
5290+ goto emulate;
5291+
5292+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5293+
5294+ down_write(&current->mm->mmap_sem);
5295+ if (current->mm->call_dl_resolve) {
5296+ call_dl_resolve = current->mm->call_dl_resolve;
5297+ up_write(&current->mm->mmap_sem);
5298+ if (vma)
5299+ kmem_cache_free(vm_area_cachep, vma);
5300+ goto emulate;
5301+ }
5302+
5303+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5304+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5305+ up_write(&current->mm->mmap_sem);
5306+ if (vma)
5307+ kmem_cache_free(vm_area_cachep, vma);
5308+ return 1;
5309+ }
5310+
5311+ if (pax_insert_vma(vma, call_dl_resolve)) {
5312+ up_write(&current->mm->mmap_sem);
5313+ kmem_cache_free(vm_area_cachep, vma);
5314+ return 1;
5315+ }
5316+
5317+ current->mm->call_dl_resolve = call_dl_resolve;
5318+ up_write(&current->mm->mmap_sem);
5319+
5320+emulate:
5321+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5322+ regs->pc = call_dl_resolve;
5323+ regs->npc = addr+4;
5324+ return 3;
5325+ }
5326+#endif
5327+
5328+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5329+ if ((save & 0xFFC00000U) == 0x05000000U &&
5330+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5331+ nop == 0x01000000U)
5332+ {
5333+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5334+ regs->u_regs[UREG_G2] = addr + 4;
5335+ addr = (save & 0x003FFFFFU) << 10;
5336+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5337+ regs->pc = addr;
5338+ regs->npc = addr+4;
5339+ return 3;
5340+ }
5341+ }
5342+ } while (0);
5343+
5344+ do { /* PaX: unpatched PLT emulation step 2 */
5345+ unsigned int save, call, nop;
5346+
5347+ err = get_user(save, (unsigned int *)(regs->pc-4));
5348+ err |= get_user(call, (unsigned int *)regs->pc);
5349+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
5350+ if (err)
5351+ break;
5352+
5353+ if (save == 0x9DE3BFA8U &&
5354+ (call & 0xC0000000U) == 0x40000000U &&
5355+ nop == 0x01000000U)
5356+ {
5357+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5358+
5359+ regs->u_regs[UREG_RETPC] = regs->pc;
5360+ regs->pc = dl_resolve;
5361+ regs->npc = dl_resolve+4;
5362+ return 3;
5363+ }
5364+ } while (0);
5365+#endif
5366+
5367+ return 1;
5368+}
5369+
5370+void pax_report_insns(void *pc, void *sp)
5371+{
5372+ unsigned long i;
5373+
5374+ printk(KERN_ERR "PAX: bytes at PC: ");
5375+ for (i = 0; i < 8; i++) {
5376+ unsigned int c;
5377+ if (get_user(c, (unsigned int *)pc+i))
5378+ printk(KERN_CONT "???????? ");
5379+ else
5380+ printk(KERN_CONT "%08x ", c);
5381+ }
5382+ printk("\n");
5383+}
5384+#endif
5385+
5386 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5387 unsigned long address)
5388 {
5389@@ -231,6 +495,24 @@ good_area:
5390 if(!(vma->vm_flags & VM_WRITE))
5391 goto bad_area;
5392 } else {
5393+
5394+#ifdef CONFIG_PAX_PAGEEXEC
5395+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5396+ up_read(&mm->mmap_sem);
5397+ switch (pax_handle_fetch_fault(regs)) {
5398+
5399+#ifdef CONFIG_PAX_EMUPLT
5400+ case 2:
5401+ case 3:
5402+ return;
5403+#endif
5404+
5405+ }
5406+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5407+ do_group_exit(SIGKILL);
5408+ }
5409+#endif
5410+
5411 /* Allow reads even for write-only mappings */
5412 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5413 goto bad_area;
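The fault_32.c additions are PaX's PAGEEXEC/EMUPLT support for sparc32: when an instruction fetch faults on a page without VM_EXEC, pax_handle_fetch_fault() pattern-matches the faulting code against the handful of PLT trampoline shapes (patched and unpatched) emitted by the dynamic linker, emulates them by steering pc/npc to the real target, and under PAX_DLRESOLVE maps a one-page "save" stub so unpatched PLT resolution can proceed; anything that does not match is reported via pax_report_insns()/pax_report_fault() and killed. The branch-target arithmetic uses a branch-free sign-extension idiom, e.g. (((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) sign-extends the 22-bit displacement field of a ba instruction. A small self-check of that idiom:

	/* signext_sketch.c - verifies the 22-bit sign-extension idiom used above */
	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	static uint32_t signext22_idiom(uint32_t insn)
	{
		/* the OR forces the bits above the field, the XOR/ADD pair then
		 * corrects them based on the field's sign bit (bit 21) */
		return (((insn | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U);
	}

	static uint32_t signext22_ref(uint32_t insn)
	{
		uint32_t d = insn & 0x003FFFFFU;

		return (d & 0x00200000U) ? (d | 0xFFC00000U) : d;
	}

	int main(void)
	{
		for (uint32_t d = 0; d < (1U << 22); d++)
			assert(signext22_idiom(0x30800000U | d) ==
			       signext22_ref(0x30800000U | d));
		puts("22-bit sign-extension idiom checks out");
		return 0;
	}
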
5414diff -urNp linux-2.6.32.43/arch/sparc/mm/fault_64.c linux-2.6.32.43/arch/sparc/mm/fault_64.c
5415--- linux-2.6.32.43/arch/sparc/mm/fault_64.c 2011-03-27 14:31:47.000000000 -0400
5416+++ linux-2.6.32.43/arch/sparc/mm/fault_64.c 2011-04-17 15:56:46.000000000 -0400
5417@@ -20,6 +20,9 @@
5418 #include <linux/kprobes.h>
5419 #include <linux/kdebug.h>
5420 #include <linux/percpu.h>
5421+#include <linux/slab.h>
5422+#include <linux/pagemap.h>
5423+#include <linux/compiler.h>
5424
5425 #include <asm/page.h>
5426 #include <asm/pgtable.h>
5427@@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs
5428 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5429 regs->tpc);
5430 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5431- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5432+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5433 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5434 dump_stack();
5435 unhandled_fault(regs->tpc, current, regs);
5436@@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_a
5437 show_regs(regs);
5438 }
5439
5440+#ifdef CONFIG_PAX_PAGEEXEC
5441+#ifdef CONFIG_PAX_DLRESOLVE
5442+static void pax_emuplt_close(struct vm_area_struct *vma)
5443+{
5444+ vma->vm_mm->call_dl_resolve = 0UL;
5445+}
5446+
5447+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5448+{
5449+ unsigned int *kaddr;
5450+
5451+ vmf->page = alloc_page(GFP_HIGHUSER);
5452+ if (!vmf->page)
5453+ return VM_FAULT_OOM;
5454+
5455+ kaddr = kmap(vmf->page);
5456+ memset(kaddr, 0, PAGE_SIZE);
5457+ kaddr[0] = 0x9DE3BFA8U; /* save */
5458+ flush_dcache_page(vmf->page);
5459+ kunmap(vmf->page);
5460+ return VM_FAULT_MAJOR;
5461+}
5462+
5463+static const struct vm_operations_struct pax_vm_ops = {
5464+ .close = pax_emuplt_close,
5465+ .fault = pax_emuplt_fault
5466+};
5467+
5468+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5469+{
5470+ int ret;
5471+
5472+ vma->vm_mm = current->mm;
5473+ vma->vm_start = addr;
5474+ vma->vm_end = addr + PAGE_SIZE;
5475+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5476+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5477+ vma->vm_ops = &pax_vm_ops;
5478+
5479+ ret = insert_vm_struct(current->mm, vma);
5480+ if (ret)
5481+ return ret;
5482+
5483+ ++current->mm->total_vm;
5484+ return 0;
5485+}
5486+#endif
5487+
5488+/*
5489+ * PaX: decide what to do with offenders (regs->tpc = fault address)
5490+ *
5491+ * returns 1 when task should be killed
5492+ * 2 when patched PLT trampoline was detected
5493+ * 3 when unpatched PLT trampoline was detected
5494+ */
5495+static int pax_handle_fetch_fault(struct pt_regs *regs)
5496+{
5497+
5498+#ifdef CONFIG_PAX_EMUPLT
5499+ int err;
5500+
5501+ do { /* PaX: patched PLT emulation #1 */
5502+ unsigned int sethi1, sethi2, jmpl;
5503+
5504+ err = get_user(sethi1, (unsigned int *)regs->tpc);
5505+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5506+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5507+
5508+ if (err)
5509+ break;
5510+
5511+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5512+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5513+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5514+ {
5515+ unsigned long addr;
5516+
5517+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5518+ addr = regs->u_regs[UREG_G1];
5519+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5520+
5521+ if (test_thread_flag(TIF_32BIT))
5522+ addr &= 0xFFFFFFFFUL;
5523+
5524+ regs->tpc = addr;
5525+ regs->tnpc = addr+4;
5526+ return 2;
5527+ }
5528+ } while (0);
5529+
5530+ { /* PaX: patched PLT emulation #2 */
5531+ unsigned int ba;
5532+
5533+ err = get_user(ba, (unsigned int *)regs->tpc);
5534+
5535+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5536+ unsigned long addr;
5537+
5538+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5539+
5540+ if (test_thread_flag(TIF_32BIT))
5541+ addr &= 0xFFFFFFFFUL;
5542+
5543+ regs->tpc = addr;
5544+ regs->tnpc = addr+4;
5545+ return 2;
5546+ }
5547+ }
5548+
5549+ do { /* PaX: patched PLT emulation #3 */
5550+ unsigned int sethi, jmpl, nop;
5551+
5552+ err = get_user(sethi, (unsigned int *)regs->tpc);
5553+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5554+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5555+
5556+ if (err)
5557+ break;
5558+
5559+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5560+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5561+ nop == 0x01000000U)
5562+ {
5563+ unsigned long addr;
5564+
5565+ addr = (sethi & 0x003FFFFFU) << 10;
5566+ regs->u_regs[UREG_G1] = addr;
5567+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5568+
5569+ if (test_thread_flag(TIF_32BIT))
5570+ addr &= 0xFFFFFFFFUL;
5571+
5572+ regs->tpc = addr;
5573+ regs->tnpc = addr+4;
5574+ return 2;
5575+ }
5576+ } while (0);
5577+
5578+ do { /* PaX: patched PLT emulation #4 */
5579+ unsigned int sethi, mov1, call, mov2;
5580+
5581+ err = get_user(sethi, (unsigned int *)regs->tpc);
5582+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5583+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
5584+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5585+
5586+ if (err)
5587+ break;
5588+
5589+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5590+ mov1 == 0x8210000FU &&
5591+ (call & 0xC0000000U) == 0x40000000U &&
5592+ mov2 == 0x9E100001U)
5593+ {
5594+ unsigned long addr;
5595+
5596+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5597+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5598+
5599+ if (test_thread_flag(TIF_32BIT))
5600+ addr &= 0xFFFFFFFFUL;
5601+
5602+ regs->tpc = addr;
5603+ regs->tnpc = addr+4;
5604+ return 2;
5605+ }
5606+ } while (0);
5607+
5608+ do { /* PaX: patched PLT emulation #5 */
5609+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5610+
5611+ err = get_user(sethi, (unsigned int *)regs->tpc);
5612+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5613+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5614+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5615+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5616+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5617+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5618+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5619+
5620+ if (err)
5621+ break;
5622+
5623+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5624+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5625+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5626+ (or1 & 0xFFFFE000U) == 0x82106000U &&
5627+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5628+ sllx == 0x83287020U &&
5629+ jmpl == 0x81C04005U &&
5630+ nop == 0x01000000U)
5631+ {
5632+ unsigned long addr;
5633+
5634+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5635+ regs->u_regs[UREG_G1] <<= 32;
5636+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5637+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5638+ regs->tpc = addr;
5639+ regs->tnpc = addr+4;
5640+ return 2;
5641+ }
5642+ } while (0);
5643+
5644+ do { /* PaX: patched PLT emulation #6 */
5645+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5646+
5647+ err = get_user(sethi, (unsigned int *)regs->tpc);
5648+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5649+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5650+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5651+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
5652+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5653+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5654+
5655+ if (err)
5656+ break;
5657+
5658+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5659+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5660+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5661+ sllx == 0x83287020U &&
5662+ (or & 0xFFFFE000U) == 0x8A116000U &&
5663+ jmpl == 0x81C04005U &&
5664+ nop == 0x01000000U)
5665+ {
5666+ unsigned long addr;
5667+
5668+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5669+ regs->u_regs[UREG_G1] <<= 32;
5670+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5671+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5672+ regs->tpc = addr;
5673+ regs->tnpc = addr+4;
5674+ return 2;
5675+ }
5676+ } while (0);
5677+
5678+ do { /* PaX: unpatched PLT emulation step 1 */
5679+ unsigned int sethi, ba, nop;
5680+
5681+ err = get_user(sethi, (unsigned int *)regs->tpc);
5682+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5683+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5684+
5685+ if (err)
5686+ break;
5687+
5688+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5689+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5690+ nop == 0x01000000U)
5691+ {
5692+ unsigned long addr;
5693+ unsigned int save, call;
5694+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5695+
5696+ if ((ba & 0xFFC00000U) == 0x30800000U)
5697+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5698+ else
5699+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5700+
5701+ if (test_thread_flag(TIF_32BIT))
5702+ addr &= 0xFFFFFFFFUL;
5703+
5704+ err = get_user(save, (unsigned int *)addr);
5705+ err |= get_user(call, (unsigned int *)(addr+4));
5706+ err |= get_user(nop, (unsigned int *)(addr+8));
5707+ if (err)
5708+ break;
5709+
5710+#ifdef CONFIG_PAX_DLRESOLVE
5711+ if (save == 0x9DE3BFA8U &&
5712+ (call & 0xC0000000U) == 0x40000000U &&
5713+ nop == 0x01000000U)
5714+ {
5715+ struct vm_area_struct *vma;
5716+ unsigned long call_dl_resolve;
5717+
5718+ down_read(&current->mm->mmap_sem);
5719+ call_dl_resolve = current->mm->call_dl_resolve;
5720+ up_read(&current->mm->mmap_sem);
5721+ if (likely(call_dl_resolve))
5722+ goto emulate;
5723+
5724+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5725+
5726+ down_write(&current->mm->mmap_sem);
5727+ if (current->mm->call_dl_resolve) {
5728+ call_dl_resolve = current->mm->call_dl_resolve;
5729+ up_write(&current->mm->mmap_sem);
5730+ if (vma)
5731+ kmem_cache_free(vm_area_cachep, vma);
5732+ goto emulate;
5733+ }
5734+
5735+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5736+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5737+ up_write(&current->mm->mmap_sem);
5738+ if (vma)
5739+ kmem_cache_free(vm_area_cachep, vma);
5740+ return 1;
5741+ }
5742+
5743+ if (pax_insert_vma(vma, call_dl_resolve)) {
5744+ up_write(&current->mm->mmap_sem);
5745+ kmem_cache_free(vm_area_cachep, vma);
5746+ return 1;
5747+ }
5748+
5749+ current->mm->call_dl_resolve = call_dl_resolve;
5750+ up_write(&current->mm->mmap_sem);
5751+
5752+emulate:
5753+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5754+ regs->tpc = call_dl_resolve;
5755+ regs->tnpc = addr+4;
5756+ return 3;
5757+ }
5758+#endif
5759+
5760+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5761+ if ((save & 0xFFC00000U) == 0x05000000U &&
5762+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5763+ nop == 0x01000000U)
5764+ {
5765+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5766+ regs->u_regs[UREG_G2] = addr + 4;
5767+ addr = (save & 0x003FFFFFU) << 10;
5768+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5769+
5770+ if (test_thread_flag(TIF_32BIT))
5771+ addr &= 0xFFFFFFFFUL;
5772+
5773+ regs->tpc = addr;
5774+ regs->tnpc = addr+4;
5775+ return 3;
5776+ }
5777+
5778+ /* PaX: 64-bit PLT stub */
5779+ err = get_user(sethi1, (unsigned int *)addr);
5780+ err |= get_user(sethi2, (unsigned int *)(addr+4));
5781+ err |= get_user(or1, (unsigned int *)(addr+8));
5782+ err |= get_user(or2, (unsigned int *)(addr+12));
5783+ err |= get_user(sllx, (unsigned int *)(addr+16));
5784+ err |= get_user(add, (unsigned int *)(addr+20));
5785+ err |= get_user(jmpl, (unsigned int *)(addr+24));
5786+ err |= get_user(nop, (unsigned int *)(addr+28));
5787+ if (err)
5788+ break;
5789+
5790+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5791+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5792+ (or1 & 0xFFFFE000U) == 0x88112000U &&
5793+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5794+ sllx == 0x89293020U &&
5795+ add == 0x8A010005U &&
5796+ jmpl == 0x89C14000U &&
5797+ nop == 0x01000000U)
5798+ {
5799+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5800+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5801+ regs->u_regs[UREG_G4] <<= 32;
5802+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5803+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5804+ regs->u_regs[UREG_G4] = addr + 24;
5805+ addr = regs->u_regs[UREG_G5];
5806+ regs->tpc = addr;
5807+ regs->tnpc = addr+4;
5808+ return 3;
5809+ }
5810+ }
5811+ } while (0);
5812+
5813+#ifdef CONFIG_PAX_DLRESOLVE
5814+ do { /* PaX: unpatched PLT emulation step 2 */
5815+ unsigned int save, call, nop;
5816+
5817+ err = get_user(save, (unsigned int *)(regs->tpc-4));
5818+ err |= get_user(call, (unsigned int *)regs->tpc);
5819+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5820+ if (err)
5821+ break;
5822+
5823+ if (save == 0x9DE3BFA8U &&
5824+ (call & 0xC0000000U) == 0x40000000U &&
5825+ nop == 0x01000000U)
5826+ {
5827+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5828+
5829+ if (test_thread_flag(TIF_32BIT))
5830+ dl_resolve &= 0xFFFFFFFFUL;
5831+
5832+ regs->u_regs[UREG_RETPC] = regs->tpc;
5833+ regs->tpc = dl_resolve;
5834+ regs->tnpc = dl_resolve+4;
5835+ return 3;
5836+ }
5837+ } while (0);
5838+#endif
5839+
5840+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5841+ unsigned int sethi, ba, nop;
5842+
5843+ err = get_user(sethi, (unsigned int *)regs->tpc);
5844+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5845+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5846+
5847+ if (err)
5848+ break;
5849+
5850+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5851+ (ba & 0xFFF00000U) == 0x30600000U &&
5852+ nop == 0x01000000U)
5853+ {
5854+ unsigned long addr;
5855+
5856+ addr = (sethi & 0x003FFFFFU) << 10;
5857+ regs->u_regs[UREG_G1] = addr;
5858+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5859+
5860+ if (test_thread_flag(TIF_32BIT))
5861+ addr &= 0xFFFFFFFFUL;
5862+
5863+ regs->tpc = addr;
5864+ regs->tnpc = addr+4;
5865+ return 2;
5866+ }
5867+ } while (0);
5868+
5869+#endif
5870+
5871+ return 1;
5872+}
5873+
5874+void pax_report_insns(void *pc, void *sp)
5875+{
5876+ unsigned long i;
5877+
5878+ printk(KERN_ERR "PAX: bytes at PC: ");
5879+ for (i = 0; i < 8; i++) {
5880+ unsigned int c;
5881+ if (get_user(c, (unsigned int *)pc+i))
5882+ printk(KERN_CONT "???????? ");
5883+ else
5884+ printk(KERN_CONT "%08x ", c);
5885+ }
5886+ printk("\n");
5887+}
5888+#endif
5889+
5890 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5891 {
5892 struct mm_struct *mm = current->mm;
5893@@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fau
5894 if (!vma)
5895 goto bad_area;
5896
5897+#ifdef CONFIG_PAX_PAGEEXEC
5898+ /* PaX: detect ITLB misses on non-exec pages */
5899+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5900+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5901+ {
5902+ if (address != regs->tpc)
5903+ goto good_area;
5904+
5905+ up_read(&mm->mmap_sem);
5906+ switch (pax_handle_fetch_fault(regs)) {
5907+
5908+#ifdef CONFIG_PAX_EMUPLT
5909+ case 2:
5910+ case 3:
5911+ return;
5912+#endif
5913+
5914+ }
5915+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5916+ do_group_exit(SIGKILL);
5917+ }
5918+#endif
5919+
5920 /* Pure DTLB misses do not tell us whether the fault causing
5921 * load/store/atomic was a write or not, it only says that there
5922 * was no match. So in such a case we (carefully) read the
5923diff -urNp linux-2.6.32.43/arch/sparc/mm/hugetlbpage.c linux-2.6.32.43/arch/sparc/mm/hugetlbpage.c
5924--- linux-2.6.32.43/arch/sparc/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
5925+++ linux-2.6.32.43/arch/sparc/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
5926@@ -69,7 +69,7 @@ full_search:
5927 }
5928 return -ENOMEM;
5929 }
5930- if (likely(!vma || addr + len <= vma->vm_start)) {
5931+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5932 /*
5933 * Remember the place where we stopped the search:
5934 */
5935@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
5936 /* make sure it can fit in the remaining address space */
5937 if (likely(addr > len)) {
5938 vma = find_vma(mm, addr-len);
5939- if (!vma || addr <= vma->vm_start) {
5940+ if (check_heap_stack_gap(vma, addr - len, len)) {
5941 /* remember the address as a hint for next time */
5942 return (mm->free_area_cache = addr-len);
5943 }
5944@@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct
5945 if (unlikely(mm->mmap_base < len))
5946 goto bottomup;
5947
5948- addr = (mm->mmap_base-len) & HPAGE_MASK;
5949+ addr = mm->mmap_base - len;
5950
5951 do {
5952+ addr &= HPAGE_MASK;
5953 /*
5954 * Lookup failure means no vma is above this address,
5955 * else if new region fits below vma->vm_start,
5956 * return with success:
5957 */
5958 vma = find_vma(mm, addr);
5959- if (likely(!vma || addr+len <= vma->vm_start)) {
5960+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5961 /* remember the address as a hint for next time */
5962 return (mm->free_area_cache = addr);
5963 }
5964@@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct
5965 mm->cached_hole_size = vma->vm_start - addr;
5966
5967 /* try just below the current vma->vm_start */
5968- addr = (vma->vm_start-len) & HPAGE_MASK;
5969- } while (likely(len < vma->vm_start));
5970+ addr = skip_heap_stack_gap(vma, len);
5971+ } while (!IS_ERR_VALUE(addr));
5972
5973 bottomup:
5974 /*
5975@@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *f
5976 if (addr) {
5977 addr = ALIGN(addr, HPAGE_SIZE);
5978 vma = find_vma(mm, addr);
5979- if (task_size - len >= addr &&
5980- (!vma || addr + len <= vma->vm_start))
5981+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5982 return addr;
5983 }
5984 if (mm->get_unmapped_area == arch_get_unmapped_area)
5985diff -urNp linux-2.6.32.43/arch/sparc/mm/init_32.c linux-2.6.32.43/arch/sparc/mm/init_32.c
5986--- linux-2.6.32.43/arch/sparc/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
5987+++ linux-2.6.32.43/arch/sparc/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
5988@@ -317,6 +317,9 @@ extern void device_scan(void);
5989 pgprot_t PAGE_SHARED __read_mostly;
5990 EXPORT_SYMBOL(PAGE_SHARED);
5991
5992+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5993+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5994+
5995 void __init paging_init(void)
5996 {
5997 switch(sparc_cpu_model) {
5998@@ -345,17 +348,17 @@ void __init paging_init(void)
5999
6000 /* Initialize the protection map with non-constant, MMU dependent values. */
6001 protection_map[0] = PAGE_NONE;
6002- protection_map[1] = PAGE_READONLY;
6003- protection_map[2] = PAGE_COPY;
6004- protection_map[3] = PAGE_COPY;
6005+ protection_map[1] = PAGE_READONLY_NOEXEC;
6006+ protection_map[2] = PAGE_COPY_NOEXEC;
6007+ protection_map[3] = PAGE_COPY_NOEXEC;
6008 protection_map[4] = PAGE_READONLY;
6009 protection_map[5] = PAGE_READONLY;
6010 protection_map[6] = PAGE_COPY;
6011 protection_map[7] = PAGE_COPY;
6012 protection_map[8] = PAGE_NONE;
6013- protection_map[9] = PAGE_READONLY;
6014- protection_map[10] = PAGE_SHARED;
6015- protection_map[11] = PAGE_SHARED;
6016+ protection_map[9] = PAGE_READONLY_NOEXEC;
6017+ protection_map[10] = PAGE_SHARED_NOEXEC;
6018+ protection_map[11] = PAGE_SHARED_NOEXEC;
6019 protection_map[12] = PAGE_READONLY;
6020 protection_map[13] = PAGE_READONLY;
6021 protection_map[14] = PAGE_SHARED;
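The init_32.c hunk is the sparc32 side of PAGEEXEC: protection_map[] is indexed by the mmap protection bits (PROT_READ=1, PROT_WRITE=2, PROT_EXEC=4) plus 8 for shared mappings, so entries 0-3 and 8-11 are exactly the combinations without PROT_EXEC; those now resolve to the *_NOEXEC page protections the patch defines earlier, with the srmmu.c hunk below supplying the actual SRMMU values and PAGE_SHARED_NOEXEC exported alongside PAGE_SHARED. A small indexing sketch (the protection names here are placeholders standing in for pgprot values):

	/* protmap_sketch.c - how protection_map[] indexing works */
	#include <stdio.h>

	#define PROT_READ  0x1
	#define PROT_WRITE 0x2
	#define PROT_EXEC  0x4
	#define SHARED_OFF 8		/* shared mappings use entries 8..15 */

	static const char *protection_map_sketch[16] = {
		/* private */
		"NONE", "READONLY_NOEXEC", "COPY_NOEXEC", "COPY_NOEXEC",
		"READONLY", "READONLY", "COPY", "COPY",
		/* shared */
		"NONE", "READONLY_NOEXEC", "SHARED_NOEXEC", "SHARED_NOEXEC",
		"READONLY", "READONLY", "SHARED", "SHARED",
	};

	int main(void)
	{
		int prot = PROT_READ | PROT_WRITE;	/* no PROT_EXEC requested */

		printf("private rw -> %s\n", protection_map_sketch[prot]);
		printf("shared  rw -> %s\n", protection_map_sketch[prot + SHARED_OFF]);
		return 0;
	}
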
6022diff -urNp linux-2.6.32.43/arch/sparc/mm/Makefile linux-2.6.32.43/arch/sparc/mm/Makefile
6023--- linux-2.6.32.43/arch/sparc/mm/Makefile 2011-03-27 14:31:47.000000000 -0400
6024+++ linux-2.6.32.43/arch/sparc/mm/Makefile 2011-04-17 15:56:46.000000000 -0400
6025@@ -2,7 +2,7 @@
6026 #
6027
6028 asflags-y := -ansi
6029-ccflags-y := -Werror
6030+#ccflags-y := -Werror
6031
6032 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6033 obj-y += fault_$(BITS).o
6034diff -urNp linux-2.6.32.43/arch/sparc/mm/srmmu.c linux-2.6.32.43/arch/sparc/mm/srmmu.c
6035--- linux-2.6.32.43/arch/sparc/mm/srmmu.c 2011-03-27 14:31:47.000000000 -0400
6036+++ linux-2.6.32.43/arch/sparc/mm/srmmu.c 2011-04-17 15:56:46.000000000 -0400
6037@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6038 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6039 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6040 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6041+
6042+#ifdef CONFIG_PAX_PAGEEXEC
6043+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6044+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6045+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6046+#endif
6047+
6048 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6049 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6050
6051diff -urNp linux-2.6.32.43/arch/um/include/asm/kmap_types.h linux-2.6.32.43/arch/um/include/asm/kmap_types.h
6052--- linux-2.6.32.43/arch/um/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
6053+++ linux-2.6.32.43/arch/um/include/asm/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
6054@@ -23,6 +23,7 @@ enum km_type {
6055 KM_IRQ1,
6056 KM_SOFTIRQ0,
6057 KM_SOFTIRQ1,
6058+ KM_CLEARPAGE,
6059 KM_TYPE_NR
6060 };
6061
6062diff -urNp linux-2.6.32.43/arch/um/include/asm/page.h linux-2.6.32.43/arch/um/include/asm/page.h
6063--- linux-2.6.32.43/arch/um/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
6064+++ linux-2.6.32.43/arch/um/include/asm/page.h 2011-04-17 15:56:46.000000000 -0400
6065@@ -14,6 +14,9 @@
6066 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6067 #define PAGE_MASK (~(PAGE_SIZE-1))
6068
6069+#define ktla_ktva(addr) (addr)
6070+#define ktva_ktla(addr) (addr)
6071+
6072 #ifndef __ASSEMBLY__
6073
6074 struct page;
6075diff -urNp linux-2.6.32.43/arch/um/kernel/process.c linux-2.6.32.43/arch/um/kernel/process.c
6076--- linux-2.6.32.43/arch/um/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
6077+++ linux-2.6.32.43/arch/um/kernel/process.c 2011-04-17 15:56:46.000000000 -0400
6078@@ -393,22 +393,6 @@ int singlestepping(void * t)
6079 return 2;
6080 }
6081
6082-/*
6083- * Only x86 and x86_64 have an arch_align_stack().
6084- * All other arches have "#define arch_align_stack(x) (x)"
6085- * in their asm/system.h
6086- * As this is included in UML from asm-um/system-generic.h,
6087- * we can use it to behave as the subarch does.
6088- */
6089-#ifndef arch_align_stack
6090-unsigned long arch_align_stack(unsigned long sp)
6091-{
6092- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6093- sp -= get_random_int() % 8192;
6094- return sp & ~0xf;
6095-}
6096-#endif
6097-
6098 unsigned long get_wchan(struct task_struct *p)
6099 {
6100 unsigned long stack_page, sp, ip;
6101diff -urNp linux-2.6.32.43/arch/um/sys-i386/syscalls.c linux-2.6.32.43/arch/um/sys-i386/syscalls.c
6102--- linux-2.6.32.43/arch/um/sys-i386/syscalls.c 2011-03-27 14:31:47.000000000 -0400
6103+++ linux-2.6.32.43/arch/um/sys-i386/syscalls.c 2011-04-17 15:56:46.000000000 -0400
6104@@ -11,6 +11,21 @@
6105 #include "asm/uaccess.h"
6106 #include "asm/unistd.h"
6107
6108+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6109+{
6110+ unsigned long pax_task_size = TASK_SIZE;
6111+
6112+#ifdef CONFIG_PAX_SEGMEXEC
6113+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6114+ pax_task_size = SEGMEXEC_TASK_SIZE;
6115+#endif
6116+
6117+ if (len > pax_task_size || addr > pax_task_size - len)
6118+ return -EINVAL;
6119+
6120+ return 0;
6121+}
6122+
6123 /*
6124 * Perform the select(nd, in, out, ex, tv) and mmap() system
6125 * calls. Linux/i386 didn't use to be able to handle more than
6126diff -urNp linux-2.6.32.43/arch/x86/boot/bitops.h linux-2.6.32.43/arch/x86/boot/bitops.h
6127--- linux-2.6.32.43/arch/x86/boot/bitops.h 2011-03-27 14:31:47.000000000 -0400
6128+++ linux-2.6.32.43/arch/x86/boot/bitops.h 2011-04-17 15:56:46.000000000 -0400
6129@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
6130 u8 v;
6131 const u32 *p = (const u32 *)addr;
6132
6133- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6134+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6135 return v;
6136 }
6137
6138@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
6139
6140 static inline void set_bit(int nr, void *addr)
6141 {
6142- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6143+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6144 }
6145
6146 #endif /* BOOT_BITOPS_H */
6147diff -urNp linux-2.6.32.43/arch/x86/boot/boot.h linux-2.6.32.43/arch/x86/boot/boot.h
6148--- linux-2.6.32.43/arch/x86/boot/boot.h 2011-03-27 14:31:47.000000000 -0400
6149+++ linux-2.6.32.43/arch/x86/boot/boot.h 2011-04-17 15:56:46.000000000 -0400
6150@@ -82,7 +82,7 @@ static inline void io_delay(void)
6151 static inline u16 ds(void)
6152 {
6153 u16 seg;
6154- asm("movw %%ds,%0" : "=rm" (seg));
6155+ asm volatile("movw %%ds,%0" : "=rm" (seg));
6156 return seg;
6157 }
6158
6159@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
6160 static inline int memcmp(const void *s1, const void *s2, size_t len)
6161 {
6162 u8 diff;
6163- asm("repe; cmpsb; setnz %0"
6164+ asm volatile("repe; cmpsb; setnz %0"
6165 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
6166 return diff;
6167 }
6168diff -urNp linux-2.6.32.43/arch/x86/boot/compressed/head_32.S linux-2.6.32.43/arch/x86/boot/compressed/head_32.S
6169--- linux-2.6.32.43/arch/x86/boot/compressed/head_32.S 2011-03-27 14:31:47.000000000 -0400
6170+++ linux-2.6.32.43/arch/x86/boot/compressed/head_32.S 2011-04-17 15:56:46.000000000 -0400
6171@@ -76,7 +76,7 @@ ENTRY(startup_32)
6172 notl %eax
6173 andl %eax, %ebx
6174 #else
6175- movl $LOAD_PHYSICAL_ADDR, %ebx
6176+ movl $____LOAD_PHYSICAL_ADDR, %ebx
6177 #endif
6178
6179 /* Target address to relocate to for decompression */
6180@@ -149,7 +149,7 @@ relocated:
6181 * and where it was actually loaded.
6182 */
6183 movl %ebp, %ebx
6184- subl $LOAD_PHYSICAL_ADDR, %ebx
6185+ subl $____LOAD_PHYSICAL_ADDR, %ebx
6186 jz 2f /* Nothing to be done if loaded at compiled addr. */
6187 /*
6188 * Process relocations.
6189@@ -157,8 +157,7 @@ relocated:
6190
6191 1: subl $4, %edi
6192 movl (%edi), %ecx
6193- testl %ecx, %ecx
6194- jz 2f
6195+ jecxz 2f
6196 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6197 jmp 1b
6198 2:
6199diff -urNp linux-2.6.32.43/arch/x86/boot/compressed/head_64.S linux-2.6.32.43/arch/x86/boot/compressed/head_64.S
6200--- linux-2.6.32.43/arch/x86/boot/compressed/head_64.S 2011-03-27 14:31:47.000000000 -0400
6201+++ linux-2.6.32.43/arch/x86/boot/compressed/head_64.S 2011-07-01 18:53:00.000000000 -0400
6202@@ -91,7 +91,7 @@ ENTRY(startup_32)
6203 notl %eax
6204 andl %eax, %ebx
6205 #else
6206- movl $LOAD_PHYSICAL_ADDR, %ebx
6207+ movl $____LOAD_PHYSICAL_ADDR, %ebx
6208 #endif
6209
6210 /* Target address to relocate to for decompression */
6211@@ -183,7 +183,7 @@ no_longmode:
6212 hlt
6213 jmp 1b
6214
6215-#include "../../kernel/verify_cpu_64.S"
6216+#include "../../kernel/verify_cpu.S"
6217
6218 /*
6219 * Be careful here startup_64 needs to be at a predictable
6220@@ -234,7 +234,7 @@ ENTRY(startup_64)
6221 notq %rax
6222 andq %rax, %rbp
6223 #else
6224- movq $LOAD_PHYSICAL_ADDR, %rbp
6225+ movq $____LOAD_PHYSICAL_ADDR, %rbp
6226 #endif
6227
6228 /* Target address to relocate to for decompression */
6229diff -urNp linux-2.6.32.43/arch/x86/boot/compressed/misc.c linux-2.6.32.43/arch/x86/boot/compressed/misc.c
6230--- linux-2.6.32.43/arch/x86/boot/compressed/misc.c 2011-03-27 14:31:47.000000000 -0400
6231+++ linux-2.6.32.43/arch/x86/boot/compressed/misc.c 2011-04-17 15:56:46.000000000 -0400
6232@@ -288,7 +288,7 @@ static void parse_elf(void *output)
6233 case PT_LOAD:
6234 #ifdef CONFIG_RELOCATABLE
6235 dest = output;
6236- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6237+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6238 #else
6239 dest = (void *)(phdr->p_paddr);
6240 #endif
6241@@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
6242 error("Destination address too large");
6243 #endif
6244 #ifndef CONFIG_RELOCATABLE
6245- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6246+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6247 error("Wrong destination address");
6248 #endif
6249
6250diff -urNp linux-2.6.32.43/arch/x86/boot/compressed/mkpiggy.c linux-2.6.32.43/arch/x86/boot/compressed/mkpiggy.c
6251--- linux-2.6.32.43/arch/x86/boot/compressed/mkpiggy.c 2011-03-27 14:31:47.000000000 -0400
6252+++ linux-2.6.32.43/arch/x86/boot/compressed/mkpiggy.c 2011-04-17 15:56:46.000000000 -0400
6253@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
6254
6255 offs = (olen > ilen) ? olen - ilen : 0;
6256 offs += olen >> 12; /* Add 8 bytes for each 32K block */
6257- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
6258+ offs += 64*1024; /* Add 64K bytes slack */
6259 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
6260
6261 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
6262diff -urNp linux-2.6.32.43/arch/x86/boot/compressed/relocs.c linux-2.6.32.43/arch/x86/boot/compressed/relocs.c
6263--- linux-2.6.32.43/arch/x86/boot/compressed/relocs.c 2011-03-27 14:31:47.000000000 -0400
6264+++ linux-2.6.32.43/arch/x86/boot/compressed/relocs.c 2011-04-17 15:56:46.000000000 -0400
6265@@ -10,8 +10,11 @@
6266 #define USE_BSD
6267 #include <endian.h>
6268
6269+#include "../../../../include/linux/autoconf.h"
6270+
6271 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6272 static Elf32_Ehdr ehdr;
6273+static Elf32_Phdr *phdr;
6274 static unsigned long reloc_count, reloc_idx;
6275 static unsigned long *relocs;
6276
6277@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
6278
6279 static int is_safe_abs_reloc(const char* sym_name)
6280 {
6281- int i;
6282+ unsigned int i;
6283
6284 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
6285 if (!strcmp(sym_name, safe_abs_relocs[i]))
6286@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
6287 }
6288 }
6289
6290+static void read_phdrs(FILE *fp)
6291+{
6292+ unsigned int i;
6293+
6294+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6295+ if (!phdr) {
6296+ die("Unable to allocate %d program headers\n",
6297+ ehdr.e_phnum);
6298+ }
6299+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6300+ die("Seek to %d failed: %s\n",
6301+ ehdr.e_phoff, strerror(errno));
6302+ }
6303+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6304+ die("Cannot read ELF program headers: %s\n",
6305+ strerror(errno));
6306+ }
6307+ for(i = 0; i < ehdr.e_phnum; i++) {
6308+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6309+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6310+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6311+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6312+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6313+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6314+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6315+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6316+ }
6317+
6318+}
6319+
6320 static void read_shdrs(FILE *fp)
6321 {
6322- int i;
6323+ unsigned int i;
6324 Elf32_Shdr shdr;
6325
6326 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6327@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
6328
6329 static void read_strtabs(FILE *fp)
6330 {
6331- int i;
6332+ unsigned int i;
6333 for (i = 0; i < ehdr.e_shnum; i++) {
6334 struct section *sec = &secs[i];
6335 if (sec->shdr.sh_type != SHT_STRTAB) {
6336@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
6337
6338 static void read_symtabs(FILE *fp)
6339 {
6340- int i,j;
6341+ unsigned int i,j;
6342 for (i = 0; i < ehdr.e_shnum; i++) {
6343 struct section *sec = &secs[i];
6344 if (sec->shdr.sh_type != SHT_SYMTAB) {
6345@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
6346
6347 static void read_relocs(FILE *fp)
6348 {
6349- int i,j;
6350+ unsigned int i,j;
6351+ uint32_t base;
6352+
6353 for (i = 0; i < ehdr.e_shnum; i++) {
6354 struct section *sec = &secs[i];
6355 if (sec->shdr.sh_type != SHT_REL) {
6356@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
6357 die("Cannot read symbol table: %s\n",
6358 strerror(errno));
6359 }
6360+ base = 0;
6361+ for (j = 0; j < ehdr.e_phnum; j++) {
6362+ if (phdr[j].p_type != PT_LOAD )
6363+ continue;
6364+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6365+ continue;
6366+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6367+ break;
6368+ }
6369 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6370 Elf32_Rel *rel = &sec->reltab[j];
6371- rel->r_offset = elf32_to_cpu(rel->r_offset);
6372+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6373 rel->r_info = elf32_to_cpu(rel->r_info);
6374 }
6375 }
6376@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
6377
6378 static void print_absolute_symbols(void)
6379 {
6380- int i;
6381+ unsigned int i;
6382 printf("Absolute symbols\n");
6383 printf(" Num: Value Size Type Bind Visibility Name\n");
6384 for (i = 0; i < ehdr.e_shnum; i++) {
6385 struct section *sec = &secs[i];
6386 char *sym_strtab;
6387 Elf32_Sym *sh_symtab;
6388- int j;
6389+ unsigned int j;
6390
6391 if (sec->shdr.sh_type != SHT_SYMTAB) {
6392 continue;
6393@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
6394
6395 static void print_absolute_relocs(void)
6396 {
6397- int i, printed = 0;
6398+ unsigned int i, printed = 0;
6399
6400 for (i = 0; i < ehdr.e_shnum; i++) {
6401 struct section *sec = &secs[i];
6402 struct section *sec_applies, *sec_symtab;
6403 char *sym_strtab;
6404 Elf32_Sym *sh_symtab;
6405- int j;
6406+ unsigned int j;
6407 if (sec->shdr.sh_type != SHT_REL) {
6408 continue;
6409 }
6410@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
6411
6412 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6413 {
6414- int i;
6415+ unsigned int i;
6416 /* Walk through the relocations */
6417 for (i = 0; i < ehdr.e_shnum; i++) {
6418 char *sym_strtab;
6419 Elf32_Sym *sh_symtab;
6420 struct section *sec_applies, *sec_symtab;
6421- int j;
6422+ unsigned int j;
6423 struct section *sec = &secs[i];
6424
6425 if (sec->shdr.sh_type != SHT_REL) {
6426@@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
6427 if (sym->st_shndx == SHN_ABS) {
6428 continue;
6429 }
6430+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6431+ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6432+ continue;
6433+
6434+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6435+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6436+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6437+ continue;
6438+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6439+ continue;
6440+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6441+ continue;
6442+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6443+ continue;
6444+#endif
6445 if (r_type == R_386_NONE || r_type == R_386_PC32) {
6446 /*
6447 * NONE can be ignored and and PC relative
6448@@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
6449
6450 static void emit_relocs(int as_text)
6451 {
6452- int i;
6453+ unsigned int i;
6454 /* Count how many relocations I have and allocate space for them. */
6455 reloc_count = 0;
6456 walk_relocs(count_reloc);
6457@@ -634,6 +693,7 @@ int main(int argc, char **argv)
6458 fname, strerror(errno));
6459 }
6460 read_ehdr(fp);
6461+ read_phdrs(fp);
6462 read_shdrs(fp);
6463 read_strtabs(fp);
6464 read_symtabs(fp);
6465diff -urNp linux-2.6.32.43/arch/x86/boot/cpucheck.c linux-2.6.32.43/arch/x86/boot/cpucheck.c
6466--- linux-2.6.32.43/arch/x86/boot/cpucheck.c 2011-03-27 14:31:47.000000000 -0400
6467+++ linux-2.6.32.43/arch/x86/boot/cpucheck.c 2011-04-17 15:56:46.000000000 -0400
6468@@ -74,7 +74,7 @@ static int has_fpu(void)
6469 u16 fcw = -1, fsw = -1;
6470 u32 cr0;
6471
6472- asm("movl %%cr0,%0" : "=r" (cr0));
6473+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
6474 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6475 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6476 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6477@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6478 {
6479 u32 f0, f1;
6480
6481- asm("pushfl ; "
6482+ asm volatile("pushfl ; "
6483 "pushfl ; "
6484 "popl %0 ; "
6485 "movl %0,%1 ; "
6486@@ -115,7 +115,7 @@ static void get_flags(void)
6487 set_bit(X86_FEATURE_FPU, cpu.flags);
6488
6489 if (has_eflag(X86_EFLAGS_ID)) {
6490- asm("cpuid"
6491+ asm volatile("cpuid"
6492 : "=a" (max_intel_level),
6493 "=b" (cpu_vendor[0]),
6494 "=d" (cpu_vendor[1]),
6495@@ -124,7 +124,7 @@ static void get_flags(void)
6496
6497 if (max_intel_level >= 0x00000001 &&
6498 max_intel_level <= 0x0000ffff) {
6499- asm("cpuid"
6500+ asm volatile("cpuid"
6501 : "=a" (tfms),
6502 "=c" (cpu.flags[4]),
6503 "=d" (cpu.flags[0])
6504@@ -136,7 +136,7 @@ static void get_flags(void)
6505 cpu.model += ((tfms >> 16) & 0xf) << 4;
6506 }
6507
6508- asm("cpuid"
6509+ asm volatile("cpuid"
6510 : "=a" (max_amd_level)
6511 : "a" (0x80000000)
6512 : "ebx", "ecx", "edx");
6513@@ -144,7 +144,7 @@ static void get_flags(void)
6514 if (max_amd_level >= 0x80000001 &&
6515 max_amd_level <= 0x8000ffff) {
6516 u32 eax = 0x80000001;
6517- asm("cpuid"
6518+ asm volatile("cpuid"
6519 : "+a" (eax),
6520 "=c" (cpu.flags[6]),
6521 "=d" (cpu.flags[1])
6522@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6523 u32 ecx = MSR_K7_HWCR;
6524 u32 eax, edx;
6525
6526- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6527+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6528 eax &= ~(1 << 15);
6529- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6530+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6531
6532 get_flags(); /* Make sure it really did something */
6533 err = check_flags();
6534@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6535 u32 ecx = MSR_VIA_FCR;
6536 u32 eax, edx;
6537
6538- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6539+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6540 eax |= (1<<1)|(1<<7);
6541- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6542+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6543
6544 set_bit(X86_FEATURE_CX8, cpu.flags);
6545 err = check_flags();
6546@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6547 u32 eax, edx;
6548 u32 level = 1;
6549
6550- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6551- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6552- asm("cpuid"
6553+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6554+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6555+ asm volatile("cpuid"
6556 : "+a" (level), "=d" (cpu.flags[0])
6557 : : "ecx", "ebx");
6558- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6559+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6560
6561 err = check_flags();
6562 }
6563diff -urNp linux-2.6.32.43/arch/x86/boot/header.S linux-2.6.32.43/arch/x86/boot/header.S
6564--- linux-2.6.32.43/arch/x86/boot/header.S 2011-03-27 14:31:47.000000000 -0400
6565+++ linux-2.6.32.43/arch/x86/boot/header.S 2011-04-17 15:56:46.000000000 -0400
6566@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6567 # single linked list of
6568 # struct setup_data
6569
6570-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6571+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6572
6573 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6574 #define VO_INIT_SIZE (VO__end - VO__text)
6575diff -urNp linux-2.6.32.43/arch/x86/boot/memory.c linux-2.6.32.43/arch/x86/boot/memory.c
6576--- linux-2.6.32.43/arch/x86/boot/memory.c 2011-03-27 14:31:47.000000000 -0400
6577+++ linux-2.6.32.43/arch/x86/boot/memory.c 2011-04-17 15:56:46.000000000 -0400
6578@@ -19,7 +19,7 @@
6579
6580 static int detect_memory_e820(void)
6581 {
6582- int count = 0;
6583+ unsigned int count = 0;
6584 struct biosregs ireg, oreg;
6585 struct e820entry *desc = boot_params.e820_map;
6586 static struct e820entry buf; /* static so it is zeroed */
6587diff -urNp linux-2.6.32.43/arch/x86/boot/video.c linux-2.6.32.43/arch/x86/boot/video.c
6588--- linux-2.6.32.43/arch/x86/boot/video.c 2011-03-27 14:31:47.000000000 -0400
6589+++ linux-2.6.32.43/arch/x86/boot/video.c 2011-04-17 15:56:46.000000000 -0400
6590@@ -90,7 +90,7 @@ static void store_mode_params(void)
6591 static unsigned int get_entry(void)
6592 {
6593 char entry_buf[4];
6594- int i, len = 0;
6595+ unsigned int i, len = 0;
6596 int key;
6597 unsigned int v;
6598
6599diff -urNp linux-2.6.32.43/arch/x86/boot/video-vesa.c linux-2.6.32.43/arch/x86/boot/video-vesa.c
6600--- linux-2.6.32.43/arch/x86/boot/video-vesa.c 2011-03-27 14:31:47.000000000 -0400
6601+++ linux-2.6.32.43/arch/x86/boot/video-vesa.c 2011-04-17 15:56:46.000000000 -0400
6602@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6603
6604 boot_params.screen_info.vesapm_seg = oreg.es;
6605 boot_params.screen_info.vesapm_off = oreg.di;
6606+ boot_params.screen_info.vesapm_size = oreg.cx;
6607 }
6608
6609 /*
6610diff -urNp linux-2.6.32.43/arch/x86/ia32/ia32_aout.c linux-2.6.32.43/arch/x86/ia32/ia32_aout.c
6611--- linux-2.6.32.43/arch/x86/ia32/ia32_aout.c 2011-03-27 14:31:47.000000000 -0400
6612+++ linux-2.6.32.43/arch/x86/ia32/ia32_aout.c 2011-04-17 15:56:46.000000000 -0400
6613@@ -169,6 +169,8 @@ static int aout_core_dump(long signr, st
6614 unsigned long dump_start, dump_size;
6615 struct user32 dump;
6616
6617+ memset(&dump, 0, sizeof(dump));
6618+
6619 fs = get_fs();
6620 set_fs(KERNEL_DS);
6621 has_dumped = 1;
6622@@ -218,12 +220,6 @@ static int aout_core_dump(long signr, st
6623 dump_size = dump.u_ssize << PAGE_SHIFT;
6624 DUMP_WRITE(dump_start, dump_size);
6625 }
6626- /*
6627- * Finally dump the task struct. Not be used by gdb, but
6628- * could be useful
6629- */
6630- set_fs(KERNEL_DS);
6631- DUMP_WRITE(current, sizeof(*current));
6632 end_coredump:
6633 set_fs(fs);
6634 return has_dumped;
6635diff -urNp linux-2.6.32.43/arch/x86/ia32/ia32entry.S linux-2.6.32.43/arch/x86/ia32/ia32entry.S
6636--- linux-2.6.32.43/arch/x86/ia32/ia32entry.S 2011-03-27 14:31:47.000000000 -0400
6637+++ linux-2.6.32.43/arch/x86/ia32/ia32entry.S 2011-06-04 20:29:52.000000000 -0400
6638@@ -13,6 +13,7 @@
6639 #include <asm/thread_info.h>
6640 #include <asm/segment.h>
6641 #include <asm/irqflags.h>
6642+#include <asm/pgtable.h>
6643 #include <linux/linkage.h>
6644
6645 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6646@@ -93,6 +94,30 @@ ENTRY(native_irq_enable_sysexit)
6647 ENDPROC(native_irq_enable_sysexit)
6648 #endif
6649
6650+ .macro pax_enter_kernel_user
6651+#ifdef CONFIG_PAX_MEMORY_UDEREF
6652+ call pax_enter_kernel_user
6653+#endif
6654+ .endm
6655+
6656+ .macro pax_exit_kernel_user
6657+#ifdef CONFIG_PAX_MEMORY_UDEREF
6658+ call pax_exit_kernel_user
6659+#endif
6660+#ifdef CONFIG_PAX_RANDKSTACK
6661+ pushq %rax
6662+ call pax_randomize_kstack
6663+ popq %rax
6664+#endif
6665+ pax_erase_kstack
6666+ .endm
6667+
6668+.macro pax_erase_kstack
6669+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6670+ call pax_erase_kstack
6671+#endif
6672+.endm
6673+
6674 /*
6675 * 32bit SYSENTER instruction entry.
6676 *
6677@@ -119,7 +144,7 @@ ENTRY(ia32_sysenter_target)
6678 CFI_REGISTER rsp,rbp
6679 SWAPGS_UNSAFE_STACK
6680 movq PER_CPU_VAR(kernel_stack), %rsp
6681- addq $(KERNEL_STACK_OFFSET),%rsp
6682+ pax_enter_kernel_user
6683 /*
6684 * No need to follow this irqs on/off section: the syscall
6685 * disabled irqs, here we enable it straight after entry:
6686@@ -135,7 +160,8 @@ ENTRY(ia32_sysenter_target)
6687 pushfq
6688 CFI_ADJUST_CFA_OFFSET 8
6689 /*CFI_REL_OFFSET rflags,0*/
6690- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6691+ GET_THREAD_INFO(%r10)
6692+ movl TI_sysenter_return(%r10), %r10d
6693 CFI_REGISTER rip,r10
6694 pushq $__USER32_CS
6695 CFI_ADJUST_CFA_OFFSET 8
6696@@ -150,6 +176,12 @@ ENTRY(ia32_sysenter_target)
6697 SAVE_ARGS 0,0,1
6698 /* no need to do an access_ok check here because rbp has been
6699 32bit zero extended */
6700+
6701+#ifdef CONFIG_PAX_MEMORY_UDEREF
6702+ mov $PAX_USER_SHADOW_BASE,%r10
6703+ add %r10,%rbp
6704+#endif
6705+
6706 1: movl (%rbp),%ebp
6707 .section __ex_table,"a"
6708 .quad 1b,ia32_badarg
6709@@ -172,6 +204,7 @@ sysenter_dispatch:
6710 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6711 jnz sysexit_audit
6712 sysexit_from_sys_call:
6713+ pax_exit_kernel_user
6714 andl $~TS_COMPAT,TI_status(%r10)
6715 /* clear IF, that popfq doesn't enable interrupts early */
6716 andl $~0x200,EFLAGS-R11(%rsp)
6717@@ -200,6 +233,9 @@ sysexit_from_sys_call:
6718 movl %eax,%esi /* 2nd arg: syscall number */
6719 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6720 call audit_syscall_entry
6721+
6722+ pax_erase_kstack
6723+
6724 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6725 cmpq $(IA32_NR_syscalls-1),%rax
6726 ja ia32_badsys
6727@@ -252,6 +288,9 @@ sysenter_tracesys:
6728 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6729 movq %rsp,%rdi /* &pt_regs -> arg1 */
6730 call syscall_trace_enter
6731+
6732+ pax_erase_kstack
6733+
6734 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6735 RESTORE_REST
6736 cmpq $(IA32_NR_syscalls-1),%rax
6737@@ -283,19 +322,24 @@ ENDPROC(ia32_sysenter_target)
6738 ENTRY(ia32_cstar_target)
6739 CFI_STARTPROC32 simple
6740 CFI_SIGNAL_FRAME
6741- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6742+ CFI_DEF_CFA rsp,0
6743 CFI_REGISTER rip,rcx
6744 /*CFI_REGISTER rflags,r11*/
6745 SWAPGS_UNSAFE_STACK
6746 movl %esp,%r8d
6747 CFI_REGISTER rsp,r8
6748 movq PER_CPU_VAR(kernel_stack),%rsp
6749+
6750+#ifdef CONFIG_PAX_MEMORY_UDEREF
6751+ pax_enter_kernel_user
6752+#endif
6753+
6754 /*
6755 * No need to follow this irqs on/off section: the syscall
6756 * disabled irqs and here we enable it straight after entry:
6757 */
6758 ENABLE_INTERRUPTS(CLBR_NONE)
6759- SAVE_ARGS 8,1,1
6760+ SAVE_ARGS 8*6,1,1
6761 movl %eax,%eax /* zero extension */
6762 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6763 movq %rcx,RIP-ARGOFFSET(%rsp)
6764@@ -311,6 +355,12 @@ ENTRY(ia32_cstar_target)
6765 /* no need to do an access_ok check here because r8 has been
6766 32bit zero extended */
6767 /* hardware stack frame is complete now */
6768+
6769+#ifdef CONFIG_PAX_MEMORY_UDEREF
6770+ mov $PAX_USER_SHADOW_BASE,%r10
6771+ add %r10,%r8
6772+#endif
6773+
6774 1: movl (%r8),%r9d
6775 .section __ex_table,"a"
6776 .quad 1b,ia32_badarg
6777@@ -333,6 +383,7 @@ cstar_dispatch:
6778 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6779 jnz sysretl_audit
6780 sysretl_from_sys_call:
6781+ pax_exit_kernel_user
6782 andl $~TS_COMPAT,TI_status(%r10)
6783 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6784 movl RIP-ARGOFFSET(%rsp),%ecx
6785@@ -370,6 +421,9 @@ cstar_tracesys:
6786 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6787 movq %rsp,%rdi /* &pt_regs -> arg1 */
6788 call syscall_trace_enter
6789+
6790+ pax_erase_kstack
6791+
6792 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6793 RESTORE_REST
6794 xchgl %ebp,%r9d
6795@@ -415,6 +469,7 @@ ENTRY(ia32_syscall)
6796 CFI_REL_OFFSET rip,RIP-RIP
6797 PARAVIRT_ADJUST_EXCEPTION_FRAME
6798 SWAPGS
6799+ pax_enter_kernel_user
6800 /*
6801 * No need to follow this irqs on/off section: the syscall
6802 * disabled irqs and here we enable it straight after entry:
6803@@ -448,6 +503,9 @@ ia32_tracesys:
6804 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6805 movq %rsp,%rdi /* &pt_regs -> arg1 */
6806 call syscall_trace_enter
6807+
6808+ pax_erase_kstack
6809+
6810 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6811 RESTORE_REST
6812 cmpq $(IA32_NR_syscalls-1),%rax
6813diff -urNp linux-2.6.32.43/arch/x86/ia32/ia32_signal.c linux-2.6.32.43/arch/x86/ia32/ia32_signal.c
6814--- linux-2.6.32.43/arch/x86/ia32/ia32_signal.c 2011-03-27 14:31:47.000000000 -0400
6815+++ linux-2.6.32.43/arch/x86/ia32/ia32_signal.c 2011-04-17 15:56:46.000000000 -0400
6816@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6817 sp -= frame_size;
6818 /* Align the stack pointer according to the i386 ABI,
6819 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6820- sp = ((sp + 4) & -16ul) - 4;
6821+ sp = ((sp - 12) & -16ul) - 4;
6822 return (void __user *) sp;
6823 }
6824
6825@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
6826 * These are actually not used anymore, but left because some
6827 * gdb versions depend on them as a marker.
6828 */
6829- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6830+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6831 } put_user_catch(err);
6832
6833 if (err)
6834@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6835 0xb8,
6836 __NR_ia32_rt_sigreturn,
6837 0x80cd,
6838- 0,
6839+ 0
6840 };
6841
6842 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6843@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
6844
6845 if (ka->sa.sa_flags & SA_RESTORER)
6846 restorer = ka->sa.sa_restorer;
6847+ else if (current->mm->context.vdso)
6848+ /* Return stub is in 32bit vsyscall page */
6849+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6850 else
6851- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6852- rt_sigreturn);
6853+ restorer = &frame->retcode;
6854 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6855
6856 /*
6857 * Not actually used anymore, but left because some gdb
6858 * versions need it.
6859 */
6860- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6861+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6862 } put_user_catch(err);
6863
6864 if (err)
6865diff -urNp linux-2.6.32.43/arch/x86/include/asm/alternative.h linux-2.6.32.43/arch/x86/include/asm/alternative.h
6866--- linux-2.6.32.43/arch/x86/include/asm/alternative.h 2011-03-27 14:31:47.000000000 -0400
6867+++ linux-2.6.32.43/arch/x86/include/asm/alternative.h 2011-04-17 15:56:46.000000000 -0400
6868@@ -85,7 +85,7 @@ static inline void alternatives_smp_swit
6869 " .byte 662b-661b\n" /* sourcelen */ \
6870 " .byte 664f-663f\n" /* replacementlen */ \
6871 ".previous\n" \
6872- ".section .altinstr_replacement, \"ax\"\n" \
6873+ ".section .altinstr_replacement, \"a\"\n" \
6874 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6875 ".previous"
6876
6877diff -urNp linux-2.6.32.43/arch/x86/include/asm/apm.h linux-2.6.32.43/arch/x86/include/asm/apm.h
6878--- linux-2.6.32.43/arch/x86/include/asm/apm.h 2011-03-27 14:31:47.000000000 -0400
6879+++ linux-2.6.32.43/arch/x86/include/asm/apm.h 2011-04-17 15:56:46.000000000 -0400
6880@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6881 __asm__ __volatile__(APM_DO_ZERO_SEGS
6882 "pushl %%edi\n\t"
6883 "pushl %%ebp\n\t"
6884- "lcall *%%cs:apm_bios_entry\n\t"
6885+ "lcall *%%ss:apm_bios_entry\n\t"
6886 "setc %%al\n\t"
6887 "popl %%ebp\n\t"
6888 "popl %%edi\n\t"
6889@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6890 __asm__ __volatile__(APM_DO_ZERO_SEGS
6891 "pushl %%edi\n\t"
6892 "pushl %%ebp\n\t"
6893- "lcall *%%cs:apm_bios_entry\n\t"
6894+ "lcall *%%ss:apm_bios_entry\n\t"
6895 "setc %%bl\n\t"
6896 "popl %%ebp\n\t"
6897 "popl %%edi\n\t"
6898diff -urNp linux-2.6.32.43/arch/x86/include/asm/atomic_32.h linux-2.6.32.43/arch/x86/include/asm/atomic_32.h
6899--- linux-2.6.32.43/arch/x86/include/asm/atomic_32.h 2011-03-27 14:31:47.000000000 -0400
6900+++ linux-2.6.32.43/arch/x86/include/asm/atomic_32.h 2011-05-04 17:56:20.000000000 -0400
6901@@ -25,6 +25,17 @@ static inline int atomic_read(const atom
6902 }
6903
6904 /**
6905+ * atomic_read_unchecked - read atomic variable
6906+ * @v: pointer of type atomic_unchecked_t
6907+ *
6908+ * Atomically reads the value of @v.
6909+ */
6910+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6911+{
6912+ return v->counter;
6913+}
6914+
6915+/**
6916 * atomic_set - set atomic variable
6917 * @v: pointer of type atomic_t
6918 * @i: required value
6919@@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
6920 }
6921
6922 /**
6923+ * atomic_set_unchecked - set atomic variable
6924+ * @v: pointer of type atomic_unchecked_t
6925+ * @i: required value
6926+ *
6927+ * Atomically sets the value of @v to @i.
6928+ */
6929+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6930+{
6931+ v->counter = i;
6932+}
6933+
6934+/**
6935 * atomic_add - add integer to atomic variable
6936 * @i: integer value to add
6937 * @v: pointer of type atomic_t
6938@@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
6939 */
6940 static inline void atomic_add(int i, atomic_t *v)
6941 {
6942- asm volatile(LOCK_PREFIX "addl %1,%0"
6943+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6944+
6945+#ifdef CONFIG_PAX_REFCOUNT
6946+ "jno 0f\n"
6947+ LOCK_PREFIX "subl %1,%0\n"
6948+ "int $4\n0:\n"
6949+ _ASM_EXTABLE(0b, 0b)
6950+#endif
6951+
6952+ : "+m" (v->counter)
6953+ : "ir" (i));
6954+}
6955+
6956+/**
6957+ * atomic_add_unchecked - add integer to atomic variable
6958+ * @i: integer value to add
6959+ * @v: pointer of type atomic_unchecked_t
6960+ *
6961+ * Atomically adds @i to @v.
6962+ */
6963+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6964+{
6965+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6966 : "+m" (v->counter)
6967 : "ir" (i));
6968 }
6969@@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
6970 */
6971 static inline void atomic_sub(int i, atomic_t *v)
6972 {
6973- asm volatile(LOCK_PREFIX "subl %1,%0"
6974+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6975+
6976+#ifdef CONFIG_PAX_REFCOUNT
6977+ "jno 0f\n"
6978+ LOCK_PREFIX "addl %1,%0\n"
6979+ "int $4\n0:\n"
6980+ _ASM_EXTABLE(0b, 0b)
6981+#endif
6982+
6983+ : "+m" (v->counter)
6984+ : "ir" (i));
6985+}
6986+
6987+/**
6988+ * atomic_sub_unchecked - subtract integer from atomic variable
6989+ * @i: integer value to subtract
6990+ * @v: pointer of type atomic_unchecked_t
6991+ *
6992+ * Atomically subtracts @i from @v.
6993+ */
6994+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
6995+{
6996+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
6997 : "+m" (v->counter)
6998 : "ir" (i));
6999 }
7000@@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
7001 {
7002 unsigned char c;
7003
7004- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7005+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7006+
7007+#ifdef CONFIG_PAX_REFCOUNT
7008+ "jno 0f\n"
7009+ LOCK_PREFIX "addl %2,%0\n"
7010+ "int $4\n0:\n"
7011+ _ASM_EXTABLE(0b, 0b)
7012+#endif
7013+
7014+ "sete %1\n"
7015 : "+m" (v->counter), "=qm" (c)
7016 : "ir" (i) : "memory");
7017 return c;
7018@@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in
7019 */
7020 static inline void atomic_inc(atomic_t *v)
7021 {
7022- asm volatile(LOCK_PREFIX "incl %0"
7023+ asm volatile(LOCK_PREFIX "incl %0\n"
7024+
7025+#ifdef CONFIG_PAX_REFCOUNT
7026+ "jno 0f\n"
7027+ LOCK_PREFIX "decl %0\n"
7028+ "int $4\n0:\n"
7029+ _ASM_EXTABLE(0b, 0b)
7030+#endif
7031+
7032+ : "+m" (v->counter));
7033+}
7034+
7035+/**
7036+ * atomic_inc_unchecked - increment atomic variable
7037+ * @v: pointer of type atomic_unchecked_t
7038+ *
7039+ * Atomically increments @v by 1.
7040+ */
7041+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7042+{
7043+ asm volatile(LOCK_PREFIX "incl %0\n"
7044 : "+m" (v->counter));
7045 }
7046
7047@@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *
7048 */
7049 static inline void atomic_dec(atomic_t *v)
7050 {
7051- asm volatile(LOCK_PREFIX "decl %0"
7052+ asm volatile(LOCK_PREFIX "decl %0\n"
7053+
7054+#ifdef CONFIG_PAX_REFCOUNT
7055+ "jno 0f\n"
7056+ LOCK_PREFIX "incl %0\n"
7057+ "int $4\n0:\n"
7058+ _ASM_EXTABLE(0b, 0b)
7059+#endif
7060+
7061+ : "+m" (v->counter));
7062+}
7063+
7064+/**
7065+ * atomic_dec_unchecked - decrement atomic variable
7066+ * @v: pointer of type atomic_unchecked_t
7067+ *
7068+ * Atomically decrements @v by 1.
7069+ */
7070+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7071+{
7072+ asm volatile(LOCK_PREFIX "decl %0\n"
7073 : "+m" (v->counter));
7074 }
7075
7076@@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at
7077 {
7078 unsigned char c;
7079
7080- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7081+ asm volatile(LOCK_PREFIX "decl %0\n"
7082+
7083+#ifdef CONFIG_PAX_REFCOUNT
7084+ "jno 0f\n"
7085+ LOCK_PREFIX "incl %0\n"
7086+ "int $4\n0:\n"
7087+ _ASM_EXTABLE(0b, 0b)
7088+#endif
7089+
7090+ "sete %1\n"
7091 : "+m" (v->counter), "=qm" (c)
7092 : : "memory");
7093 return c != 0;
7094@@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at
7095 {
7096 unsigned char c;
7097
7098- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7099+ asm volatile(LOCK_PREFIX "incl %0\n"
7100+
7101+#ifdef CONFIG_PAX_REFCOUNT
7102+ "jno 0f\n"
7103+ LOCK_PREFIX "decl %0\n"
7104+ "into\n0:\n"
7105+ _ASM_EXTABLE(0b, 0b)
7106+#endif
7107+
7108+ "sete %1\n"
7109+ : "+m" (v->counter), "=qm" (c)
7110+ : : "memory");
7111+ return c != 0;
7112+}
7113+
7114+/**
7115+ * atomic_inc_and_test_unchecked - increment and test
7116+ * @v: pointer of type atomic_unchecked_t
7117+ *
7118+ * Atomically increments @v by 1
7119+ * and returns true if the result is zero, or false for all
7120+ * other cases.
7121+ */
7122+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7123+{
7124+ unsigned char c;
7125+
7126+ asm volatile(LOCK_PREFIX "incl %0\n"
7127+ "sete %1\n"
7128 : "+m" (v->counter), "=qm" (c)
7129 : : "memory");
7130 return c != 0;
7131@@ -156,7 +309,16 @@ static inline int atomic_add_negative(in
7132 {
7133 unsigned char c;
7134
7135- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7136+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7137+
7138+#ifdef CONFIG_PAX_REFCOUNT
7139+ "jno 0f\n"
7140+ LOCK_PREFIX "subl %2,%0\n"
7141+ "int $4\n0:\n"
7142+ _ASM_EXTABLE(0b, 0b)
7143+#endif
7144+
7145+ "sets %1\n"
7146 : "+m" (v->counter), "=qm" (c)
7147 : "ir" (i) : "memory");
7148 return c;
7149@@ -179,6 +341,46 @@ static inline int atomic_add_return(int
7150 #endif
7151 /* Modern 486+ processor */
7152 __i = i;
7153+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7154+
7155+#ifdef CONFIG_PAX_REFCOUNT
7156+ "jno 0f\n"
7157+ "movl %0, %1\n"
7158+ "int $4\n0:\n"
7159+ _ASM_EXTABLE(0b, 0b)
7160+#endif
7161+
7162+ : "+r" (i), "+m" (v->counter)
7163+ : : "memory");
7164+ return i + __i;
7165+
7166+#ifdef CONFIG_M386
7167+no_xadd: /* Legacy 386 processor */
7168+ local_irq_save(flags);
7169+ __i = atomic_read(v);
7170+ atomic_set(v, i + __i);
7171+ local_irq_restore(flags);
7172+ return i + __i;
7173+#endif
7174+}
7175+
7176+/**
7177+ * atomic_add_return_unchecked - add integer and return
7178+ * @v: pointer of type atomic_unchecked_t
7179+ * @i: integer value to add
7180+ *
7181+ * Atomically adds @i to @v and returns @i + @v
7182+ */
7183+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7184+{
7185+ int __i;
7186+#ifdef CONFIG_M386
7187+ unsigned long flags;
7188+ if (unlikely(boot_cpu_data.x86 <= 3))
7189+ goto no_xadd;
7190+#endif
7191+ /* Modern 486+ processor */
7192+ __i = i;
7193 asm volatile(LOCK_PREFIX "xaddl %0, %1"
7194 : "+r" (i), "+m" (v->counter)
7195 : : "memory");
7196@@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_
7197 return cmpxchg(&v->counter, old, new);
7198 }
7199
7200+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7201+{
7202+ return cmpxchg(&v->counter, old, new);
7203+}
7204+
7205 static inline int atomic_xchg(atomic_t *v, int new)
7206 {
7207 return xchg(&v->counter, new);
7208 }
7209
7210+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7211+{
7212+ return xchg(&v->counter, new);
7213+}
7214+
7215 /**
7216 * atomic_add_unless - add unless the number is already a given value
7217 * @v: pointer of type atomic_t
7218@@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *
7219 */
7220 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7221 {
7222- int c, old;
7223+ int c, old, new;
7224 c = atomic_read(v);
7225 for (;;) {
7226- if (unlikely(c == (u)))
7227+ if (unlikely(c == u))
7228 break;
7229- old = atomic_cmpxchg((v), c, c + (a));
7230+
7231+ asm volatile("addl %2,%0\n"
7232+
7233+#ifdef CONFIG_PAX_REFCOUNT
7234+ "jno 0f\n"
7235+ "subl %2,%0\n"
7236+ "int $4\n0:\n"
7237+ _ASM_EXTABLE(0b, 0b)
7238+#endif
7239+
7240+ : "=r" (new)
7241+ : "0" (c), "ir" (a));
7242+
7243+ old = atomic_cmpxchg(v, c, new);
7244 if (likely(old == c))
7245 break;
7246 c = old;
7247 }
7248- return c != (u);
7249+ return c != u;
7250 }
7251
7252 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7253
7254 #define atomic_inc_return(v) (atomic_add_return(1, v))
7255+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7256+{
7257+ return atomic_add_return_unchecked(1, v);
7258+}
7259 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7260
7261 /* These are x86-specific, used by some header files */
7262@@ -266,9 +495,18 @@ typedef struct {
7263 u64 __aligned(8) counter;
7264 } atomic64_t;
7265
7266+#ifdef CONFIG_PAX_REFCOUNT
7267+typedef struct {
7268+ u64 __aligned(8) counter;
7269+} atomic64_unchecked_t;
7270+#else
7271+typedef atomic64_t atomic64_unchecked_t;
7272+#endif
7273+
7274 #define ATOMIC64_INIT(val) { (val) }
7275
7276 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
7277+extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
7278
7279 /**
7280 * atomic64_xchg - xchg atomic64 variable
7281@@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *
7282 * the old value.
7283 */
7284 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
7285+extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7286
7287 /**
7288 * atomic64_set - set atomic64 variable
7289@@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr
7290 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
7291
7292 /**
7293+ * atomic64_unchecked_set - set atomic64 variable
7294+ * @ptr: pointer to type atomic64_unchecked_t
7295+ * @new_val: value to assign
7296+ *
7297+ * Atomically sets the value of @ptr to @new_val.
7298+ */
7299+extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7300+
7301+/**
7302 * atomic64_read - read atomic64 variable
7303 * @ptr: pointer to type atomic64_t
7304 *
7305@@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64
7306 return res;
7307 }
7308
7309-extern u64 atomic64_read(atomic64_t *ptr);
7310+/**
7311+ * atomic64_read_unchecked - read atomic64 variable
7312+ * @ptr: pointer to type atomic64_unchecked_t
7313+ *
7314+ * Atomically reads the value of @ptr and returns it.
7315+ */
7316+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
7317+{
7318+ u64 res;
7319+
7320+ /*
7321+ * Note, we inline this atomic64_unchecked_t primitive because
7322+ * it only clobbers EAX/EDX and leaves the others
7323+ * untouched. We also (somewhat subtly) rely on the
7324+ * fact that cmpxchg8b returns the current 64-bit value
7325+ * of the memory location we are touching:
7326+ */
7327+ asm volatile(
7328+ "mov %%ebx, %%eax\n\t"
7329+ "mov %%ecx, %%edx\n\t"
7330+ LOCK_PREFIX "cmpxchg8b %1\n"
7331+ : "=&A" (res)
7332+ : "m" (*ptr)
7333+ );
7334+
7335+ return res;
7336+}
7337
7338 /**
7339 * atomic64_add_return - add and return
7340@@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta
7341 * Other variants with different arithmetic operators:
7342 */
7343 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
7344+extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7345 extern u64 atomic64_inc_return(atomic64_t *ptr);
7346+extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
7347 extern u64 atomic64_dec_return(atomic64_t *ptr);
7348+extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
7349
7350 /**
7351 * atomic64_add - add integer to atomic64 variable
7352@@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_
7353 extern void atomic64_add(u64 delta, atomic64_t *ptr);
7354
7355 /**
7356+ * atomic64_add_unchecked - add integer to atomic64 variable
7357+ * @delta: integer value to add
7358+ * @ptr: pointer to type atomic64_unchecked_t
7359+ *
7360+ * Atomically adds @delta to @ptr.
7361+ */
7362+extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7363+
7364+/**
7365 * atomic64_sub - subtract the atomic64 variable
7366 * @delta: integer value to subtract
7367 * @ptr: pointer to type atomic64_t
7368@@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom
7369 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
7370
7371 /**
7372+ * atomic64_sub_unchecked - subtract the atomic64 variable
7373+ * @delta: integer value to subtract
7374+ * @ptr: pointer to type atomic64_unchecked_t
7375+ *
7376+ * Atomically subtracts @delta from @ptr.
7377+ */
7378+extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7379+
7380+/**
7381 * atomic64_sub_and_test - subtract value from variable and test result
7382 * @delta: integer value to subtract
7383 * @ptr: pointer to type atomic64_t
7384@@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del
7385 extern void atomic64_inc(atomic64_t *ptr);
7386
7387 /**
7388+ * atomic64_inc_unchecked - increment atomic64 variable
7389+ * @ptr: pointer to type atomic64_unchecked_t
7390+ *
7391+ * Atomically increments @ptr by 1.
7392+ */
7393+extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
7394+
7395+/**
7396 * atomic64_dec - decrement atomic64 variable
7397 * @ptr: pointer to type atomic64_t
7398 *
7399@@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr
7400 extern void atomic64_dec(atomic64_t *ptr);
7401
7402 /**
7403+ * atomic64_dec_unchecked - decrement atomic64 variable
7404+ * @ptr: pointer to type atomic64_unchecked_t
7405+ *
7406+ * Atomically decrements @ptr by 1.
7407+ */
7408+extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
7409+
7410+/**
7411 * atomic64_dec_and_test - decrement and test
7412 * @ptr: pointer to type atomic64_t
7413 *
7414diff -urNp linux-2.6.32.43/arch/x86/include/asm/atomic_64.h linux-2.6.32.43/arch/x86/include/asm/atomic_64.h
7415--- linux-2.6.32.43/arch/x86/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
7416+++ linux-2.6.32.43/arch/x86/include/asm/atomic_64.h 2011-05-04 18:35:31.000000000 -0400
7417@@ -24,6 +24,17 @@ static inline int atomic_read(const atom
7418 }
7419
7420 /**
7421+ * atomic_read_unchecked - read atomic variable
7422+ * @v: pointer of type atomic_unchecked_t
7423+ *
7424+ * Atomically reads the value of @v.
7425+ */
7426+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7427+{
7428+ return v->counter;
7429+}
7430+
7431+/**
7432 * atomic_set - set atomic variable
7433 * @v: pointer of type atomic_t
7434 * @i: required value
7435@@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
7436 }
7437
7438 /**
7439+ * atomic_set_unchecked - set atomic variable
7440+ * @v: pointer of type atomic_unchecked_t
7441+ * @i: required value
7442+ *
7443+ * Atomically sets the value of @v to @i.
7444+ */
7445+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7446+{
7447+ v->counter = i;
7448+}
7449+
7450+/**
7451 * atomic_add - add integer to atomic variable
7452 * @i: integer value to add
7453 * @v: pointer of type atomic_t
7454@@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
7455 */
7456 static inline void atomic_add(int i, atomic_t *v)
7457 {
7458- asm volatile(LOCK_PREFIX "addl %1,%0"
7459+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7460+
7461+#ifdef CONFIG_PAX_REFCOUNT
7462+ "jno 0f\n"
7463+ LOCK_PREFIX "subl %1,%0\n"
7464+ "int $4\n0:\n"
7465+ _ASM_EXTABLE(0b, 0b)
7466+#endif
7467+
7468+ : "=m" (v->counter)
7469+ : "ir" (i), "m" (v->counter));
7470+}
7471+
7472+/**
7473+ * atomic_add_unchecked - add integer to atomic variable
7474+ * @i: integer value to add
7475+ * @v: pointer of type atomic_unchecked_t
7476+ *
7477+ * Atomically adds @i to @v.
7478+ */
7479+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7480+{
7481+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7482 : "=m" (v->counter)
7483 : "ir" (i), "m" (v->counter));
7484 }
7485@@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
7486 */
7487 static inline void atomic_sub(int i, atomic_t *v)
7488 {
7489- asm volatile(LOCK_PREFIX "subl %1,%0"
7490+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7491+
7492+#ifdef CONFIG_PAX_REFCOUNT
7493+ "jno 0f\n"
7494+ LOCK_PREFIX "addl %1,%0\n"
7495+ "int $4\n0:\n"
7496+ _ASM_EXTABLE(0b, 0b)
7497+#endif
7498+
7499+ : "=m" (v->counter)
7500+ : "ir" (i), "m" (v->counter));
7501+}
7502+
7503+/**
7504+ * atomic_sub_unchecked - subtract the atomic variable
7505+ * @i: integer value to subtract
7506+ * @v: pointer of type atomic_unchecked_t
7507+ *
7508+ * Atomically subtracts @i from @v.
7509+ */
7510+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7511+{
7512+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7513 : "=m" (v->counter)
7514 : "ir" (i), "m" (v->counter));
7515 }
7516@@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
7517 {
7518 unsigned char c;
7519
7520- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7521+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7522+
7523+#ifdef CONFIG_PAX_REFCOUNT
7524+ "jno 0f\n"
7525+ LOCK_PREFIX "addl %2,%0\n"
7526+ "int $4\n0:\n"
7527+ _ASM_EXTABLE(0b, 0b)
7528+#endif
7529+
7530+ "sete %1\n"
7531 : "=m" (v->counter), "=qm" (c)
7532 : "ir" (i), "m" (v->counter) : "memory");
7533 return c;
7534@@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in
7535 */
7536 static inline void atomic_inc(atomic_t *v)
7537 {
7538- asm volatile(LOCK_PREFIX "incl %0"
7539+ asm volatile(LOCK_PREFIX "incl %0\n"
7540+
7541+#ifdef CONFIG_PAX_REFCOUNT
7542+ "jno 0f\n"
7543+ LOCK_PREFIX "decl %0\n"
7544+ "int $4\n0:\n"
7545+ _ASM_EXTABLE(0b, 0b)
7546+#endif
7547+
7548+ : "=m" (v->counter)
7549+ : "m" (v->counter));
7550+}
7551+
7552+/**
7553+ * atomic_inc_unchecked - increment atomic variable
7554+ * @v: pointer of type atomic_unchecked_t
7555+ *
7556+ * Atomically increments @v by 1.
7557+ */
7558+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7559+{
7560+ asm volatile(LOCK_PREFIX "incl %0\n"
7561 : "=m" (v->counter)
7562 : "m" (v->counter));
7563 }
7564@@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *
7565 */
7566 static inline void atomic_dec(atomic_t *v)
7567 {
7568- asm volatile(LOCK_PREFIX "decl %0"
7569+ asm volatile(LOCK_PREFIX "decl %0\n"
7570+
7571+#ifdef CONFIG_PAX_REFCOUNT
7572+ "jno 0f\n"
7573+ LOCK_PREFIX "incl %0\n"
7574+ "int $4\n0:\n"
7575+ _ASM_EXTABLE(0b, 0b)
7576+#endif
7577+
7578+ : "=m" (v->counter)
7579+ : "m" (v->counter));
7580+}
7581+
7582+/**
7583+ * atomic_dec_unchecked - decrement atomic variable
7584+ * @v: pointer of type atomic_unchecked_t
7585+ *
7586+ * Atomically decrements @v by 1.
7587+ */
7588+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7589+{
7590+ asm volatile(LOCK_PREFIX "decl %0\n"
7591 : "=m" (v->counter)
7592 : "m" (v->counter));
7593 }
7594@@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at
7595 {
7596 unsigned char c;
7597
7598- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7599+ asm volatile(LOCK_PREFIX "decl %0\n"
7600+
7601+#ifdef CONFIG_PAX_REFCOUNT
7602+ "jno 0f\n"
7603+ LOCK_PREFIX "incl %0\n"
7604+ "int $4\n0:\n"
7605+ _ASM_EXTABLE(0b, 0b)
7606+#endif
7607+
7608+ "sete %1\n"
7609 : "=m" (v->counter), "=qm" (c)
7610 : "m" (v->counter) : "memory");
7611 return c != 0;
7612@@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at
7613 {
7614 unsigned char c;
7615
7616- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7617+ asm volatile(LOCK_PREFIX "incl %0\n"
7618+
7619+#ifdef CONFIG_PAX_REFCOUNT
7620+ "jno 0f\n"
7621+ LOCK_PREFIX "decl %0\n"
7622+ "int $4\n0:\n"
7623+ _ASM_EXTABLE(0b, 0b)
7624+#endif
7625+
7626+ "sete %1\n"
7627+ : "=m" (v->counter), "=qm" (c)
7628+ : "m" (v->counter) : "memory");
7629+ return c != 0;
7630+}
7631+
7632+/**
7633+ * atomic_inc_and_test_unchecked - increment and test
7634+ * @v: pointer of type atomic_unchecked_t
7635+ *
7636+ * Atomically increments @v by 1
7637+ * and returns true if the result is zero, or false for all
7638+ * other cases.
7639+ */
7640+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7641+{
7642+ unsigned char c;
7643+
7644+ asm volatile(LOCK_PREFIX "incl %0\n"
7645+ "sete %1\n"
7646 : "=m" (v->counter), "=qm" (c)
7647 : "m" (v->counter) : "memory");
7648 return c != 0;
7649@@ -157,7 +312,16 @@ static inline int atomic_add_negative(in
7650 {
7651 unsigned char c;
7652
7653- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7654+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7655+
7656+#ifdef CONFIG_PAX_REFCOUNT
7657+ "jno 0f\n"
7658+ LOCK_PREFIX "subl %2,%0\n"
7659+ "int $4\n0:\n"
7660+ _ASM_EXTABLE(0b, 0b)
7661+#endif
7662+
7663+ "sets %1\n"
7664 : "=m" (v->counter), "=qm" (c)
7665 : "ir" (i), "m" (v->counter) : "memory");
7666 return c;
7667@@ -173,7 +337,31 @@ static inline int atomic_add_negative(in
7668 static inline int atomic_add_return(int i, atomic_t *v)
7669 {
7670 int __i = i;
7671- asm volatile(LOCK_PREFIX "xaddl %0, %1"
7672+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7673+
7674+#ifdef CONFIG_PAX_REFCOUNT
7675+ "jno 0f\n"
7676+ "movl %0, %1\n"
7677+ "int $4\n0:\n"
7678+ _ASM_EXTABLE(0b, 0b)
7679+#endif
7680+
7681+ : "+r" (i), "+m" (v->counter)
7682+ : : "memory");
7683+ return i + __i;
7684+}
7685+
7686+/**
7687+ * atomic_add_return_unchecked - add and return
7688+ * @i: integer value to add
7689+ * @v: pointer of type atomic_unchecked_t
7690+ *
7691+ * Atomically adds @i to @v and returns @i + @v
7692+ */
7693+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7694+{
7695+ int __i = i;
7696+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7697 : "+r" (i), "+m" (v->counter)
7698 : : "memory");
7699 return i + __i;
7700@@ -185,6 +373,10 @@ static inline int atomic_sub_return(int
7701 }
7702
7703 #define atomic_inc_return(v) (atomic_add_return(1, v))
7704+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7705+{
7706+ return atomic_add_return_unchecked(1, v);
7707+}
7708 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7709
7710 /* The 64-bit atomic type */
7711@@ -204,6 +396,18 @@ static inline long atomic64_read(const a
7712 }
7713
7714 /**
7715+ * atomic64_read_unchecked - read atomic64 variable
7716+ * @v: pointer of type atomic64_unchecked_t
7717+ *
7718+ * Atomically reads the value of @v.
7719+ * Doesn't imply a read memory barrier.
7720+ */
7721+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7722+{
7723+ return v->counter;
7724+}
7725+
7726+/**
7727 * atomic64_set - set atomic64 variable
7728 * @v: pointer to type atomic64_t
7729 * @i: required value
7730@@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64
7731 }
7732
7733 /**
7734+ * atomic64_set_unchecked - set atomic64 variable
7735+ * @v: pointer to type atomic64_unchecked_t
7736+ * @i: required value
7737+ *
7738+ * Atomically sets the value of @v to @i.
7739+ */
7740+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7741+{
7742+ v->counter = i;
7743+}
7744+
7745+/**
7746 * atomic64_add - add integer to atomic64 variable
7747 * @i: integer value to add
7748 * @v: pointer to type atomic64_t
7749@@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64
7750 */
7751 static inline void atomic64_add(long i, atomic64_t *v)
7752 {
7753+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
7754+
7755+#ifdef CONFIG_PAX_REFCOUNT
7756+ "jno 0f\n"
7757+ LOCK_PREFIX "subq %1,%0\n"
7758+ "int $4\n0:\n"
7759+ _ASM_EXTABLE(0b, 0b)
7760+#endif
7761+
7762+ : "=m" (v->counter)
7763+ : "er" (i), "m" (v->counter));
7764+}
7765+
7766+/**
7767+ * atomic64_add_unchecked - add integer to atomic64 variable
7768+ * @i: integer value to add
7769+ * @v: pointer to type atomic64_unchecked_t
7770+ *
7771+ * Atomically adds @i to @v.
7772+ */
7773+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7774+{
7775 asm volatile(LOCK_PREFIX "addq %1,%0"
7776 : "=m" (v->counter)
7777 : "er" (i), "m" (v->counter));
7778@@ -238,7 +476,15 @@ static inline void atomic64_add(long i,
7779 */
7780 static inline void atomic64_sub(long i, atomic64_t *v)
7781 {
7782- asm volatile(LOCK_PREFIX "subq %1,%0"
7783+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7784+
7785+#ifdef CONFIG_PAX_REFCOUNT
7786+ "jno 0f\n"
7787+ LOCK_PREFIX "addq %1,%0\n"
7788+ "int $4\n0:\n"
7789+ _ASM_EXTABLE(0b, 0b)
7790+#endif
7791+
7792 : "=m" (v->counter)
7793 : "er" (i), "m" (v->counter));
7794 }
7795@@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(
7796 {
7797 unsigned char c;
7798
7799- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7800+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
7801+
7802+#ifdef CONFIG_PAX_REFCOUNT
7803+ "jno 0f\n"
7804+ LOCK_PREFIX "addq %2,%0\n"
7805+ "int $4\n0:\n"
7806+ _ASM_EXTABLE(0b, 0b)
7807+#endif
7808+
7809+ "sete %1\n"
7810 : "=m" (v->counter), "=qm" (c)
7811 : "er" (i), "m" (v->counter) : "memory");
7812 return c;
7813@@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(
7814 */
7815 static inline void atomic64_inc(atomic64_t *v)
7816 {
7817+ asm volatile(LOCK_PREFIX "incq %0\n"
7818+
7819+#ifdef CONFIG_PAX_REFCOUNT
7820+ "jno 0f\n"
7821+ LOCK_PREFIX "decq %0\n"
7822+ "int $4\n0:\n"
7823+ _ASM_EXTABLE(0b, 0b)
7824+#endif
7825+
7826+ : "=m" (v->counter)
7827+ : "m" (v->counter));
7828+}
7829+
7830+/**
7831+ * atomic64_inc_unchecked - increment atomic64 variable
7832+ * @v: pointer to type atomic64_unchecked_t
7833+ *
7834+ * Atomically increments @v by 1.
7835+ */
7836+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7837+{
7838 asm volatile(LOCK_PREFIX "incq %0"
7839 : "=m" (v->counter)
7840 : "m" (v->counter));
7841@@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64
7842 */
7843 static inline void atomic64_dec(atomic64_t *v)
7844 {
7845- asm volatile(LOCK_PREFIX "decq %0"
7846+ asm volatile(LOCK_PREFIX "decq %0\n"
7847+
7848+#ifdef CONFIG_PAX_REFCOUNT
7849+ "jno 0f\n"
7850+ LOCK_PREFIX "incq %0\n"
7851+ "int $4\n0:\n"
7852+ _ASM_EXTABLE(0b, 0b)
7853+#endif
7854+
7855+ : "=m" (v->counter)
7856+ : "m" (v->counter));
7857+}
7858+
7859+/**
7860+ * atomic64_dec_unchecked - decrement atomic64 variable
7861+ * @v: pointer to type atomic64_unchecked_t
7862+ *
7863+ * Atomically decrements @v by 1.
7864+ */
7865+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7866+{
7867+ asm volatile(LOCK_PREFIX "decq %0\n"
7868 : "=m" (v->counter)
7869 : "m" (v->counter));
7870 }
7871@@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(
7872 {
7873 unsigned char c;
7874
7875- asm volatile(LOCK_PREFIX "decq %0; sete %1"
7876+ asm volatile(LOCK_PREFIX "decq %0\n"
7877+
7878+#ifdef CONFIG_PAX_REFCOUNT
7879+ "jno 0f\n"
7880+ LOCK_PREFIX "incq %0\n"
7881+ "int $4\n0:\n"
7882+ _ASM_EXTABLE(0b, 0b)
7883+#endif
7884+
7885+ "sete %1\n"
7886 : "=m" (v->counter), "=qm" (c)
7887 : "m" (v->counter) : "memory");
7888 return c != 0;
7889@@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(
7890 {
7891 unsigned char c;
7892
7893- asm volatile(LOCK_PREFIX "incq %0; sete %1"
7894+ asm volatile(LOCK_PREFIX "incq %0\n"
7895+
7896+#ifdef CONFIG_PAX_REFCOUNT
7897+ "jno 0f\n"
7898+ LOCK_PREFIX "decq %0\n"
7899+ "int $4\n0:\n"
7900+ _ASM_EXTABLE(0b, 0b)
7901+#endif
7902+
7903+ "sete %1\n"
7904 : "=m" (v->counter), "=qm" (c)
7905 : "m" (v->counter) : "memory");
7906 return c != 0;
7907@@ -337,7 +652,16 @@ static inline int atomic64_add_negative(
7908 {
7909 unsigned char c;
7910
7911- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7912+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
7913+
7914+#ifdef CONFIG_PAX_REFCOUNT
7915+ "jno 0f\n"
7916+ LOCK_PREFIX "subq %2,%0\n"
7917+ "int $4\n0:\n"
7918+ _ASM_EXTABLE(0b, 0b)
7919+#endif
7920+
7921+ "sets %1\n"
7922 : "=m" (v->counter), "=qm" (c)
7923 : "er" (i), "m" (v->counter) : "memory");
7924 return c;
7925@@ -353,7 +677,31 @@ static inline int atomic64_add_negative(
7926 static inline long atomic64_add_return(long i, atomic64_t *v)
7927 {
7928 long __i = i;
7929- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
7930+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
7931+
7932+#ifdef CONFIG_PAX_REFCOUNT
7933+ "jno 0f\n"
7934+ "movq %0, %1\n"
7935+ "int $4\n0:\n"
7936+ _ASM_EXTABLE(0b, 0b)
7937+#endif
7938+
7939+ : "+r" (i), "+m" (v->counter)
7940+ : : "memory");
7941+ return i + __i;
7942+}
7943+
7944+/**
7945+ * atomic64_add_return_unchecked - add and return
7946+ * @i: integer value to add
7947+ * @v: pointer to type atomic64_unchecked_t
7948+ *
7949+ * Atomically adds @i to @v and returns @i + @v
7950+ */
7951+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7952+{
7953+ long __i = i;
7954+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
7955 : "+r" (i), "+m" (v->counter)
7956 : : "memory");
7957 return i + __i;
7958@@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l
7959 }
7960
7961 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7962+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7963+{
7964+ return atomic64_add_return_unchecked(1, v);
7965+}
7966 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7967
7968 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7969@@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom
7970 return cmpxchg(&v->counter, old, new);
7971 }
7972
7973+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
7974+{
7975+ return cmpxchg(&v->counter, old, new);
7976+}
7977+
7978 static inline long atomic64_xchg(atomic64_t *v, long new)
7979 {
7980 return xchg(&v->counter, new);
7981 }
7982
7983+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7984+{
7985+ return xchg(&v->counter, new);
7986+}
7987+
7988 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
7989 {
7990 return cmpxchg(&v->counter, old, new);
7991 }
7992
7993+static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7994+{
7995+ return cmpxchg(&v->counter, old, new);
7996+}
7997+
7998 static inline long atomic_xchg(atomic_t *v, int new)
7999 {
8000 return xchg(&v->counter, new);
8001 }
8002
8003+static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8004+{
8005+ return xchg(&v->counter, new);
8006+}
8007+
8008 /**
8009 * atomic_add_unless - add unless the number is a given value
8010 * @v: pointer of type atomic_t
8011@@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t
8012 */
8013 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8014 {
8015- int c, old;
8016+ int c, old, new;
8017 c = atomic_read(v);
8018 for (;;) {
8019- if (unlikely(c == (u)))
8020+ if (unlikely(c == u))
8021 break;
8022- old = atomic_cmpxchg((v), c, c + (a));
8023+
8024+ asm volatile("addl %2,%0\n"
8025+
8026+#ifdef CONFIG_PAX_REFCOUNT
8027+ "jno 0f\n"
8028+ "subl %2,%0\n"
8029+ "int $4\n0:\n"
8030+ _ASM_EXTABLE(0b, 0b)
8031+#endif
8032+
8033+ : "=r" (new)
8034+ : "0" (c), "ir" (a));
8035+
8036+ old = atomic_cmpxchg(v, c, new);
8037 if (likely(old == c))
8038 break;
8039 c = old;
8040 }
8041- return c != (u);
8042+ return c != u;
8043 }
8044
8045 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8046@@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom
8047 */
8048 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8049 {
8050- long c, old;
8051+ long c, old, new;
8052 c = atomic64_read(v);
8053 for (;;) {
8054- if (unlikely(c == (u)))
8055+ if (unlikely(c == u))
8056 break;
8057- old = atomic64_cmpxchg((v), c, c + (a));
8058+
8059+ asm volatile("addq %2,%0\n"
8060+
8061+#ifdef CONFIG_PAX_REFCOUNT
8062+ "jno 0f\n"
8063+ "subq %2,%0\n"
8064+ "int $4\n0:\n"
8065+ _ASM_EXTABLE(0b, 0b)
8066+#endif
8067+
8068+ : "=r" (new)
8069+ : "0" (c), "er" (a));
8070+
8071+ old = atomic64_cmpxchg(v, c, new);
8072 if (likely(old == c))
8073 break;
8074 c = old;
8075 }
8076- return c != (u);
8077+ return c != u;
8078 }
8079
8080 /**
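
The PAX_REFCOUNT hunks above all follow one shape: perform the locked arithmetic, skip the fixup with "jno" while the overflow flag stays clear, otherwise undo the operation, raise "int $4" and let the _ASM_EXTABLE entry hand control to the overflow handler. Below is a minimal stand-alone sketch of that check-and-undo idea in user-space GNU C for x86-64 (an assumption for the demo: the trap is replaced by a C flag, and LOCK_PREFIX/_ASM_EXTABLE are left out, so this is not the kernel code itself):

#include <limits.h>
#include <stdio.h>

/* Check-and-undo sketch of the PAX_REFCOUNT pattern (x86-64 GNU C only).
 * The kernel raises "int $4" and relies on an exception-table fixup; here
 * an overflow merely sets a flag so the demo can run in user space. */
static int add_checked(long *counter, long i)
{
	unsigned char overflowed = 0;

	asm volatile("addq %2, %0\n\t"
		     "jno 1f\n\t"		/* no signed overflow: done       */
		     "subq %2, %0\n\t"		/* overflow: undo the addition    */
		     "movb $1, %1\n"		/* ...and report it to the caller */
		     "1:"
		     : "+m" (*counter), "+m" (overflowed)
		     : "er" (i)
		     : "cc");
	return overflowed;
}

int main(void)
{
	long v = LONG_MAX;

	printf("overflowed=%d, counter=%ld\n", add_checked(&v, 1), v);
	return 0;
}

The atomic_add_unless()/atomic64_add_unless() hunks apply the same check to the candidate value computed in a register, so the overflow is caught before cmpxchg publishes the new value.
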
8081diff -urNp linux-2.6.32.43/arch/x86/include/asm/bitops.h linux-2.6.32.43/arch/x86/include/asm/bitops.h
8082--- linux-2.6.32.43/arch/x86/include/asm/bitops.h 2011-03-27 14:31:47.000000000 -0400
8083+++ linux-2.6.32.43/arch/x86/include/asm/bitops.h 2011-04-17 15:56:46.000000000 -0400
8084@@ -38,7 +38,7 @@
8085 * a mask operation on a byte.
8086 */
8087 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8088-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8089+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8090 #define CONST_MASK(nr) (1 << ((nr) & 7))
8091
8092 /**
8093diff -urNp linux-2.6.32.43/arch/x86/include/asm/boot.h linux-2.6.32.43/arch/x86/include/asm/boot.h
8094--- linux-2.6.32.43/arch/x86/include/asm/boot.h 2011-03-27 14:31:47.000000000 -0400
8095+++ linux-2.6.32.43/arch/x86/include/asm/boot.h 2011-04-17 15:56:46.000000000 -0400
8096@@ -11,10 +11,15 @@
8097 #include <asm/pgtable_types.h>
8098
8099 /* Physical address where kernel should be loaded. */
8100-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8101+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8102 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8103 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8104
8105+#ifndef __ASSEMBLY__
8106+extern unsigned char __LOAD_PHYSICAL_ADDR[];
8107+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8108+#endif
8109+
8110 /* Minimum kernel alignment, as a power of two */
8111 #ifdef CONFIG_X86_64
8112 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8113diff -urNp linux-2.6.32.43/arch/x86/include/asm/cacheflush.h linux-2.6.32.43/arch/x86/include/asm/cacheflush.h
8114--- linux-2.6.32.43/arch/x86/include/asm/cacheflush.h 2011-03-27 14:31:47.000000000 -0400
8115+++ linux-2.6.32.43/arch/x86/include/asm/cacheflush.h 2011-04-17 15:56:46.000000000 -0400
8116@@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
8117 static inline unsigned long get_page_memtype(struct page *pg)
8118 {
8119 if (!PageUncached(pg) && !PageWC(pg))
8120- return -1;
8121+ return ~0UL;
8122 else if (!PageUncached(pg) && PageWC(pg))
8123 return _PAGE_CACHE_WC;
8124 else if (PageUncached(pg) && !PageWC(pg))
8125@@ -85,7 +85,7 @@ static inline void set_page_memtype(stru
8126 SetPageWC(pg);
8127 break;
8128 default:
8129- case -1:
8130+ case ~0UL:
8131 ClearPageUncached(pg);
8132 ClearPageWC(pg);
8133 break;
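
get_page_memtype() returns an unsigned long, so the cacheflush.h hunk spells its "no memtype" sentinel as ~0UL instead of -1: the stored value is the same either way, the new spelling only states it in unsigned terms. A two-line check of that equivalence in plain C (outside the kernel):

#include <assert.h>
#include <limits.h>

int main(void)
{
	/* -1 converted to unsigned long is ULONG_MAX, i.e. all bits set,
	 * which is exactly what ~0UL spells out directly. */
	assert((unsigned long)-1 == ~0UL);
	assert(~0UL == ULONG_MAX);
	return 0;
}
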
8134diff -urNp linux-2.6.32.43/arch/x86/include/asm/cache.h linux-2.6.32.43/arch/x86/include/asm/cache.h
8135--- linux-2.6.32.43/arch/x86/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
8136+++ linux-2.6.32.43/arch/x86/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
8137@@ -5,9 +5,10 @@
8138
8139 /* L1 cache line size */
8140 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8141-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8142+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8143
8144 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
8145+#define __read_only __attribute__((__section__(".data.read_only")))
8146
8147 #ifdef CONFIG_X86_VSMP
8148 /* vSMP Internode cacheline shift */
8149diff -urNp linux-2.6.32.43/arch/x86/include/asm/checksum_32.h linux-2.6.32.43/arch/x86/include/asm/checksum_32.h
8150--- linux-2.6.32.43/arch/x86/include/asm/checksum_32.h 2011-03-27 14:31:47.000000000 -0400
8151+++ linux-2.6.32.43/arch/x86/include/asm/checksum_32.h 2011-04-17 15:56:46.000000000 -0400
8152@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
8153 int len, __wsum sum,
8154 int *src_err_ptr, int *dst_err_ptr);
8155
8156+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8157+ int len, __wsum sum,
8158+ int *src_err_ptr, int *dst_err_ptr);
8159+
8160+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8161+ int len, __wsum sum,
8162+ int *src_err_ptr, int *dst_err_ptr);
8163+
8164 /*
8165 * Note: when you get a NULL pointer exception here this means someone
8166 * passed in an incorrect kernel address to one of these functions.
8167@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
8168 int *err_ptr)
8169 {
8170 might_sleep();
8171- return csum_partial_copy_generic((__force void *)src, dst,
8172+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
8173 len, sum, err_ptr, NULL);
8174 }
8175
8176@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
8177 {
8178 might_sleep();
8179 if (access_ok(VERIFY_WRITE, dst, len))
8180- return csum_partial_copy_generic(src, (__force void *)dst,
8181+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8182 len, sum, NULL, err_ptr);
8183
8184 if (len)
8185diff -urNp linux-2.6.32.43/arch/x86/include/asm/desc_defs.h linux-2.6.32.43/arch/x86/include/asm/desc_defs.h
8186--- linux-2.6.32.43/arch/x86/include/asm/desc_defs.h 2011-03-27 14:31:47.000000000 -0400
8187+++ linux-2.6.32.43/arch/x86/include/asm/desc_defs.h 2011-04-17 15:56:46.000000000 -0400
8188@@ -31,6 +31,12 @@ struct desc_struct {
8189 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8190 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8191 };
8192+ struct {
8193+ u16 offset_low;
8194+ u16 seg;
8195+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8196+ unsigned offset_high: 16;
8197+ } gate;
8198 };
8199 } __attribute__((packed));
8200
8201diff -urNp linux-2.6.32.43/arch/x86/include/asm/desc.h linux-2.6.32.43/arch/x86/include/asm/desc.h
8202--- linux-2.6.32.43/arch/x86/include/asm/desc.h 2011-03-27 14:31:47.000000000 -0400
8203+++ linux-2.6.32.43/arch/x86/include/asm/desc.h 2011-04-23 12:56:10.000000000 -0400
8204@@ -4,6 +4,7 @@
8205 #include <asm/desc_defs.h>
8206 #include <asm/ldt.h>
8207 #include <asm/mmu.h>
8208+#include <asm/pgtable.h>
8209 #include <linux/smp.h>
8210
8211 static inline void fill_ldt(struct desc_struct *desc,
8212@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
8213 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
8214 desc->type = (info->read_exec_only ^ 1) << 1;
8215 desc->type |= info->contents << 2;
8216+ desc->type |= info->seg_not_present ^ 1;
8217 desc->s = 1;
8218 desc->dpl = 0x3;
8219 desc->p = info->seg_not_present ^ 1;
8220@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
8221 }
8222
8223 extern struct desc_ptr idt_descr;
8224-extern gate_desc idt_table[];
8225-
8226-struct gdt_page {
8227- struct desc_struct gdt[GDT_ENTRIES];
8228-} __attribute__((aligned(PAGE_SIZE)));
8229-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8230+extern gate_desc idt_table[256];
8231
8232+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8233 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8234 {
8235- return per_cpu(gdt_page, cpu).gdt;
8236+ return cpu_gdt_table[cpu];
8237 }
8238
8239 #ifdef CONFIG_X86_64
8240@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
8241 unsigned long base, unsigned dpl, unsigned flags,
8242 unsigned short seg)
8243 {
8244- gate->a = (seg << 16) | (base & 0xffff);
8245- gate->b = (base & 0xffff0000) |
8246- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8247+ gate->gate.offset_low = base;
8248+ gate->gate.seg = seg;
8249+ gate->gate.reserved = 0;
8250+ gate->gate.type = type;
8251+ gate->gate.s = 0;
8252+ gate->gate.dpl = dpl;
8253+ gate->gate.p = 1;
8254+ gate->gate.offset_high = base >> 16;
8255 }
8256
8257 #endif
8258@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
8259 static inline void native_write_idt_entry(gate_desc *idt, int entry,
8260 const gate_desc *gate)
8261 {
8262+ pax_open_kernel();
8263 memcpy(&idt[entry], gate, sizeof(*gate));
8264+ pax_close_kernel();
8265 }
8266
8267 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
8268 const void *desc)
8269 {
8270+ pax_open_kernel();
8271 memcpy(&ldt[entry], desc, 8);
8272+ pax_close_kernel();
8273 }
8274
8275 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
8276@@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
8277 size = sizeof(struct desc_struct);
8278 break;
8279 }
8280+
8281+ pax_open_kernel();
8282 memcpy(&gdt[entry], desc, size);
8283+ pax_close_kernel();
8284 }
8285
8286 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8287@@ -211,7 +221,9 @@ static inline void native_set_ldt(const
8288
8289 static inline void native_load_tr_desc(void)
8290 {
8291+ pax_open_kernel();
8292 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8293+ pax_close_kernel();
8294 }
8295
8296 static inline void native_load_gdt(const struct desc_ptr *dtr)
8297@@ -246,8 +258,10 @@ static inline void native_load_tls(struc
8298 unsigned int i;
8299 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8300
8301+ pax_open_kernel();
8302 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8303 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8304+ pax_close_kernel();
8305 }
8306
8307 #define _LDT_empty(info) \
8308@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
8309 desc->limit = (limit >> 16) & 0xf;
8310 }
8311
8312-static inline void _set_gate(int gate, unsigned type, void *addr,
8313+static inline void _set_gate(int gate, unsigned type, const void *addr,
8314 unsigned dpl, unsigned ist, unsigned seg)
8315 {
8316 gate_desc s;
8317@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
8318 * Pentium F0 0F bugfix can have resulted in the mapped
8319 * IDT being write-protected.
8320 */
8321-static inline void set_intr_gate(unsigned int n, void *addr)
8322+static inline void set_intr_gate(unsigned int n, const void *addr)
8323 {
8324 BUG_ON((unsigned)n > 0xFF);
8325 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8326@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
8327 /*
8328 * This routine sets up an interrupt gate at directory privilege level 3.
8329 */
8330-static inline void set_system_intr_gate(unsigned int n, void *addr)
8331+static inline void set_system_intr_gate(unsigned int n, const void *addr)
8332 {
8333 BUG_ON((unsigned)n > 0xFF);
8334 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8335 }
8336
8337-static inline void set_system_trap_gate(unsigned int n, void *addr)
8338+static inline void set_system_trap_gate(unsigned int n, const void *addr)
8339 {
8340 BUG_ON((unsigned)n > 0xFF);
8341 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8342 }
8343
8344-static inline void set_trap_gate(unsigned int n, void *addr)
8345+static inline void set_trap_gate(unsigned int n, const void *addr)
8346 {
8347 BUG_ON((unsigned)n > 0xFF);
8348 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8349@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
8350 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8351 {
8352 BUG_ON((unsigned)n > 0xFF);
8353- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8354+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8355 }
8356
8357-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8358+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8359 {
8360 BUG_ON((unsigned)n > 0xFF);
8361 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8362 }
8363
8364-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8365+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8366 {
8367 BUG_ON((unsigned)n > 0xFF);
8368 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8369 }
8370
8371+#ifdef CONFIG_X86_32
8372+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8373+{
8374+ struct desc_struct d;
8375+
8376+ if (likely(limit))
8377+ limit = (limit - 1UL) >> PAGE_SHIFT;
8378+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
8379+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8380+}
8381+#endif
8382+
8383 #endif /* _ASM_X86_DESC_H */
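
The pax_open_kernel()/pax_close_kernel() pairs added in desc.h bracket each deliberate update to the IDT, LDT, GDT and TLS slots, plus the TR load (ltr sets the busy bit in the GDT), so that KERNEXEC can keep those tables read-only the rest of the time; the kernel-side mechanism is the native/paravirt pax_open_kernel implementation introduced later in this patch, not mprotect. The bracketing idea itself, sketched with user-space primitives and an invented "table" page:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* User-space analogy for pax_open_kernel()/pax_close_kernel(): the table
 * lives in a page that is normally PROT_READ, and every intentional write
 * briefly lifts and then restores the protection. */
static unsigned char *table;
static long pagesz;

static void open_table(void)
{
	mprotect(table, pagesz, PROT_READ | PROT_WRITE);
}

static void close_table(void)
{
	mprotect(table, pagesz, PROT_READ);
}

int main(void)
{
	pagesz = sysconf(_SC_PAGESIZE);
	table = mmap(NULL, pagesz, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (table == MAP_FAILED)
		return 1;

	open_table();			/* cf. pax_open_kernel()    */
	memset(table, 0x90, 16);	/* the one deliberate write */
	close_table();			/* cf. pax_close_kernel()   */

	printf("first byte: %#x\n", table[0]);	/* reads remain allowed */
	return 0;
}
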
8384diff -urNp linux-2.6.32.43/arch/x86/include/asm/device.h linux-2.6.32.43/arch/x86/include/asm/device.h
8385--- linux-2.6.32.43/arch/x86/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
8386+++ linux-2.6.32.43/arch/x86/include/asm/device.h 2011-04-17 15:56:46.000000000 -0400
8387@@ -6,7 +6,7 @@ struct dev_archdata {
8388 void *acpi_handle;
8389 #endif
8390 #ifdef CONFIG_X86_64
8391-struct dma_map_ops *dma_ops;
8392+ const struct dma_map_ops *dma_ops;
8393 #endif
8394 #ifdef CONFIG_DMAR
8395 void *iommu; /* hook for IOMMU specific extension */
8396diff -urNp linux-2.6.32.43/arch/x86/include/asm/dma-mapping.h linux-2.6.32.43/arch/x86/include/asm/dma-mapping.h
8397--- linux-2.6.32.43/arch/x86/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
8398+++ linux-2.6.32.43/arch/x86/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
8399@@ -25,9 +25,9 @@ extern int iommu_merge;
8400 extern struct device x86_dma_fallback_dev;
8401 extern int panic_on_overflow;
8402
8403-extern struct dma_map_ops *dma_ops;
8404+extern const struct dma_map_ops *dma_ops;
8405
8406-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8407+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
8408 {
8409 #ifdef CONFIG_X86_32
8410 return dma_ops;
8411@@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dm
8412 /* Make sure we keep the same behaviour */
8413 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
8414 {
8415- struct dma_map_ops *ops = get_dma_ops(dev);
8416+ const struct dma_map_ops *ops = get_dma_ops(dev);
8417 if (ops->mapping_error)
8418 return ops->mapping_error(dev, dma_addr);
8419
8420@@ -122,7 +122,7 @@ static inline void *
8421 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8422 gfp_t gfp)
8423 {
8424- struct dma_map_ops *ops = get_dma_ops(dev);
8425+ const struct dma_map_ops *ops = get_dma_ops(dev);
8426 void *memory;
8427
8428 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
8429@@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, s
8430 static inline void dma_free_coherent(struct device *dev, size_t size,
8431 void *vaddr, dma_addr_t bus)
8432 {
8433- struct dma_map_ops *ops = get_dma_ops(dev);
8434+ const struct dma_map_ops *ops = get_dma_ops(dev);
8435
8436 WARN_ON(irqs_disabled()); /* for portability */
8437
8438diff -urNp linux-2.6.32.43/arch/x86/include/asm/e820.h linux-2.6.32.43/arch/x86/include/asm/e820.h
8439--- linux-2.6.32.43/arch/x86/include/asm/e820.h 2011-03-27 14:31:47.000000000 -0400
8440+++ linux-2.6.32.43/arch/x86/include/asm/e820.h 2011-04-17 15:56:46.000000000 -0400
8441@@ -133,7 +133,7 @@ extern char *default_machine_specific_me
8442 #define ISA_END_ADDRESS 0x100000
8443 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
8444
8445-#define BIOS_BEGIN 0x000a0000
8446+#define BIOS_BEGIN 0x000c0000
8447 #define BIOS_END 0x00100000
8448
8449 #ifdef __KERNEL__
8450diff -urNp linux-2.6.32.43/arch/x86/include/asm/elf.h linux-2.6.32.43/arch/x86/include/asm/elf.h
8451--- linux-2.6.32.43/arch/x86/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
8452+++ linux-2.6.32.43/arch/x86/include/asm/elf.h 2011-04-17 15:56:46.000000000 -0400
8453@@ -257,7 +257,25 @@ extern int force_personality32;
8454 the loader. We need to make sure that it is out of the way of the program
8455 that it will "exec", and that there is sufficient room for the brk. */
8456
8457+#ifdef CONFIG_PAX_SEGMEXEC
8458+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8459+#else
8460 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8461+#endif
8462+
8463+#ifdef CONFIG_PAX_ASLR
8464+#ifdef CONFIG_X86_32
8465+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8466+
8467+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8468+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8469+#else
8470+#define PAX_ELF_ET_DYN_BASE 0x400000UL
8471+
8472+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8473+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8474+#endif
8475+#endif
8476
8477 /* This yields a mask that user programs can use to figure out what
8478 instruction set this CPU supports. This could be done in user space,
8479@@ -311,8 +329,7 @@ do { \
8480 #define ARCH_DLINFO \
8481 do { \
8482 if (vdso_enabled) \
8483- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8484- (unsigned long)current->mm->context.vdso); \
8485+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
8486 } while (0)
8487
8488 #define AT_SYSINFO 32
8489@@ -323,7 +340,7 @@ do { \
8490
8491 #endif /* !CONFIG_X86_32 */
8492
8493-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8494+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8495
8496 #define VDSO_ENTRY \
8497 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8498@@ -337,7 +354,4 @@ extern int arch_setup_additional_pages(s
8499 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8500 #define compat_arch_setup_additional_pages syscall32_setup_pages
8501
8502-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8503-#define arch_randomize_brk arch_randomize_brk
8504-
8505 #endif /* _ASM_X86_ELF_H */
8506diff -urNp linux-2.6.32.43/arch/x86/include/asm/emergency-restart.h linux-2.6.32.43/arch/x86/include/asm/emergency-restart.h
8507--- linux-2.6.32.43/arch/x86/include/asm/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
8508+++ linux-2.6.32.43/arch/x86/include/asm/emergency-restart.h 2011-05-22 23:02:06.000000000 -0400
8509@@ -15,6 +15,6 @@ enum reboot_type {
8510
8511 extern enum reboot_type reboot_type;
8512
8513-extern void machine_emergency_restart(void);
8514+extern void machine_emergency_restart(void) __noreturn;
8515
8516 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8517diff -urNp linux-2.6.32.43/arch/x86/include/asm/futex.h linux-2.6.32.43/arch/x86/include/asm/futex.h
8518--- linux-2.6.32.43/arch/x86/include/asm/futex.h 2011-03-27 14:31:47.000000000 -0400
8519+++ linux-2.6.32.43/arch/x86/include/asm/futex.h 2011-04-17 15:56:46.000000000 -0400
8520@@ -12,16 +12,18 @@
8521 #include <asm/system.h>
8522
8523 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8524+ typecheck(u32 *, uaddr); \
8525 asm volatile("1:\t" insn "\n" \
8526 "2:\t.section .fixup,\"ax\"\n" \
8527 "3:\tmov\t%3, %1\n" \
8528 "\tjmp\t2b\n" \
8529 "\t.previous\n" \
8530 _ASM_EXTABLE(1b, 3b) \
8531- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8532+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
8533 : "i" (-EFAULT), "0" (oparg), "1" (0))
8534
8535 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8536+ typecheck(u32 *, uaddr); \
8537 asm volatile("1:\tmovl %2, %0\n" \
8538 "\tmovl\t%0, %3\n" \
8539 "\t" insn "\n" \
8540@@ -34,10 +36,10 @@
8541 _ASM_EXTABLE(1b, 4b) \
8542 _ASM_EXTABLE(2b, 4b) \
8543 : "=&a" (oldval), "=&r" (ret), \
8544- "+m" (*uaddr), "=&r" (tem) \
8545+ "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
8546 : "r" (oparg), "i" (-EFAULT), "1" (0))
8547
8548-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
8549+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8550 {
8551 int op = (encoded_op >> 28) & 7;
8552 int cmp = (encoded_op >> 24) & 15;
8553@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
8554
8555 switch (op) {
8556 case FUTEX_OP_SET:
8557- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8558+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8559 break;
8560 case FUTEX_OP_ADD:
8561- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8562+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8563 uaddr, oparg);
8564 break;
8565 case FUTEX_OP_OR:
8566@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
8567 return ret;
8568 }
8569
8570-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
8571+static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
8572 int newval)
8573 {
8574
8575@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
8576 return -ENOSYS;
8577 #endif
8578
8579- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
8580+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8581 return -EFAULT;
8582
8583- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
8584+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
8585 "2:\t.section .fixup, \"ax\"\n"
8586 "3:\tmov %2, %0\n"
8587 "\tjmp 2b\n"
8588 "\t.previous\n"
8589 _ASM_EXTABLE(1b, 3b)
8590- : "=a" (oldval), "+m" (*uaddr)
8591+ : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
8592 : "i" (-EFAULT), "r" (newval), "0" (oldval)
8593 : "memory"
8594 );
8595diff -urNp linux-2.6.32.43/arch/x86/include/asm/hw_irq.h linux-2.6.32.43/arch/x86/include/asm/hw_irq.h
8596--- linux-2.6.32.43/arch/x86/include/asm/hw_irq.h 2011-03-27 14:31:47.000000000 -0400
8597+++ linux-2.6.32.43/arch/x86/include/asm/hw_irq.h 2011-05-04 17:56:28.000000000 -0400
8598@@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
8599 extern void enable_IO_APIC(void);
8600
8601 /* Statistics */
8602-extern atomic_t irq_err_count;
8603-extern atomic_t irq_mis_count;
8604+extern atomic_unchecked_t irq_err_count;
8605+extern atomic_unchecked_t irq_mis_count;
8606
8607 /* EISA */
8608 extern void eisa_set_level_irq(unsigned int irq);
8609diff -urNp linux-2.6.32.43/arch/x86/include/asm/i387.h linux-2.6.32.43/arch/x86/include/asm/i387.h
8610--- linux-2.6.32.43/arch/x86/include/asm/i387.h 2011-03-27 14:31:47.000000000 -0400
8611+++ linux-2.6.32.43/arch/x86/include/asm/i387.h 2011-04-17 15:56:46.000000000 -0400
8612@@ -60,6 +60,11 @@ static inline int fxrstor_checking(struc
8613 {
8614 int err;
8615
8616+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8617+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8618+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
8619+#endif
8620+
8621 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
8622 "2:\n"
8623 ".section .fixup,\"ax\"\n"
8624@@ -105,6 +110,11 @@ static inline int fxsave_user(struct i38
8625 {
8626 int err;
8627
8628+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8629+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8630+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8631+#endif
8632+
8633 asm volatile("1: rex64/fxsave (%[fx])\n\t"
8634 "2:\n"
8635 ".section .fixup,\"ax\"\n"
8636@@ -195,13 +205,8 @@ static inline int fxrstor_checking(struc
8637 }
8638
8639 /* We need a safe address that is cheap to find and that is already
8640- in L1 during context switch. The best choices are unfortunately
8641- different for UP and SMP */
8642-#ifdef CONFIG_SMP
8643-#define safe_address (__per_cpu_offset[0])
8644-#else
8645-#define safe_address (kstat_cpu(0).cpustat.user)
8646-#endif
8647+ in L1 during context switch. */
8648+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8649
8650 /*
8651 * These must be called with preempt disabled
8652@@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void
8653 struct thread_info *me = current_thread_info();
8654 preempt_disable();
8655 if (me->status & TS_USEDFPU)
8656- __save_init_fpu(me->task);
8657+ __save_init_fpu(current);
8658 else
8659 clts();
8660 }
8661diff -urNp linux-2.6.32.43/arch/x86/include/asm/io_32.h linux-2.6.32.43/arch/x86/include/asm/io_32.h
8662--- linux-2.6.32.43/arch/x86/include/asm/io_32.h 2011-03-27 14:31:47.000000000 -0400
8663+++ linux-2.6.32.43/arch/x86/include/asm/io_32.h 2011-04-17 15:56:46.000000000 -0400
8664@@ -3,6 +3,7 @@
8665
8666 #include <linux/string.h>
8667 #include <linux/compiler.h>
8668+#include <asm/processor.h>
8669
8670 /*
8671 * This file contains the definitions for the x86 IO instructions
8672@@ -42,6 +43,17 @@
8673
8674 #ifdef __KERNEL__
8675
8676+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8677+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8678+{
8679+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8680+}
8681+
8682+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8683+{
8684+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8685+}
8686+
8687 #include <asm-generic/iomap.h>
8688
8689 #include <linux/vmalloc.h>
8690diff -urNp linux-2.6.32.43/arch/x86/include/asm/io_64.h linux-2.6.32.43/arch/x86/include/asm/io_64.h
8691--- linux-2.6.32.43/arch/x86/include/asm/io_64.h 2011-03-27 14:31:47.000000000 -0400
8692+++ linux-2.6.32.43/arch/x86/include/asm/io_64.h 2011-04-17 15:56:46.000000000 -0400
8693@@ -140,6 +140,17 @@ __OUTS(l)
8694
8695 #include <linux/vmalloc.h>
8696
8697+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8698+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8699+{
8700+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8701+}
8702+
8703+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8704+{
8705+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8706+}
8707+
8708 #include <asm-generic/iomap.h>
8709
8710 void __memcpy_fromio(void *, unsigned long, unsigned);
8711diff -urNp linux-2.6.32.43/arch/x86/include/asm/iommu.h linux-2.6.32.43/arch/x86/include/asm/iommu.h
8712--- linux-2.6.32.43/arch/x86/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
8713+++ linux-2.6.32.43/arch/x86/include/asm/iommu.h 2011-04-17 15:56:46.000000000 -0400
8714@@ -3,7 +3,7 @@
8715
8716 extern void pci_iommu_shutdown(void);
8717 extern void no_iommu_init(void);
8718-extern struct dma_map_ops nommu_dma_ops;
8719+extern const struct dma_map_ops nommu_dma_ops;
8720 extern int force_iommu, no_iommu;
8721 extern int iommu_detected;
8722 extern int iommu_pass_through;
8723diff -urNp linux-2.6.32.43/arch/x86/include/asm/irqflags.h linux-2.6.32.43/arch/x86/include/asm/irqflags.h
8724--- linux-2.6.32.43/arch/x86/include/asm/irqflags.h 2011-03-27 14:31:47.000000000 -0400
8725+++ linux-2.6.32.43/arch/x86/include/asm/irqflags.h 2011-04-17 15:56:46.000000000 -0400
8726@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
8727 sti; \
8728 sysexit
8729
8730+#define GET_CR0_INTO_RDI mov %cr0, %rdi
8731+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8732+#define GET_CR3_INTO_RDI mov %cr3, %rdi
8733+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8734+
8735 #else
8736 #define INTERRUPT_RETURN iret
8737 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8738diff -urNp linux-2.6.32.43/arch/x86/include/asm/kprobes.h linux-2.6.32.43/arch/x86/include/asm/kprobes.h
8739--- linux-2.6.32.43/arch/x86/include/asm/kprobes.h 2011-03-27 14:31:47.000000000 -0400
8740+++ linux-2.6.32.43/arch/x86/include/asm/kprobes.h 2011-04-23 12:56:12.000000000 -0400
8741@@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
8742 #define BREAKPOINT_INSTRUCTION 0xcc
8743 #define RELATIVEJUMP_INSTRUCTION 0xe9
8744 #define MAX_INSN_SIZE 16
8745-#define MAX_STACK_SIZE 64
8746-#define MIN_STACK_SIZE(ADDR) \
8747- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8748- THREAD_SIZE - (unsigned long)(ADDR))) \
8749- ? (MAX_STACK_SIZE) \
8750- : (((unsigned long)current_thread_info()) + \
8751- THREAD_SIZE - (unsigned long)(ADDR)))
8752+#define MAX_STACK_SIZE 64UL
8753+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8754
8755 #define flush_insn_slot(p) do { } while (0)
8756
8757diff -urNp linux-2.6.32.43/arch/x86/include/asm/kvm_host.h linux-2.6.32.43/arch/x86/include/asm/kvm_host.h
8758--- linux-2.6.32.43/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:01.000000000 -0400
8759+++ linux-2.6.32.43/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:26.000000000 -0400
8760@@ -536,7 +536,7 @@ struct kvm_x86_ops {
8761 const struct trace_print_flags *exit_reasons_str;
8762 };
8763
8764-extern struct kvm_x86_ops *kvm_x86_ops;
8765+extern const struct kvm_x86_ops *kvm_x86_ops;
8766
8767 int kvm_mmu_module_init(void);
8768 void kvm_mmu_module_exit(void);
8769diff -urNp linux-2.6.32.43/arch/x86/include/asm/local.h linux-2.6.32.43/arch/x86/include/asm/local.h
8770--- linux-2.6.32.43/arch/x86/include/asm/local.h 2011-03-27 14:31:47.000000000 -0400
8771+++ linux-2.6.32.43/arch/x86/include/asm/local.h 2011-04-17 15:56:46.000000000 -0400
8772@@ -18,26 +18,58 @@ typedef struct {
8773
8774 static inline void local_inc(local_t *l)
8775 {
8776- asm volatile(_ASM_INC "%0"
8777+ asm volatile(_ASM_INC "%0\n"
8778+
8779+#ifdef CONFIG_PAX_REFCOUNT
8780+ "jno 0f\n"
8781+ _ASM_DEC "%0\n"
8782+ "int $4\n0:\n"
8783+ _ASM_EXTABLE(0b, 0b)
8784+#endif
8785+
8786 : "+m" (l->a.counter));
8787 }
8788
8789 static inline void local_dec(local_t *l)
8790 {
8791- asm volatile(_ASM_DEC "%0"
8792+ asm volatile(_ASM_DEC "%0\n"
8793+
8794+#ifdef CONFIG_PAX_REFCOUNT
8795+ "jno 0f\n"
8796+ _ASM_INC "%0\n"
8797+ "int $4\n0:\n"
8798+ _ASM_EXTABLE(0b, 0b)
8799+#endif
8800+
8801 : "+m" (l->a.counter));
8802 }
8803
8804 static inline void local_add(long i, local_t *l)
8805 {
8806- asm volatile(_ASM_ADD "%1,%0"
8807+ asm volatile(_ASM_ADD "%1,%0\n"
8808+
8809+#ifdef CONFIG_PAX_REFCOUNT
8810+ "jno 0f\n"
8811+ _ASM_SUB "%1,%0\n"
8812+ "int $4\n0:\n"
8813+ _ASM_EXTABLE(0b, 0b)
8814+#endif
8815+
8816 : "+m" (l->a.counter)
8817 : "ir" (i));
8818 }
8819
8820 static inline void local_sub(long i, local_t *l)
8821 {
8822- asm volatile(_ASM_SUB "%1,%0"
8823+ asm volatile(_ASM_SUB "%1,%0\n"
8824+
8825+#ifdef CONFIG_PAX_REFCOUNT
8826+ "jno 0f\n"
8827+ _ASM_ADD "%1,%0\n"
8828+ "int $4\n0:\n"
8829+ _ASM_EXTABLE(0b, 0b)
8830+#endif
8831+
8832 : "+m" (l->a.counter)
8833 : "ir" (i));
8834 }
8835@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
8836 {
8837 unsigned char c;
8838
8839- asm volatile(_ASM_SUB "%2,%0; sete %1"
8840+ asm volatile(_ASM_SUB "%2,%0\n"
8841+
8842+#ifdef CONFIG_PAX_REFCOUNT
8843+ "jno 0f\n"
8844+ _ASM_ADD "%2,%0\n"
8845+ "int $4\n0:\n"
8846+ _ASM_EXTABLE(0b, 0b)
8847+#endif
8848+
8849+ "sete %1\n"
8850 : "+m" (l->a.counter), "=qm" (c)
8851 : "ir" (i) : "memory");
8852 return c;
8853@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
8854 {
8855 unsigned char c;
8856
8857- asm volatile(_ASM_DEC "%0; sete %1"
8858+ asm volatile(_ASM_DEC "%0\n"
8859+
8860+#ifdef CONFIG_PAX_REFCOUNT
8861+ "jno 0f\n"
8862+ _ASM_INC "%0\n"
8863+ "int $4\n0:\n"
8864+ _ASM_EXTABLE(0b, 0b)
8865+#endif
8866+
8867+ "sete %1\n"
8868 : "+m" (l->a.counter), "=qm" (c)
8869 : : "memory");
8870 return c != 0;
8871@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
8872 {
8873 unsigned char c;
8874
8875- asm volatile(_ASM_INC "%0; sete %1"
8876+ asm volatile(_ASM_INC "%0\n"
8877+
8878+#ifdef CONFIG_PAX_REFCOUNT
8879+ "jno 0f\n"
8880+ _ASM_DEC "%0\n"
8881+ "int $4\n0:\n"
8882+ _ASM_EXTABLE(0b, 0b)
8883+#endif
8884+
8885+ "sete %1\n"
8886 : "+m" (l->a.counter), "=qm" (c)
8887 : : "memory");
8888 return c != 0;
8889@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
8890 {
8891 unsigned char c;
8892
8893- asm volatile(_ASM_ADD "%2,%0; sets %1"
8894+ asm volatile(_ASM_ADD "%2,%0\n"
8895+
8896+#ifdef CONFIG_PAX_REFCOUNT
8897+ "jno 0f\n"
8898+ _ASM_SUB "%2,%0\n"
8899+ "int $4\n0:\n"
8900+ _ASM_EXTABLE(0b, 0b)
8901+#endif
8902+
8903+ "sets %1\n"
8904 : "+m" (l->a.counter), "=qm" (c)
8905 : "ir" (i) : "memory");
8906 return c;
8907@@ -133,7 +201,15 @@ static inline long local_add_return(long
8908 #endif
8909 /* Modern 486+ processor */
8910 __i = i;
8911- asm volatile(_ASM_XADD "%0, %1;"
8912+ asm volatile(_ASM_XADD "%0, %1\n"
8913+
8914+#ifdef CONFIG_PAX_REFCOUNT
8915+ "jno 0f\n"
8916+ _ASM_MOV "%0,%1\n"
8917+ "int $4\n0:\n"
8918+ _ASM_EXTABLE(0b, 0b)
8919+#endif
8920+
8921 : "+r" (i), "+m" (l->a.counter)
8922 : : "memory");
8923 return i + __i;
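
local_add_return() above keeps the caller's increment in __i, lets xadd deposit the counter's old value back into the register operand, and returns i + __i, i.e. the post-add value; the PAX_REFCOUNT branch layers the jno/undo/int $4 check on top of that. The xadd half in isolation, as a user-space x86-64 GNU C sketch:

#include <stdio.h>

/* Fetch-and-add via xadd, mirroring the "__i = i; xadd; return i + __i"
 * shape of local_add_return()/atomic64_add_return() in the hunks above. */
static long add_return(long i, long *counter)
{
	long __i = i;

	asm volatile("lock xaddq %0, %1"
		     : "+r" (i), "+m" (*counter)
		     : : "memory");
	return i + __i;			/* i now holds the old counter value */
}

int main(void)
{
	long v = 40;

	printf("returned %ld, counter now %ld\n", add_return(2, &v), v);
	return 0;
}
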
8924diff -urNp linux-2.6.32.43/arch/x86/include/asm/microcode.h linux-2.6.32.43/arch/x86/include/asm/microcode.h
8925--- linux-2.6.32.43/arch/x86/include/asm/microcode.h 2011-03-27 14:31:47.000000000 -0400
8926+++ linux-2.6.32.43/arch/x86/include/asm/microcode.h 2011-04-17 15:56:46.000000000 -0400
8927@@ -12,13 +12,13 @@ struct device;
8928 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
8929
8930 struct microcode_ops {
8931- enum ucode_state (*request_microcode_user) (int cpu,
8932+ enum ucode_state (* const request_microcode_user) (int cpu,
8933 const void __user *buf, size_t size);
8934
8935- enum ucode_state (*request_microcode_fw) (int cpu,
8936+ enum ucode_state (* const request_microcode_fw) (int cpu,
8937 struct device *device);
8938
8939- void (*microcode_fini_cpu) (int cpu);
8940+ void (* const microcode_fini_cpu) (int cpu);
8941
8942 /*
8943 * The generic 'microcode_core' part guarantees that
8944@@ -38,18 +38,18 @@ struct ucode_cpu_info {
8945 extern struct ucode_cpu_info ucode_cpu_info[];
8946
8947 #ifdef CONFIG_MICROCODE_INTEL
8948-extern struct microcode_ops * __init init_intel_microcode(void);
8949+extern const struct microcode_ops * __init init_intel_microcode(void);
8950 #else
8951-static inline struct microcode_ops * __init init_intel_microcode(void)
8952+static inline const struct microcode_ops * __init init_intel_microcode(void)
8953 {
8954 return NULL;
8955 }
8956 #endif /* CONFIG_MICROCODE_INTEL */
8957
8958 #ifdef CONFIG_MICROCODE_AMD
8959-extern struct microcode_ops * __init init_amd_microcode(void);
8960+extern const struct microcode_ops * __init init_amd_microcode(void);
8961 #else
8962-static inline struct microcode_ops * __init init_amd_microcode(void)
8963+static inline const struct microcode_ops * __init init_amd_microcode(void)
8964 {
8965 return NULL;
8966 }
8967diff -urNp linux-2.6.32.43/arch/x86/include/asm/mman.h linux-2.6.32.43/arch/x86/include/asm/mman.h
8968--- linux-2.6.32.43/arch/x86/include/asm/mman.h 2011-03-27 14:31:47.000000000 -0400
8969+++ linux-2.6.32.43/arch/x86/include/asm/mman.h 2011-04-17 15:56:46.000000000 -0400
8970@@ -5,4 +5,14 @@
8971
8972 #include <asm-generic/mman.h>
8973
8974+#ifdef __KERNEL__
8975+#ifndef __ASSEMBLY__
8976+#ifdef CONFIG_X86_32
8977+#define arch_mmap_check i386_mmap_check
8978+int i386_mmap_check(unsigned long addr, unsigned long len,
8979+ unsigned long flags);
8980+#endif
8981+#endif
8982+#endif
8983+
8984 #endif /* _ASM_X86_MMAN_H */
8985diff -urNp linux-2.6.32.43/arch/x86/include/asm/mmu_context.h linux-2.6.32.43/arch/x86/include/asm/mmu_context.h
8986--- linux-2.6.32.43/arch/x86/include/asm/mmu_context.h 2011-03-27 14:31:47.000000000 -0400
8987+++ linux-2.6.32.43/arch/x86/include/asm/mmu_context.h 2011-04-17 15:56:46.000000000 -0400
8988@@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
8989
8990 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
8991 {
8992+
8993+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8994+ unsigned int i;
8995+ pgd_t *pgd;
8996+
8997+ pax_open_kernel();
8998+ pgd = get_cpu_pgd(smp_processor_id());
8999+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
9000+ if (paravirt_enabled())
9001+ set_pgd(pgd+i, native_make_pgd(0));
9002+ else
9003+ pgd[i] = native_make_pgd(0);
9004+ pax_close_kernel();
9005+#endif
9006+
9007 #ifdef CONFIG_SMP
9008 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
9009 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
9010@@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
9011 struct task_struct *tsk)
9012 {
9013 unsigned cpu = smp_processor_id();
9014+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
9015+ int tlbstate = TLBSTATE_OK;
9016+#endif
9017
9018 if (likely(prev != next)) {
9019 #ifdef CONFIG_SMP
9020+#ifdef CONFIG_X86_32
9021+ tlbstate = percpu_read(cpu_tlbstate.state);
9022+#endif
9023 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9024 percpu_write(cpu_tlbstate.active_mm, next);
9025 #endif
9026 cpumask_set_cpu(cpu, mm_cpumask(next));
9027
9028 /* Re-load page tables */
9029+#ifdef CONFIG_PAX_PER_CPU_PGD
9030+ pax_open_kernel();
9031+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9032+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9033+ pax_close_kernel();
9034+ load_cr3(get_cpu_pgd(cpu));
9035+#else
9036 load_cr3(next->pgd);
9037+#endif
9038
9039 /* stop flush ipis for the previous mm */
9040 cpumask_clear_cpu(cpu, mm_cpumask(prev));
9041@@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
9042 */
9043 if (unlikely(prev->context.ldt != next->context.ldt))
9044 load_LDT_nolock(&next->context);
9045- }
9046+
9047+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9048+ if (!nx_enabled) {
9049+ smp_mb__before_clear_bit();
9050+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
9051+ smp_mb__after_clear_bit();
9052+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9053+ }
9054+#endif
9055+
9056+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9057+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
9058+ prev->context.user_cs_limit != next->context.user_cs_limit))
9059+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9060 #ifdef CONFIG_SMP
9061+ else if (unlikely(tlbstate != TLBSTATE_OK))
9062+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9063+#endif
9064+#endif
9065+
9066+ }
9067 else {
9068+
9069+#ifdef CONFIG_PAX_PER_CPU_PGD
9070+ pax_open_kernel();
9071+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9072+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9073+ pax_close_kernel();
9074+ load_cr3(get_cpu_pgd(cpu));
9075+#endif
9076+
9077+#ifdef CONFIG_SMP
9078 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9079 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9080
9081@@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
9082 * tlb flush IPI delivery. We must reload CR3
9083 * to make sure to use no freed page tables.
9084 */
9085+
9086+#ifndef CONFIG_PAX_PER_CPU_PGD
9087 load_cr3(next->pgd);
9088+#endif
9089+
9090 load_LDT_nolock(&next->context);
9091+
9092+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9093+ if (!nx_enabled)
9094+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9095+#endif
9096+
9097+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9098+#ifdef CONFIG_PAX_PAGEEXEC
9099+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
9100+#endif
9101+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9102+#endif
9103+
9104 }
9105- }
9106 #endif
9107+ }
9108 }
9109
9110 #define activate_mm(prev, next) \
9111diff -urNp linux-2.6.32.43/arch/x86/include/asm/mmu.h linux-2.6.32.43/arch/x86/include/asm/mmu.h
9112--- linux-2.6.32.43/arch/x86/include/asm/mmu.h 2011-03-27 14:31:47.000000000 -0400
9113+++ linux-2.6.32.43/arch/x86/include/asm/mmu.h 2011-04-17 15:56:46.000000000 -0400
9114@@ -9,10 +9,23 @@
9115 * we put the segment information here.
9116 */
9117 typedef struct {
9118- void *ldt;
9119+ struct desc_struct *ldt;
9120 int size;
9121 struct mutex lock;
9122- void *vdso;
9123+ unsigned long vdso;
9124+
9125+#ifdef CONFIG_X86_32
9126+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
9127+ unsigned long user_cs_base;
9128+ unsigned long user_cs_limit;
9129+
9130+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9131+ cpumask_t cpu_user_cs_mask;
9132+#endif
9133+
9134+#endif
9135+#endif
9136+
9137 } mm_context_t;
9138
9139 #ifdef CONFIG_SMP
9140diff -urNp linux-2.6.32.43/arch/x86/include/asm/module.h linux-2.6.32.43/arch/x86/include/asm/module.h
9141--- linux-2.6.32.43/arch/x86/include/asm/module.h 2011-03-27 14:31:47.000000000 -0400
9142+++ linux-2.6.32.43/arch/x86/include/asm/module.h 2011-04-23 13:18:57.000000000 -0400
9143@@ -5,6 +5,7 @@
9144
9145 #ifdef CONFIG_X86_64
9146 /* X86_64 does not define MODULE_PROC_FAMILY */
9147+#define MODULE_PROC_FAMILY ""
9148 #elif defined CONFIG_M386
9149 #define MODULE_PROC_FAMILY "386 "
9150 #elif defined CONFIG_M486
9151@@ -59,13 +60,36 @@
9152 #error unknown processor family
9153 #endif
9154
9155-#ifdef CONFIG_X86_32
9156-# ifdef CONFIG_4KSTACKS
9157-# define MODULE_STACKSIZE "4KSTACKS "
9158-# else
9159-# define MODULE_STACKSIZE ""
9160-# endif
9161-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
9162+#ifdef CONFIG_PAX_MEMORY_UDEREF
9163+#define MODULE_PAX_UDEREF "UDEREF "
9164+#else
9165+#define MODULE_PAX_UDEREF ""
9166+#endif
9167+
9168+#ifdef CONFIG_PAX_KERNEXEC
9169+#define MODULE_PAX_KERNEXEC "KERNEXEC "
9170+#else
9171+#define MODULE_PAX_KERNEXEC ""
9172+#endif
9173+
9174+#ifdef CONFIG_PAX_REFCOUNT
9175+#define MODULE_PAX_REFCOUNT "REFCOUNT "
9176+#else
9177+#define MODULE_PAX_REFCOUNT ""
9178 #endif
9179
9180+#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
9181+#define MODULE_STACKSIZE "4KSTACKS "
9182+#else
9183+#define MODULE_STACKSIZE ""
9184+#endif
9185+
9186+#ifdef CONFIG_GRKERNSEC
9187+#define MODULE_GRSEC "GRSECURITY "
9188+#else
9189+#define MODULE_GRSEC ""
9190+#endif
9191+
9192+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
9193+
9194 #endif /* _ASM_X86_MODULE_H */
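
After the module.h hunk, MODULE_ARCH_VERMAGIC is the processor family string followed by one token per enabled hardening option, and since a module only loads when its vermagic matches the kernel's, those tokens tag module/kernel compatibility for the grsecurity/PaX features. A stand-alone sketch of how the string composes (the CONFIG_* settings and the empty family string are assumptions made up for this demo):

#include <stdio.h>

/* Invented configuration for the demo only. */
#define CONFIG_GRKERNSEC 1
#define CONFIG_PAX_KERNEXEC 1
/* CONFIG_PAX_MEMORY_UDEREF, CONFIG_PAX_REFCOUNT, CONFIG_4KSTACKS unset */

#define MODULE_PROC_FAMILY ""		/* empty on x86_64, per the hunk */

#ifdef CONFIG_4KSTACKS
#define MODULE_STACKSIZE "4KSTACKS "
#else
#define MODULE_STACKSIZE ""
#endif

#ifdef CONFIG_GRKERNSEC
#define MODULE_GRSEC "GRSECURITY "
#else
#define MODULE_GRSEC ""
#endif

#ifdef CONFIG_PAX_KERNEXEC
#define MODULE_PAX_KERNEXEC "KERNEXEC "
#else
#define MODULE_PAX_KERNEXEC ""
#endif

#ifdef CONFIG_PAX_MEMORY_UDEREF
#define MODULE_PAX_UDEREF "UDEREF "
#else
#define MODULE_PAX_UDEREF ""
#endif

#ifdef CONFIG_PAX_REFCOUNT
#define MODULE_PAX_REFCOUNT "REFCOUNT "
#else
#define MODULE_PAX_REFCOUNT ""
#endif

#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT

int main(void)
{
	/* Prints: arch vermagic: "GRSECURITY KERNEXEC " */
	printf("arch vermagic: \"%s\"\n", MODULE_ARCH_VERMAGIC);
	return 0;
}
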
9195diff -urNp linux-2.6.32.43/arch/x86/include/asm/page_64_types.h linux-2.6.32.43/arch/x86/include/asm/page_64_types.h
9196--- linux-2.6.32.43/arch/x86/include/asm/page_64_types.h 2011-03-27 14:31:47.000000000 -0400
9197+++ linux-2.6.32.43/arch/x86/include/asm/page_64_types.h 2011-04-17 15:56:46.000000000 -0400
9198@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9199
9200 /* duplicated to the one in bootmem.h */
9201 extern unsigned long max_pfn;
9202-extern unsigned long phys_base;
9203+extern const unsigned long phys_base;
9204
9205 extern unsigned long __phys_addr(unsigned long);
9206 #define __phys_reloc_hide(x) (x)
9207diff -urNp linux-2.6.32.43/arch/x86/include/asm/paravirt.h linux-2.6.32.43/arch/x86/include/asm/paravirt.h
9208--- linux-2.6.32.43/arch/x86/include/asm/paravirt.h 2011-03-27 14:31:47.000000000 -0400
9209+++ linux-2.6.32.43/arch/x86/include/asm/paravirt.h 2011-04-17 15:56:46.000000000 -0400
9210@@ -729,6 +729,21 @@ static inline void __set_fixmap(unsigned
9211 pv_mmu_ops.set_fixmap(idx, phys, flags);
9212 }
9213
9214+#ifdef CONFIG_PAX_KERNEXEC
9215+static inline unsigned long pax_open_kernel(void)
9216+{
9217+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9218+}
9219+
9220+static inline unsigned long pax_close_kernel(void)
9221+{
9222+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9223+}
9224+#else
9225+static inline unsigned long pax_open_kernel(void) { return 0; }
9226+static inline unsigned long pax_close_kernel(void) { return 0; }
9227+#endif
9228+
9229 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9230
9231 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
9232@@ -945,7 +960,7 @@ extern void default_banner(void);
9233
9234 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9235 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9236-#define PARA_INDIRECT(addr) *%cs:addr
9237+#define PARA_INDIRECT(addr) *%ss:addr
9238 #endif
9239
9240 #define INTERRUPT_RETURN \
9241@@ -1022,6 +1037,21 @@ extern void default_banner(void);
9242 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9243 CLBR_NONE, \
9244 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9245+
9246+#define GET_CR0_INTO_RDI \
9247+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9248+ mov %rax,%rdi
9249+
9250+#define SET_RDI_INTO_CR0 \
9251+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9252+
9253+#define GET_CR3_INTO_RDI \
9254+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9255+ mov %rax,%rdi
9256+
9257+#define SET_RDI_INTO_CR3 \
9258+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9259+
9260 #endif /* CONFIG_X86_32 */
9261
9262 #endif /* __ASSEMBLY__ */
9263diff -urNp linux-2.6.32.43/arch/x86/include/asm/paravirt_types.h linux-2.6.32.43/arch/x86/include/asm/paravirt_types.h
9264--- linux-2.6.32.43/arch/x86/include/asm/paravirt_types.h 2011-03-27 14:31:47.000000000 -0400
9265+++ linux-2.6.32.43/arch/x86/include/asm/paravirt_types.h 2011-04-17 15:56:46.000000000 -0400
9266@@ -316,6 +316,12 @@ struct pv_mmu_ops {
9267 an mfn. We can tell which is which from the index. */
9268 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9269 phys_addr_t phys, pgprot_t flags);
9270+
9271+#ifdef CONFIG_PAX_KERNEXEC
9272+ unsigned long (*pax_open_kernel)(void);
9273+ unsigned long (*pax_close_kernel)(void);
9274+#endif
9275+
9276 };
9277
9278 struct raw_spinlock;
9279diff -urNp linux-2.6.32.43/arch/x86/include/asm/pci_x86.h linux-2.6.32.43/arch/x86/include/asm/pci_x86.h
9280--- linux-2.6.32.43/arch/x86/include/asm/pci_x86.h 2011-03-27 14:31:47.000000000 -0400
9281+++ linux-2.6.32.43/arch/x86/include/asm/pci_x86.h 2011-04-17 15:56:46.000000000 -0400
9282@@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
9283 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
9284
9285 struct pci_raw_ops {
9286- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9287+ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9288 int reg, int len, u32 *val);
9289- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9290+ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9291 int reg, int len, u32 val);
9292 };
9293
9294-extern struct pci_raw_ops *raw_pci_ops;
9295-extern struct pci_raw_ops *raw_pci_ext_ops;
9296+extern const struct pci_raw_ops *raw_pci_ops;
9297+extern const struct pci_raw_ops *raw_pci_ext_ops;
9298
9299-extern struct pci_raw_ops pci_direct_conf1;
9300+extern const struct pci_raw_ops pci_direct_conf1;
9301 extern bool port_cf9_safe;
9302
9303 /* arch_initcall level */
9304diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgalloc.h linux-2.6.32.43/arch/x86/include/asm/pgalloc.h
9305--- linux-2.6.32.43/arch/x86/include/asm/pgalloc.h 2011-03-27 14:31:47.000000000 -0400
9306+++ linux-2.6.32.43/arch/x86/include/asm/pgalloc.h 2011-04-17 15:56:46.000000000 -0400
9307@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
9308 pmd_t *pmd, pte_t *pte)
9309 {
9310 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9311+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9312+}
9313+
9314+static inline void pmd_populate_user(struct mm_struct *mm,
9315+ pmd_t *pmd, pte_t *pte)
9316+{
9317+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9318 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9319 }
9320
9321diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgtable-2level.h linux-2.6.32.43/arch/x86/include/asm/pgtable-2level.h
9322--- linux-2.6.32.43/arch/x86/include/asm/pgtable-2level.h 2011-03-27 14:31:47.000000000 -0400
9323+++ linux-2.6.32.43/arch/x86/include/asm/pgtable-2level.h 2011-04-17 15:56:46.000000000 -0400
9324@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
9325
9326 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9327 {
9328+ pax_open_kernel();
9329 *pmdp = pmd;
9330+ pax_close_kernel();
9331 }
9332
9333 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9334diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgtable_32.h linux-2.6.32.43/arch/x86/include/asm/pgtable_32.h
9335--- linux-2.6.32.43/arch/x86/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
9336+++ linux-2.6.32.43/arch/x86/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
9337@@ -26,9 +26,6 @@
9338 struct mm_struct;
9339 struct vm_area_struct;
9340
9341-extern pgd_t swapper_pg_dir[1024];
9342-extern pgd_t trampoline_pg_dir[1024];
9343-
9344 static inline void pgtable_cache_init(void) { }
9345 static inline void check_pgt_cache(void) { }
9346 void paging_init(void);
9347@@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, u
9348 # include <asm/pgtable-2level.h>
9349 #endif
9350
9351+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9352+extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
9353+#ifdef CONFIG_X86_PAE
9354+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9355+#endif
9356+
9357 #if defined(CONFIG_HIGHPTE)
9358 #define __KM_PTE \
9359 (in_nmi() ? KM_NMI_PTE : \
9360@@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, u
9361 /* Clear a kernel PTE and flush it from the TLB */
9362 #define kpte_clear_flush(ptep, vaddr) \
9363 do { \
9364+ pax_open_kernel(); \
9365 pte_clear(&init_mm, (vaddr), (ptep)); \
9366+ pax_close_kernel(); \
9367 __flush_tlb_one((vaddr)); \
9368 } while (0)
9369
9370@@ -85,6 +90,9 @@ do { \
9371
9372 #endif /* !__ASSEMBLY__ */
9373
9374+#define HAVE_ARCH_UNMAPPED_AREA
9375+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9376+
9377 /*
9378 * kern_addr_valid() is (1) for FLATMEM and (0) for
9379 * SPARSEMEM and DISCONTIGMEM
9380diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgtable_32_types.h linux-2.6.32.43/arch/x86/include/asm/pgtable_32_types.h
9381--- linux-2.6.32.43/arch/x86/include/asm/pgtable_32_types.h 2011-03-27 14:31:47.000000000 -0400
9382+++ linux-2.6.32.43/arch/x86/include/asm/pgtable_32_types.h 2011-04-17 15:56:46.000000000 -0400
9383@@ -8,7 +8,7 @@
9384 */
9385 #ifdef CONFIG_X86_PAE
9386 # include <asm/pgtable-3level_types.h>
9387-# define PMD_SIZE (1UL << PMD_SHIFT)
9388+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9389 # define PMD_MASK (~(PMD_SIZE - 1))
9390 #else
9391 # include <asm/pgtable-2level_types.h>
9392@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
9393 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9394 #endif
9395
9396+#ifdef CONFIG_PAX_KERNEXEC
9397+#ifndef __ASSEMBLY__
9398+extern unsigned char MODULES_EXEC_VADDR[];
9399+extern unsigned char MODULES_EXEC_END[];
9400+#endif
9401+#include <asm/boot.h>
9402+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9403+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9404+#else
9405+#define ktla_ktva(addr) (addr)
9406+#define ktva_ktla(addr) (addr)
9407+#endif
9408+
9409 #define MODULES_VADDR VMALLOC_START
9410 #define MODULES_END VMALLOC_END
9411 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
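
When KERNEXEC is enabled, ktla_ktva() and ktva_ktla() translate kernel-text addresses between two views that differ by the constant LOAD_PHYSICAL_ADDR + PAGE_OFFSET displacement, and collapse to identity macros when it is not (the #else branch above); the 64-bit header later in the patch defines them as identity as well. The two macros are exact inverses, which the following arithmetic check demonstrates with placeholder constants standing in for the real LOAD_PHYSICAL_ADDR and PAGE_OFFSET:

#include <assert.h>
#include <stdio.h>

/* Placeholder values for the demo only; the real ones come from the
 * kernel configuration (physical load address and i386 PAGE_OFFSET). */
#define DEMO_LOAD_PHYSICAL_ADDR	0x01000000UL
#define DEMO_PAGE_OFFSET	0xc0000000UL

#define ktla_ktva(addr) ((addr) + DEMO_LOAD_PHYSICAL_ADDR + DEMO_PAGE_OFFSET)
#define ktva_ktla(addr) ((addr) - DEMO_LOAD_PHYSICAL_ADDR - DEMO_PAGE_OFFSET)

int main(void)
{
	unsigned long ktla = 0x00400000UL;	/* arbitrary example address */

	printf("ktla %#lx -> ktva %#lx\n", ktla, ktla_ktva(ktla));
	assert(ktva_ktla(ktla_ktva(ktla)) == ktla);
	return 0;
}
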
9412diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgtable-3level.h linux-2.6.32.43/arch/x86/include/asm/pgtable-3level.h
9413--- linux-2.6.32.43/arch/x86/include/asm/pgtable-3level.h 2011-03-27 14:31:47.000000000 -0400
9414+++ linux-2.6.32.43/arch/x86/include/asm/pgtable-3level.h 2011-04-17 15:56:46.000000000 -0400
9415@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
9416
9417 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9418 {
9419+ pax_open_kernel();
9420 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9421+ pax_close_kernel();
9422 }
9423
9424 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9425 {
9426+ pax_open_kernel();
9427 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9428+ pax_close_kernel();
9429 }
9430
9431 /*
9432diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgtable_64.h linux-2.6.32.43/arch/x86/include/asm/pgtable_64.h
9433--- linux-2.6.32.43/arch/x86/include/asm/pgtable_64.h 2011-03-27 14:31:47.000000000 -0400
9434+++ linux-2.6.32.43/arch/x86/include/asm/pgtable_64.h 2011-04-17 15:56:46.000000000 -0400
9435@@ -16,10 +16,13 @@
9436
9437 extern pud_t level3_kernel_pgt[512];
9438 extern pud_t level3_ident_pgt[512];
9439+extern pud_t level3_vmalloc_pgt[512];
9440+extern pud_t level3_vmemmap_pgt[512];
9441+extern pud_t level2_vmemmap_pgt[512];
9442 extern pmd_t level2_kernel_pgt[512];
9443 extern pmd_t level2_fixmap_pgt[512];
9444-extern pmd_t level2_ident_pgt[512];
9445-extern pgd_t init_level4_pgt[];
9446+extern pmd_t level2_ident_pgt[512*2];
9447+extern pgd_t init_level4_pgt[512];
9448
9449 #define swapper_pg_dir init_level4_pgt
9450
9451@@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
9452
9453 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9454 {
9455+ pax_open_kernel();
9456 *pmdp = pmd;
9457+ pax_close_kernel();
9458 }
9459
9460 static inline void native_pmd_clear(pmd_t *pmd)
9461@@ -94,7 +99,9 @@ static inline void native_pud_clear(pud_
9462
9463 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9464 {
9465+ pax_open_kernel();
9466 *pgdp = pgd;
9467+ pax_close_kernel();
9468 }
9469
9470 static inline void native_pgd_clear(pgd_t *pgd)
9471diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgtable_64_types.h linux-2.6.32.43/arch/x86/include/asm/pgtable_64_types.h
9472--- linux-2.6.32.43/arch/x86/include/asm/pgtable_64_types.h 2011-03-27 14:31:47.000000000 -0400
9473+++ linux-2.6.32.43/arch/x86/include/asm/pgtable_64_types.h 2011-04-17 15:56:46.000000000 -0400
9474@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9475 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9476 #define MODULES_END _AC(0xffffffffff000000, UL)
9477 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9478+#define MODULES_EXEC_VADDR MODULES_VADDR
9479+#define MODULES_EXEC_END MODULES_END
9480+
9481+#define ktla_ktva(addr) (addr)
9482+#define ktva_ktla(addr) (addr)
9483
9484 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9485diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgtable.h linux-2.6.32.43/arch/x86/include/asm/pgtable.h
9486--- linux-2.6.32.43/arch/x86/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
9487+++ linux-2.6.32.43/arch/x86/include/asm/pgtable.h 2011-04-17 15:56:46.000000000 -0400
9488@@ -74,12 +74,51 @@ extern struct list_head pgd_list;
9489
9490 #define arch_end_context_switch(prev) do {} while(0)
9491
9492+#define pax_open_kernel() native_pax_open_kernel()
9493+#define pax_close_kernel() native_pax_close_kernel()
9494 #endif /* CONFIG_PARAVIRT */
9495
9496+#define __HAVE_ARCH_PAX_OPEN_KERNEL
9497+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9498+
9499+#ifdef CONFIG_PAX_KERNEXEC
9500+static inline unsigned long native_pax_open_kernel(void)
9501+{
9502+ unsigned long cr0;
9503+
9504+ preempt_disable();
9505+ barrier();
9506+ cr0 = read_cr0() ^ X86_CR0_WP;
9507+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
9508+ write_cr0(cr0);
9509+ return cr0 ^ X86_CR0_WP;
9510+}
9511+
9512+static inline unsigned long native_pax_close_kernel(void)
9513+{
9514+ unsigned long cr0;
9515+
9516+ cr0 = read_cr0() ^ X86_CR0_WP;
9517+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9518+ write_cr0(cr0);
9519+ barrier();
9520+ preempt_enable_no_resched();
9521+ return cr0 ^ X86_CR0_WP;
9522+}
9523+#else
9524+static inline unsigned long native_pax_open_kernel(void) { return 0; }
9525+static inline unsigned long native_pax_close_kernel(void) { return 0; }
9526+#endif
9527+
9528 /*
9529 * The following only work if pte_present() is true.
9530 * Undefined behaviour if not..
9531 */
9532+static inline int pte_user(pte_t pte)
9533+{
9534+ return pte_val(pte) & _PAGE_USER;
9535+}
9536+
9537 static inline int pte_dirty(pte_t pte)
9538 {
9539 return pte_flags(pte) & _PAGE_DIRTY;
9540@@ -167,9 +206,29 @@ static inline pte_t pte_wrprotect(pte_t
9541 return pte_clear_flags(pte, _PAGE_RW);
9542 }
9543
9544+static inline pte_t pte_mkread(pte_t pte)
9545+{
9546+ return __pte(pte_val(pte) | _PAGE_USER);
9547+}
9548+
9549 static inline pte_t pte_mkexec(pte_t pte)
9550 {
9551- return pte_clear_flags(pte, _PAGE_NX);
9552+#ifdef CONFIG_X86_PAE
9553+ if (__supported_pte_mask & _PAGE_NX)
9554+ return pte_clear_flags(pte, _PAGE_NX);
9555+ else
9556+#endif
9557+ return pte_set_flags(pte, _PAGE_USER);
9558+}
9559+
9560+static inline pte_t pte_exprotect(pte_t pte)
9561+{
9562+#ifdef CONFIG_X86_PAE
9563+ if (__supported_pte_mask & _PAGE_NX)
9564+ return pte_set_flags(pte, _PAGE_NX);
9565+ else
9566+#endif
9567+ return pte_clear_flags(pte, _PAGE_USER);
9568 }
9569
9570 static inline pte_t pte_mkdirty(pte_t pte)
9571@@ -302,6 +361,15 @@ pte_t *populate_extra_pte(unsigned long
9572 #endif
9573
9574 #ifndef __ASSEMBLY__
9575+
9576+#ifdef CONFIG_PAX_PER_CPU_PGD
9577+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9578+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9579+{
9580+ return cpu_pgd[cpu];
9581+}
9582+#endif
9583+
9584 #include <linux/mm_types.h>
9585
9586 static inline int pte_none(pte_t pte)
9587@@ -472,7 +540,7 @@ static inline pud_t *pud_offset(pgd_t *p
9588
9589 static inline int pgd_bad(pgd_t pgd)
9590 {
9591- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9592+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9593 }
9594
9595 static inline int pgd_none(pgd_t pgd)
9596@@ -495,7 +563,12 @@ static inline int pgd_none(pgd_t pgd)
9597 * pgd_offset() returns a (pgd_t *)
9598 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9599 */
9600-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9601+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9602+
9603+#ifdef CONFIG_PAX_PER_CPU_PGD
9604+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9605+#endif
9606+
9607 /*
9608 * a shortcut which implies the use of the kernel's pgd, instead
9609 * of a process's
9610@@ -506,6 +579,20 @@ static inline int pgd_none(pgd_t pgd)
9611 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9612 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9613
9614+#ifdef CONFIG_X86_32
9615+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9616+#else
9617+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9618+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9619+
9620+#ifdef CONFIG_PAX_MEMORY_UDEREF
9621+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9622+#else
9623+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9624+#endif
9625+
9626+#endif
9627+
9628 #ifndef __ASSEMBLY__
9629
9630 extern int direct_gbpages;
9631@@ -611,11 +698,23 @@ static inline void ptep_set_wrprotect(st
9632 * dst and src can be on the same page, but the range must not overlap,
9633 * and must not cross a page boundary.
9634 */
9635-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9636+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9637 {
9638- memcpy(dst, src, count * sizeof(pgd_t));
9639+ pax_open_kernel();
9640+ while (count--)
9641+ *dst++ = *src++;
9642+ pax_close_kernel();
9643 }
9644
9645+#ifdef CONFIG_PAX_PER_CPU_PGD
9646+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9647+#endif
9648+
9649+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9650+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9651+#else
9652+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9653+#endif
9654
9655 #include <asm-generic/pgtable.h>
9656 #endif /* __ASSEMBLY__ */
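
[Illustration, not part of the patch] The pgtable.h hunks above introduce native_pax_open_kernel()/native_pax_close_kernel(), which bracket writes to read-only page tables by temporarily clearing CR0.WP with preemption disabled, and rewrite clone_pgd_range() to copy entries inside such a bracket. The sketch below models CR0.WP with a plain flag so the pattern can run in user space; pgd_t is reduced to an unsigned long and all names are stand-ins.

/* user-space mock of the pax_open_kernel()/pax_close_kernel() bracketing */
#include <assert.h>
#include <stdio.h>

typedef unsigned long pgd_t;

static int wp_enabled = 1;                /* stands in for CR0.WP */

static void pax_open_kernel(void)  { assert(wp_enabled);  wp_enabled = 0; }
static void pax_close_kernel(void) { assert(!wp_enabled); wp_enabled = 1; }

/* element-wise copy instead of memcpy(), as in the patched clone_pgd_range() */
static void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
{
    pax_open_kernel();
    while (count--)
        *dst++ = *src++;
    pax_close_kernel();
}

int main(void)
{
    pgd_t src[4] = { 1, 2, 3, 4 }, dst[4] = { 0 };

    clone_pgd_range(dst, src, 4);
    printf("dst[3]=%lu, wp_enabled=%d\n", dst[3], wp_enabled);
    return 0;
}
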
9657diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgtable_types.h linux-2.6.32.43/arch/x86/include/asm/pgtable_types.h
9658--- linux-2.6.32.43/arch/x86/include/asm/pgtable_types.h 2011-03-27 14:31:47.000000000 -0400
9659+++ linux-2.6.32.43/arch/x86/include/asm/pgtable_types.h 2011-04-17 15:56:46.000000000 -0400
9660@@ -16,12 +16,11 @@
9661 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9662 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9663 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9664-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9665+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9666 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9667 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9668 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9669-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9670-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9671+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9672 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9673
9674 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9675@@ -39,7 +38,6 @@
9676 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9677 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9678 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9679-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9680 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9681 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9682 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9683@@ -55,8 +53,10 @@
9684
9685 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9686 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9687-#else
9688+#elif defined(CONFIG_KMEMCHECK)
9689 #define _PAGE_NX (_AT(pteval_t, 0))
9690+#else
9691+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9692 #endif
9693
9694 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9695@@ -93,6 +93,9 @@
9696 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9697 _PAGE_ACCESSED)
9698
9699+#define PAGE_READONLY_NOEXEC PAGE_READONLY
9700+#define PAGE_SHARED_NOEXEC PAGE_SHARED
9701+
9702 #define __PAGE_KERNEL_EXEC \
9703 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9704 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9705@@ -103,8 +106,8 @@
9706 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9707 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9708 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9709-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9710-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
9711+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9712+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
9713 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9714 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
9715 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
9716@@ -163,8 +166,8 @@
9717 * bits are combined, this will alow user to access the high address mapped
9717 * bits are combined, this will allow the user to access the high address mapped
9718 * VDSO in the presence of CONFIG_COMPAT_VDSO
9719 */
9720-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9721-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9722+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9723+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9724 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9725 #endif
9726
9727@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
9728 {
9729 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9730 }
9731+#endif
9732
9733+#if PAGETABLE_LEVELS == 3
9734+#include <asm-generic/pgtable-nopud.h>
9735+#endif
9736+
9737+#if PAGETABLE_LEVELS == 2
9738+#include <asm-generic/pgtable-nopmd.h>
9739+#endif
9740+
9741+#ifndef __ASSEMBLY__
9742 #if PAGETABLE_LEVELS > 3
9743 typedef struct { pudval_t pud; } pud_t;
9744
9745@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
9746 return pud.pud;
9747 }
9748 #else
9749-#include <asm-generic/pgtable-nopud.h>
9750-
9751 static inline pudval_t native_pud_val(pud_t pud)
9752 {
9753 return native_pgd_val(pud.pgd);
9754@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
9755 return pmd.pmd;
9756 }
9757 #else
9758-#include <asm-generic/pgtable-nopmd.h>
9759-
9760 static inline pmdval_t native_pmd_val(pmd_t pmd)
9761 {
9762 return native_pgd_val(pmd.pud.pgd);
9763@@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
9764
9765 extern pteval_t __supported_pte_mask;
9766 extern void set_nx(void);
9767+
9768+#ifdef CONFIG_X86_32
9769+#ifdef CONFIG_X86_PAE
9770 extern int nx_enabled;
9771+#else
9772+#define nx_enabled (0)
9773+#endif
9774+#else
9775+#define nx_enabled (1)
9776+#endif
9777
9778 #define pgprot_writecombine pgprot_writecombine
9779 extern pgprot_t pgprot_writecombine(pgprot_t prot);
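
[Illustration, not part of the patch] Plain 32-bit non-PAE hardware has no NX bit, so the pgtable_types.h hunk above repurposes the software-only bit 11 (_PAGE_BIT_HIDDEN) as a soft NX marker, presumably for PaX's PAGEEXEC emulation; with kmemcheck, which already uses that bit, _PAGE_NX collapses to 0. The sketch simply prints the three resulting values side by side instead of selecting one via Kconfig.

/* show the three _PAGE_NX encodings chosen by the hunk above */
#include <stdio.h>
#include <stdint.h>

#define _PAGE_BIT_HIDDEN 11
#define _PAGE_BIT_NX     63

int main(void)
{
    uint64_t nx_hw   = (uint64_t)1 << _PAGE_BIT_NX;     /* X86_64 / PAE      */
    uint64_t nx_none = 0;                               /* CONFIG_KMEMCHECK  */
    uint64_t nx_soft = (uint64_t)1 << _PAGE_BIT_HIDDEN; /* plain 32-bit      */

    printf("hardware NX: %#018llx\n", (unsigned long long)nx_hw);
    printf("kmemcheck:   %#018llx\n", (unsigned long long)nx_none);
    printf("software NX: %#018llx\n", (unsigned long long)nx_soft);
    return 0;
}
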
9780diff -urNp linux-2.6.32.43/arch/x86/include/asm/processor.h linux-2.6.32.43/arch/x86/include/asm/processor.h
9781--- linux-2.6.32.43/arch/x86/include/asm/processor.h 2011-04-22 19:16:29.000000000 -0400
9782+++ linux-2.6.32.43/arch/x86/include/asm/processor.h 2011-05-11 18:25:15.000000000 -0400
9783@@ -272,7 +272,7 @@ struct tss_struct {
9784
9785 } ____cacheline_aligned;
9786
9787-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9788+extern struct tss_struct init_tss[NR_CPUS];
9789
9790 /*
9791 * Save the original ist values for checking stack pointers during debugging
9792@@ -888,11 +888,18 @@ static inline void spin_lock_prefetch(co
9793 */
9794 #define TASK_SIZE PAGE_OFFSET
9795 #define TASK_SIZE_MAX TASK_SIZE
9796+
9797+#ifdef CONFIG_PAX_SEGMEXEC
9798+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9799+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9800+#else
9801 #define STACK_TOP TASK_SIZE
9802-#define STACK_TOP_MAX STACK_TOP
9803+#endif
9804+
9805+#define STACK_TOP_MAX TASK_SIZE
9806
9807 #define INIT_THREAD { \
9808- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9809+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9810 .vm86_info = NULL, \
9811 .sysenter_cs = __KERNEL_CS, \
9812 .io_bitmap_ptr = NULL, \
9813@@ -906,7 +913,7 @@ static inline void spin_lock_prefetch(co
9814 */
9815 #define INIT_TSS { \
9816 .x86_tss = { \
9817- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9818+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9819 .ss0 = __KERNEL_DS, \
9820 .ss1 = __KERNEL_CS, \
9821 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9822@@ -917,11 +924,7 @@ static inline void spin_lock_prefetch(co
9823 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9824
9825 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9826-#define KSTK_TOP(info) \
9827-({ \
9828- unsigned long *__ptr = (unsigned long *)(info); \
9829- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9830-})
9831+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9832
9833 /*
9834 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9835@@ -936,7 +939,7 @@ extern unsigned long thread_saved_pc(str
9836 #define task_pt_regs(task) \
9837 ({ \
9838 struct pt_regs *__regs__; \
9839- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9840+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9841 __regs__ - 1; \
9842 })
9843
9844@@ -946,13 +949,13 @@ extern unsigned long thread_saved_pc(str
9845 /*
9846 * User space process size. 47bits minus one guard page.
9847 */
9848-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9849+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9850
9851 /* This decides where the kernel will search for a free chunk of vm
9852 * space during mmap's.
9853 */
9854 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9855- 0xc0000000 : 0xFFFFe000)
9856+ 0xc0000000 : 0xFFFFf000)
9857
9858 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9859 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9860@@ -963,11 +966,11 @@ extern unsigned long thread_saved_pc(str
9861 #define STACK_TOP_MAX TASK_SIZE_MAX
9862
9863 #define INIT_THREAD { \
9864- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9865+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9866 }
9867
9868 #define INIT_TSS { \
9869- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9870+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9871 }
9872
9873 /*
9874@@ -989,6 +992,10 @@ extern void start_thread(struct pt_regs
9875 */
9876 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9877
9878+#ifdef CONFIG_PAX_SEGMEXEC
9879+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9880+#endif
9881+
9882 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9883
9884 /* Get/set a process' ability to use the timestamp counter instruction */
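
[Illustration, not part of the patch] Under CONFIG_PAX_SEGMEXEC the processor.h hunks above halve the 32-bit user address space so the upper half can mirror executable mappings of the lower half, and shrink STACK_TOP and the mmap base accordingly. A minimal sketch of the resulting numbers, assuming the usual 3G/1G split; TASK_SIZE and PAGE_SIZE below are example values.

/* compute the SEGMEXEC layout constants for an assumed 3 GiB TASK_SIZE */
#include <stdio.h>

#define PAGE_SIZE      4096UL
#define PAGE_ALIGN(x)  (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define TASK_SIZE      0xC0000000UL            /* assumed PAGE_OFFSET */

#define SEGMEXEC_TASK_SIZE          (TASK_SIZE / 2)
#define TASK_UNMAPPED_BASE          PAGE_ALIGN(TASK_SIZE / 3)
#define SEGMEXEC_TASK_UNMAPPED_BASE PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3)

int main(void)
{
    printf("TASK_SIZE                   = %#lx\n", TASK_SIZE);
    printf("SEGMEXEC_TASK_SIZE          = %#lx\n", SEGMEXEC_TASK_SIZE);
    printf("TASK_UNMAPPED_BASE          = %#lx\n", TASK_UNMAPPED_BASE);
    printf("SEGMEXEC_TASK_UNMAPPED_BASE = %#lx\n", SEGMEXEC_TASK_UNMAPPED_BASE);
    return 0;
}
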
9885diff -urNp linux-2.6.32.43/arch/x86/include/asm/ptrace.h linux-2.6.32.43/arch/x86/include/asm/ptrace.h
9886--- linux-2.6.32.43/arch/x86/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
9887+++ linux-2.6.32.43/arch/x86/include/asm/ptrace.h 2011-04-17 15:56:46.000000000 -0400
9888@@ -151,28 +151,29 @@ static inline unsigned long regs_return_
9889 }
9890
9891 /*
9892- * user_mode_vm(regs) determines whether a register set came from user mode.
9893+ * user_mode(regs) determines whether a register set came from user mode.
9894 * This is true if V8086 mode was enabled OR if the register set was from
9895 * protected mode with RPL-3 CS value. This tricky test checks that with
9896 * one comparison. Many places in the kernel can bypass this full check
9897- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9898+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9899+ * be used.
9900 */
9901-static inline int user_mode(struct pt_regs *regs)
9902+static inline int user_mode_novm(struct pt_regs *regs)
9903 {
9904 #ifdef CONFIG_X86_32
9905 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9906 #else
9907- return !!(regs->cs & 3);
9908+ return !!(regs->cs & SEGMENT_RPL_MASK);
9909 #endif
9910 }
9911
9912-static inline int user_mode_vm(struct pt_regs *regs)
9913+static inline int user_mode(struct pt_regs *regs)
9914 {
9915 #ifdef CONFIG_X86_32
9916 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9917 USER_RPL;
9918 #else
9919- return user_mode(regs);
9920+ return user_mode_novm(regs);
9921 #endif
9922 }
9923
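
[Illustration, not part of the patch] The ptrace.h hunk above keeps the combined RPL/VM86 test (it only renames user_mode_vm() to user_mode()): a register set counts as user mode if either CS has RPL 3 or EFLAGS.VM is set, and OR-ing the two masked values lets one comparison cover both cases. The sketch below reduces pt_regs to the two fields the test needs; the constants are copied from the x86 headers.

/* exercise the combined (cs & RPL) | (flags & VM) >= USER_RPL test */
#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3UL
#define USER_RPL         0x3UL
#define X86_VM_MASK      0x00020000UL   /* X86_EFLAGS_VM */

struct pt_regs { unsigned long cs, flags; };

static int user_mode(const struct pt_regs *regs)
{
    return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
    struct pt_regs kernel = { .cs = 0x60, .flags = 0x00000046 };  /* RPL 0, no VM */
    struct pt_regs user   = { .cs = 0x73, .flags = 0x00000246 };  /* RPL 3        */
    struct pt_regs vm86   = { .cs = 0x00, .flags = 0x00020246 };  /* VM flag set  */

    printf("kernel: %d, user: %d, vm86: %d\n",
           user_mode(&kernel), user_mode(&user), user_mode(&vm86));
    return 0;
}
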
9924diff -urNp linux-2.6.32.43/arch/x86/include/asm/reboot.h linux-2.6.32.43/arch/x86/include/asm/reboot.h
9925--- linux-2.6.32.43/arch/x86/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
9926+++ linux-2.6.32.43/arch/x86/include/asm/reboot.h 2011-05-22 23:02:03.000000000 -0400
9927@@ -6,19 +6,19 @@
9928 struct pt_regs;
9929
9930 struct machine_ops {
9931- void (*restart)(char *cmd);
9932- void (*halt)(void);
9933- void (*power_off)(void);
9934+ void (* __noreturn restart)(char *cmd);
9935+ void (* __noreturn halt)(void);
9936+ void (* __noreturn power_off)(void);
9937 void (*shutdown)(void);
9938 void (*crash_shutdown)(struct pt_regs *);
9939- void (*emergency_restart)(void);
9940+ void (* __noreturn emergency_restart)(void);
9941 };
9942
9943 extern struct machine_ops machine_ops;
9944
9945 void native_machine_crash_shutdown(struct pt_regs *regs);
9946 void native_machine_shutdown(void);
9947-void machine_real_restart(const unsigned char *code, int length);
9948+void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
9949
9950 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
9951 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
9952diff -urNp linux-2.6.32.43/arch/x86/include/asm/rwsem.h linux-2.6.32.43/arch/x86/include/asm/rwsem.h
9953--- linux-2.6.32.43/arch/x86/include/asm/rwsem.h 2011-03-27 14:31:47.000000000 -0400
9954+++ linux-2.6.32.43/arch/x86/include/asm/rwsem.h 2011-04-17 15:56:46.000000000 -0400
9955@@ -118,6 +118,14 @@ static inline void __down_read(struct rw
9956 {
9957 asm volatile("# beginning down_read\n\t"
9958 LOCK_PREFIX _ASM_INC "(%1)\n\t"
9959+
9960+#ifdef CONFIG_PAX_REFCOUNT
9961+ "jno 0f\n"
9962+ LOCK_PREFIX _ASM_DEC "(%1)\n\t"
9963+ "int $4\n0:\n"
9964+ _ASM_EXTABLE(0b, 0b)
9965+#endif
9966+
9967 /* adds 0x00000001, returns the old value */
9968 " jns 1f\n"
9969 " call call_rwsem_down_read_failed\n"
9970@@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
9971 "1:\n\t"
9972 " mov %1,%2\n\t"
9973 " add %3,%2\n\t"
9974+
9975+#ifdef CONFIG_PAX_REFCOUNT
9976+ "jno 0f\n"
9977+ "sub %3,%2\n"
9978+ "int $4\n0:\n"
9979+ _ASM_EXTABLE(0b, 0b)
9980+#endif
9981+
9982 " jle 2f\n\t"
9983 LOCK_PREFIX " cmpxchg %2,%0\n\t"
9984 " jnz 1b\n\t"
9985@@ -160,6 +176,14 @@ static inline void __down_write_nested(s
9986 tmp = RWSEM_ACTIVE_WRITE_BIAS;
9987 asm volatile("# beginning down_write\n\t"
9988 LOCK_PREFIX " xadd %1,(%2)\n\t"
9989+
9990+#ifdef CONFIG_PAX_REFCOUNT
9991+ "jno 0f\n"
9992+ "mov %1,(%2)\n"
9993+ "int $4\n0:\n"
9994+ _ASM_EXTABLE(0b, 0b)
9995+#endif
9996+
9997 /* subtract 0x0000ffff, returns the old value */
9998 " test %1,%1\n\t"
9999 /* was the count 0 before? */
10000@@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s
10001 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
10002 asm volatile("# beginning __up_read\n\t"
10003 LOCK_PREFIX " xadd %1,(%2)\n\t"
10004+
10005+#ifdef CONFIG_PAX_REFCOUNT
10006+ "jno 0f\n"
10007+ "mov %1,(%2)\n"
10008+ "int $4\n0:\n"
10009+ _ASM_EXTABLE(0b, 0b)
10010+#endif
10011+
10012 /* subtracts 1, returns the old value */
10013 " jns 1f\n\t"
10014 " call call_rwsem_wake\n"
10015@@ -216,6 +248,14 @@ static inline void __up_write(struct rw_
10016 rwsem_count_t tmp;
10017 asm volatile("# beginning __up_write\n\t"
10018 LOCK_PREFIX " xadd %1,(%2)\n\t"
10019+
10020+#ifdef CONFIG_PAX_REFCOUNT
10021+ "jno 0f\n"
10022+ "mov %1,(%2)\n"
10023+ "int $4\n0:\n"
10024+ _ASM_EXTABLE(0b, 0b)
10025+#endif
10026+
10027 /* tries to transition
10028 0xffff0001 -> 0x00000000 */
10029 " jz 1f\n"
10030@@ -234,6 +274,14 @@ static inline void __downgrade_write(str
10031 {
10032 asm volatile("# beginning __downgrade_write\n\t"
10033 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10034+
10035+#ifdef CONFIG_PAX_REFCOUNT
10036+ "jno 0f\n"
10037+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10038+ "int $4\n0:\n"
10039+ _ASM_EXTABLE(0b, 0b)
10040+#endif
10041+
10042 /*
10043 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10044 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10045@@ -253,7 +301,15 @@ static inline void __downgrade_write(str
10046 static inline void rwsem_atomic_add(rwsem_count_t delta,
10047 struct rw_semaphore *sem)
10048 {
10049- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10050+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10051+
10052+#ifdef CONFIG_PAX_REFCOUNT
10053+ "jno 0f\n"
10054+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
10055+ "int $4\n0:\n"
10056+ _ASM_EXTABLE(0b, 0b)
10057+#endif
10058+
10059 : "+m" (sem->count)
10060 : "er" (delta));
10061 }
10062@@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic
10063 {
10064 rwsem_count_t tmp = delta;
10065
10066- asm volatile(LOCK_PREFIX "xadd %0,%1"
10067+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
10068+
10069+#ifdef CONFIG_PAX_REFCOUNT
10070+ "jno 0f\n"
10071+ "mov %0,%1\n"
10072+ "int $4\n0:\n"
10073+ _ASM_EXTABLE(0b, 0b)
10074+#endif
10075+
10076 : "+r" (tmp), "+m" (sem->count)
10077 : : "memory");
10078
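
[Illustration, not part of the patch] The rwsem.h hunks above attach the PAX_REFCOUNT pattern to every count update: if the locked add/inc/xadd sets the signed-overflow flag, the "jno 0f" branch is not taken, the operation is undone, and "int $4" raises the overflow exception so the kernel can report and kill the offender. Below is a rough user-space analogue of that detect/undo/report idea using the GCC/Clang __builtin_add_overflow() helper, which is not what the patch itself uses.

/* refuse a reference-count update that would overflow, instead of wrapping */
#include <limits.h>
#include <stdio.h>

static int checked_add(int *count, int delta)
{
    int tmp;

    if (__builtin_add_overflow(*count, delta, &tmp)) {
        /* overflow detected: leave *count untouched and report it */
        fprintf(stderr, "refcount overflow caught, count stays %d\n", *count);
        return -1;
    }
    *count = tmp;
    return 0;
}

int main(void)
{
    int count = INT_MAX - 1;

    checked_add(&count, 1);    /* fine: reaches INT_MAX       */
    checked_add(&count, 1);    /* would wrap: caught, refused */
    printf("final count = %d\n", count);
    return 0;
}
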
10079diff -urNp linux-2.6.32.43/arch/x86/include/asm/segment.h linux-2.6.32.43/arch/x86/include/asm/segment.h
10080--- linux-2.6.32.43/arch/x86/include/asm/segment.h 2011-03-27 14:31:47.000000000 -0400
10081+++ linux-2.6.32.43/arch/x86/include/asm/segment.h 2011-04-17 15:56:46.000000000 -0400
10082@@ -62,8 +62,8 @@
10083 * 26 - ESPFIX small SS
10084 * 27 - per-cpu [ offset to per-cpu data area ]
10085 * 28 - stack_canary-20 [ for stack protector ]
10086- * 29 - unused
10087- * 30 - unused
10088+ * 29 - PCI BIOS CS
10089+ * 30 - PCI BIOS DS
10090 * 31 - TSS for double fault handler
10091 */
10092 #define GDT_ENTRY_TLS_MIN 6
10093@@ -77,6 +77,8 @@
10094
10095 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
10096
10097+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10098+
10099 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
10100
10101 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
10102@@ -88,7 +90,7 @@
10103 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
10104 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
10105
10106-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10107+#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10108 #ifdef CONFIG_SMP
10109 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
10110 #else
10111@@ -102,6 +104,12 @@
10112 #define __KERNEL_STACK_CANARY 0
10113 #endif
10114
10115+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
10116+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10117+
10118+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
10119+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10120+
10121 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10122
10123 /*
10124@@ -139,7 +147,7 @@
10125 */
10126
10127 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10128-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10129+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10130
10131
10132 #else
10133@@ -163,6 +171,8 @@
10134 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
10135 #define __USER32_DS __USER_DS
10136
10137+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10138+
10139 #define GDT_ENTRY_TSS 8 /* needs two entries */
10140 #define GDT_ENTRY_LDT 10 /* needs two entries */
10141 #define GDT_ENTRY_TLS_MIN 12
10142@@ -183,6 +193,7 @@
10143 #endif
10144
10145 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
10146+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
10147 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
10148 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
10149 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
10150diff -urNp linux-2.6.32.43/arch/x86/include/asm/smp.h linux-2.6.32.43/arch/x86/include/asm/smp.h
10151--- linux-2.6.32.43/arch/x86/include/asm/smp.h 2011-03-27 14:31:47.000000000 -0400
10152+++ linux-2.6.32.43/arch/x86/include/asm/smp.h 2011-07-01 19:00:40.000000000 -0400
10153@@ -24,7 +24,7 @@ extern unsigned int num_processors;
10154 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
10155 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10156 DECLARE_PER_CPU(u16, cpu_llc_id);
10157-DECLARE_PER_CPU(int, cpu_number);
10158+DECLARE_PER_CPU(unsigned int, cpu_number);
10159
10160 static inline struct cpumask *cpu_sibling_mask(int cpu)
10161 {
10162@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_ap
10163 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
10164
10165 /* Static state in head.S used to set up a CPU */
10166-extern struct {
10167- void *sp;
10168- unsigned short ss;
10169-} stack_start;
10170+extern unsigned long stack_start; /* Initial stack pointer address */
10171
10172 struct smp_ops {
10173 void (*smp_prepare_boot_cpu)(void);
10174@@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitd
10175 extern int safe_smp_processor_id(void);
10176
10177 #elif defined(CONFIG_X86_64_SMP)
10178-#define raw_smp_processor_id() (percpu_read(cpu_number))
10179-
10180-#define stack_smp_processor_id() \
10181-({ \
10182- struct thread_info *ti; \
10183- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10184- ti->cpu; \
10185-})
10186+#define raw_smp_processor_id() (percpu_read(cpu_number))
10187+#define stack_smp_processor_id() raw_smp_processor_id()
10188 #define safe_smp_processor_id() smp_processor_id()
10189
10190 #endif
10191diff -urNp linux-2.6.32.43/arch/x86/include/asm/spinlock.h linux-2.6.32.43/arch/x86/include/asm/spinlock.h
10192--- linux-2.6.32.43/arch/x86/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
10193+++ linux-2.6.32.43/arch/x86/include/asm/spinlock.h 2011-04-17 15:56:46.000000000 -0400
10194@@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r
10195 static inline void __raw_read_lock(raw_rwlock_t *rw)
10196 {
10197 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
10198+
10199+#ifdef CONFIG_PAX_REFCOUNT
10200+ "jno 0f\n"
10201+ LOCK_PREFIX " addl $1,(%0)\n"
10202+ "int $4\n0:\n"
10203+ _ASM_EXTABLE(0b, 0b)
10204+#endif
10205+
10206 "jns 1f\n"
10207 "call __read_lock_failed\n\t"
10208 "1:\n"
10209@@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r
10210 static inline void __raw_write_lock(raw_rwlock_t *rw)
10211 {
10212 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
10213+
10214+#ifdef CONFIG_PAX_REFCOUNT
10215+ "jno 0f\n"
10216+ LOCK_PREFIX " addl %1,(%0)\n"
10217+ "int $4\n0:\n"
10218+ _ASM_EXTABLE(0b, 0b)
10219+#endif
10220+
10221 "jz 1f\n"
10222 "call __write_lock_failed\n\t"
10223 "1:\n"
10224@@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra
10225
10226 static inline void __raw_read_unlock(raw_rwlock_t *rw)
10227 {
10228- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
10229+ asm volatile(LOCK_PREFIX "incl %0\n"
10230+
10231+#ifdef CONFIG_PAX_REFCOUNT
10232+ "jno 0f\n"
10233+ LOCK_PREFIX "decl %0\n"
10234+ "int $4\n0:\n"
10235+ _ASM_EXTABLE(0b, 0b)
10236+#endif
10237+
10238+ :"+m" (rw->lock) : : "memory");
10239 }
10240
10241 static inline void __raw_write_unlock(raw_rwlock_t *rw)
10242 {
10243- asm volatile(LOCK_PREFIX "addl %1, %0"
10244+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
10245+
10246+#ifdef CONFIG_PAX_REFCOUNT
10247+ "jno 0f\n"
10248+ LOCK_PREFIX "subl %1, %0\n"
10249+ "int $4\n0:\n"
10250+ _ASM_EXTABLE(0b, 0b)
10251+#endif
10252+
10253 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
10254 }
10255
10256diff -urNp linux-2.6.32.43/arch/x86/include/asm/stackprotector.h linux-2.6.32.43/arch/x86/include/asm/stackprotector.h
10257--- linux-2.6.32.43/arch/x86/include/asm/stackprotector.h 2011-03-27 14:31:47.000000000 -0400
10258+++ linux-2.6.32.43/arch/x86/include/asm/stackprotector.h 2011-07-06 19:53:33.000000000 -0400
10259@@ -48,7 +48,7 @@
10260 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10261 */
10262 #define GDT_STACK_CANARY_INIT \
10263- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10264+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10265
10266 /*
10267 * Initialize the stackprotector canary value.
10268@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
10269
10270 static inline void load_stack_canary_segment(void)
10271 {
10272-#ifdef CONFIG_X86_32
10273+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10274 asm volatile ("mov %0, %%gs" : : "r" (0));
10275 #endif
10276 }
10277diff -urNp linux-2.6.32.43/arch/x86/include/asm/system.h linux-2.6.32.43/arch/x86/include/asm/system.h
10278--- linux-2.6.32.43/arch/x86/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
10279+++ linux-2.6.32.43/arch/x86/include/asm/system.h 2011-05-22 23:02:03.000000000 -0400
10280@@ -132,7 +132,7 @@ do { \
10281 "thread_return:\n\t" \
10282 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10283 __switch_canary \
10284- "movq %P[thread_info](%%rsi),%%r8\n\t" \
10285+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10286 "movq %%rax,%%rdi\n\t" \
10287 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10288 "jnz ret_from_fork\n\t" \
10289@@ -143,7 +143,7 @@ do { \
10290 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10291 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10292 [_tif_fork] "i" (_TIF_FORK), \
10293- [thread_info] "i" (offsetof(struct task_struct, stack)), \
10294+ [thread_info] "m" (per_cpu_var(current_tinfo)), \
10295 [current_task] "m" (per_cpu_var(current_task)) \
10296 __switch_canary_iparam \
10297 : "memory", "cc" __EXTRA_CLOBBER)
10298@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
10299 {
10300 unsigned long __limit;
10301 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10302- return __limit + 1;
10303+ return __limit;
10304 }
10305
10306 static inline void native_clts(void)
10307@@ -340,12 +340,12 @@ void enable_hlt(void);
10308
10309 void cpu_idle_wait(void);
10310
10311-extern unsigned long arch_align_stack(unsigned long sp);
10312+#define arch_align_stack(x) ((x) & ~0xfUL)
10313 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10314
10315 void default_idle(void);
10316
10317-void stop_this_cpu(void *dummy);
10318+void stop_this_cpu(void *dummy) __noreturn;
10319
10320 /*
10321 * Force strict CPU ordering.
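
[Illustration, not part of the patch] The system.h hunk above turns arch_align_stack() from an out-of-line randomizing helper into a plain 16-byte round-down; PaX applies its own stack randomization elsewhere in its ASLR code. The mask itself as a runnable one-liner, with an arbitrary example stack pointer:

/* round an example stack pointer down to a 16-byte boundary */
#include <stdio.h>

#define arch_align_stack(x) ((x) & ~0xfUL)

int main(void)
{
    unsigned long sp = 0xbfffe7b3UL;    /* arbitrary example value */

    printf("%#lx -> %#lx\n", sp, arch_align_stack(sp));
    return 0;
}
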
10322diff -urNp linux-2.6.32.43/arch/x86/include/asm/thread_info.h linux-2.6.32.43/arch/x86/include/asm/thread_info.h
10323--- linux-2.6.32.43/arch/x86/include/asm/thread_info.h 2011-03-27 14:31:47.000000000 -0400
10324+++ linux-2.6.32.43/arch/x86/include/asm/thread_info.h 2011-05-17 19:26:34.000000000 -0400
10325@@ -10,6 +10,7 @@
10326 #include <linux/compiler.h>
10327 #include <asm/page.h>
10328 #include <asm/types.h>
10329+#include <asm/percpu.h>
10330
10331 /*
10332 * low level task data that entry.S needs immediate access to
10333@@ -24,7 +25,6 @@ struct exec_domain;
10334 #include <asm/atomic.h>
10335
10336 struct thread_info {
10337- struct task_struct *task; /* main task structure */
10338 struct exec_domain *exec_domain; /* execution domain */
10339 __u32 flags; /* low level flags */
10340 __u32 status; /* thread synchronous flags */
10341@@ -34,18 +34,12 @@ struct thread_info {
10342 mm_segment_t addr_limit;
10343 struct restart_block restart_block;
10344 void __user *sysenter_return;
10345-#ifdef CONFIG_X86_32
10346- unsigned long previous_esp; /* ESP of the previous stack in
10347- case of nested (IRQ) stacks
10348- */
10349- __u8 supervisor_stack[0];
10350-#endif
10351+ unsigned long lowest_stack;
10352 int uaccess_err;
10353 };
10354
10355-#define INIT_THREAD_INFO(tsk) \
10356+#define INIT_THREAD_INFO \
10357 { \
10358- .task = &tsk, \
10359 .exec_domain = &default_exec_domain, \
10360 .flags = 0, \
10361 .cpu = 0, \
10362@@ -56,7 +50,7 @@ struct thread_info {
10363 }, \
10364 }
10365
10366-#define init_thread_info (init_thread_union.thread_info)
10367+#define init_thread_info (init_thread_union.stack)
10368 #define init_stack (init_thread_union.stack)
10369
10370 #else /* !__ASSEMBLY__ */
10371@@ -163,6 +157,23 @@ struct thread_info {
10372 #define alloc_thread_info(tsk) \
10373 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
10374
10375+#ifdef __ASSEMBLY__
10376+/* how to get the thread information struct from ASM */
10377+#define GET_THREAD_INFO(reg) \
10378+ mov PER_CPU_VAR(current_tinfo), reg
10379+
10380+/* use this one if reg already contains %esp */
10381+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10382+#else
10383+/* how to get the thread information struct from C */
10384+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10385+
10386+static __always_inline struct thread_info *current_thread_info(void)
10387+{
10388+ return percpu_read_stable(current_tinfo);
10389+}
10390+#endif
10391+
10392 #ifdef CONFIG_X86_32
10393
10394 #define STACK_WARN (THREAD_SIZE/8)
10395@@ -173,35 +184,13 @@ struct thread_info {
10396 */
10397 #ifndef __ASSEMBLY__
10398
10399-
10400 /* how to get the current stack pointer from C */
10401 register unsigned long current_stack_pointer asm("esp") __used;
10402
10403-/* how to get the thread information struct from C */
10404-static inline struct thread_info *current_thread_info(void)
10405-{
10406- return (struct thread_info *)
10407- (current_stack_pointer & ~(THREAD_SIZE - 1));
10408-}
10409-
10410-#else /* !__ASSEMBLY__ */
10411-
10412-/* how to get the thread information struct from ASM */
10413-#define GET_THREAD_INFO(reg) \
10414- movl $-THREAD_SIZE, reg; \
10415- andl %esp, reg
10416-
10417-/* use this one if reg already contains %esp */
10418-#define GET_THREAD_INFO_WITH_ESP(reg) \
10419- andl $-THREAD_SIZE, reg
10420-
10421 #endif
10422
10423 #else /* X86_32 */
10424
10425-#include <asm/percpu.h>
10426-#define KERNEL_STACK_OFFSET (5*8)
10427-
10428 /*
10429 * macros/functions for gaining access to the thread information structure
10430 * preempt_count needs to be 1 initially, until the scheduler is functional.
10431@@ -209,21 +198,8 @@ static inline struct thread_info *curren
10432 #ifndef __ASSEMBLY__
10433 DECLARE_PER_CPU(unsigned long, kernel_stack);
10434
10435-static inline struct thread_info *current_thread_info(void)
10436-{
10437- struct thread_info *ti;
10438- ti = (void *)(percpu_read_stable(kernel_stack) +
10439- KERNEL_STACK_OFFSET - THREAD_SIZE);
10440- return ti;
10441-}
10442-
10443-#else /* !__ASSEMBLY__ */
10444-
10445-/* how to get the thread information struct from ASM */
10446-#define GET_THREAD_INFO(reg) \
10447- movq PER_CPU_VAR(kernel_stack),reg ; \
10448- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10449-
10450+/* how to get the current stack pointer from C */
10451+register unsigned long current_stack_pointer asm("rsp") __used;
10452 #endif
10453
10454 #endif /* !X86_32 */
10455@@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
10456 extern void free_thread_info(struct thread_info *ti);
10457 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10458 #define arch_task_cache_init arch_task_cache_init
10459+
10460+#define __HAVE_THREAD_FUNCTIONS
10461+#define task_thread_info(task) (&(task)->tinfo)
10462+#define task_stack_page(task) ((task)->stack)
10463+#define setup_thread_stack(p, org) do {} while (0)
10464+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10465+
10466+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10467+extern struct task_struct *alloc_task_struct(void);
10468+extern void free_task_struct(struct task_struct *);
10469+
10470 #endif
10471 #endif /* _ASM_X86_THREAD_INFO_H */
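
[Illustration, not part of the patch] The thread_info.h hunks above stop deriving thread_info from the stack pointer (or from kernel_stack on 64-bit) and instead read a per-CPU current_tinfo pointer. The sketch contrasts the old derivation, masking the stack pointer down to the THREAD_SIZE boundary, with the lookup-through-a-pointer style the patch switches to; THREAD_SIZE and the addresses are example values.

/* old stack-masking lookup vs. new explicit per-CPU pointer */
#include <stdio.h>

#define THREAD_SIZE 8192UL               /* assumed two-page kernel stack */

struct thread_info { int cpu; };

static struct thread_info *current_tinfo;   /* per-CPU variable in the patch */

int main(void)
{
    unsigned long sp  = 0xc15f3e40UL;               /* address inside the stack */
    unsigned long old = sp & ~(THREAD_SIZE - 1);    /* old: bottom of the stack */

    struct thread_info ti = { .cpu = 0 };
    current_tinfo = &ti;                            /* new: no masking needed   */

    printf("old-style thread_info at %#lx, new-style at %p\n",
           old, (void *)current_tinfo);
    return 0;
}
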
10472diff -urNp linux-2.6.32.43/arch/x86/include/asm/uaccess_32.h linux-2.6.32.43/arch/x86/include/asm/uaccess_32.h
10473--- linux-2.6.32.43/arch/x86/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
10474+++ linux-2.6.32.43/arch/x86/include/asm/uaccess_32.h 2011-05-16 21:46:57.000000000 -0400
10475@@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
10476 static __always_inline unsigned long __must_check
10477 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10478 {
10479+ pax_track_stack();
10480+
10481+ if ((long)n < 0)
10482+ return n;
10483+
10484 if (__builtin_constant_p(n)) {
10485 unsigned long ret;
10486
10487@@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
10488 return ret;
10489 }
10490 }
10491+ if (!__builtin_constant_p(n))
10492+ check_object_size(from, n, true);
10493 return __copy_to_user_ll(to, from, n);
10494 }
10495
10496@@ -83,12 +90,16 @@ static __always_inline unsigned long __m
10497 __copy_to_user(void __user *to, const void *from, unsigned long n)
10498 {
10499 might_fault();
10500+
10501 return __copy_to_user_inatomic(to, from, n);
10502 }
10503
10504 static __always_inline unsigned long
10505 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10506 {
10507+ if ((long)n < 0)
10508+ return n;
10509+
10510 /* Avoid zeroing the tail if the copy fails..
10511 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10512 * but as the zeroing behaviour is only significant when n is not
10513@@ -138,6 +149,12 @@ static __always_inline unsigned long
10514 __copy_from_user(void *to, const void __user *from, unsigned long n)
10515 {
10516 might_fault();
10517+
10518+ pax_track_stack();
10519+
10520+ if ((long)n < 0)
10521+ return n;
10522+
10523 if (__builtin_constant_p(n)) {
10524 unsigned long ret;
10525
10526@@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
10527 return ret;
10528 }
10529 }
10530+ if (!__builtin_constant_p(n))
10531+ check_object_size(to, n, false);
10532 return __copy_from_user_ll(to, from, n);
10533 }
10534
10535@@ -160,6 +179,10 @@ static __always_inline unsigned long __c
10536 const void __user *from, unsigned long n)
10537 {
10538 might_fault();
10539+
10540+ if ((long)n < 0)
10541+ return n;
10542+
10543 if (__builtin_constant_p(n)) {
10544 unsigned long ret;
10545
10546@@ -182,14 +205,62 @@ static __always_inline unsigned long
10547 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10548 unsigned long n)
10549 {
10550- return __copy_from_user_ll_nocache_nozero(to, from, n);
10551+ if ((long)n < 0)
10552+ return n;
10553+
10554+ return __copy_from_user_ll_nocache_nozero(to, from, n);
10555+}
10556+
10557+/**
10558+ * copy_to_user: - Copy a block of data into user space.
10559+ * @to: Destination address, in user space.
10560+ * @from: Source address, in kernel space.
10561+ * @n: Number of bytes to copy.
10562+ *
10563+ * Context: User context only. This function may sleep.
10564+ *
10565+ * Copy data from kernel space to user space.
10566+ *
10567+ * Returns number of bytes that could not be copied.
10568+ * On success, this will be zero.
10569+ */
10570+static __always_inline unsigned long __must_check
10571+copy_to_user(void __user *to, const void *from, unsigned long n)
10572+{
10573+ if (access_ok(VERIFY_WRITE, to, n))
10574+ n = __copy_to_user(to, from, n);
10575+ return n;
10576+}
10577+
10578+/**
10579+ * copy_from_user: - Copy a block of data from user space.
10580+ * @to: Destination address, in kernel space.
10581+ * @from: Source address, in user space.
10582+ * @n: Number of bytes to copy.
10583+ *
10584+ * Context: User context only. This function may sleep.
10585+ *
10586+ * Copy data from user space to kernel space.
10587+ *
10588+ * Returns number of bytes that could not be copied.
10589+ * On success, this will be zero.
10590+ *
10591+ * If some data could not be copied, this function will pad the copied
10592+ * data to the requested size using zero bytes.
10593+ */
10594+static __always_inline unsigned long __must_check
10595+copy_from_user(void *to, const void __user *from, unsigned long n)
10596+{
10597+ if (access_ok(VERIFY_READ, from, n))
10598+ n = __copy_from_user(to, from, n);
10599+ else if ((long)n > 0) {
10600+ if (!__builtin_constant_p(n))
10601+ check_object_size(to, n, false);
10602+ memset(to, 0, n);
10603+ }
10604+ return n;
10605 }
10606
10607-unsigned long __must_check copy_to_user(void __user *to,
10608- const void *from, unsigned long n);
10609-unsigned long __must_check copy_from_user(void *to,
10610- const void __user *from,
10611- unsigned long n);
10612 long __must_check strncpy_from_user(char *dst, const char __user *src,
10613 long count);
10614 long __must_check __strncpy_from_user(char *dst,
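
[Illustration, not part of the patch] The uaccess_32.h hunks above add "if ((long)n < 0) return n;" in front of every copy helper. The point is that a length produced by an underflowed subtraction becomes a huge unsigned value with its top bit set, so casting it to a signed long makes it look negative and the copy can be refused before it starts. A small demonstration of that test:

/* show how an underflowed length is caught by the (long)n < 0 check */
#include <stdio.h>

static unsigned long checked_len(unsigned long n)
{
    if ((long)n < 0) {
        printf("refusing suspicious length %#lx\n", n);
        return n;                  /* reported back as "not copied" */
    }
    printf("length %#lx looks sane\n", n);
    return 0;
}

int main(void)
{
    unsigned long have = 16, want = 64;

    checked_len(want - 32);        /* 32: fine                           */
    checked_len(have - want);      /* 16 - 64 underflows near ULONG_MAX  */
    return 0;
}
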
10615diff -urNp linux-2.6.32.43/arch/x86/include/asm/uaccess_64.h linux-2.6.32.43/arch/x86/include/asm/uaccess_64.h
10616--- linux-2.6.32.43/arch/x86/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
10617+++ linux-2.6.32.43/arch/x86/include/asm/uaccess_64.h 2011-05-16 21:46:57.000000000 -0400
10618@@ -9,6 +9,9 @@
10619 #include <linux/prefetch.h>
10620 #include <linux/lockdep.h>
10621 #include <asm/page.h>
10622+#include <asm/pgtable.h>
10623+
10624+#define set_fs(x) (current_thread_info()->addr_limit = (x))
10625
10626 /*
10627 * Copy To/From Userspace
10628@@ -19,113 +22,203 @@ __must_check unsigned long
10629 copy_user_generic(void *to, const void *from, unsigned len);
10630
10631 __must_check unsigned long
10632-copy_to_user(void __user *to, const void *from, unsigned len);
10633-__must_check unsigned long
10634-copy_from_user(void *to, const void __user *from, unsigned len);
10635-__must_check unsigned long
10636 copy_in_user(void __user *to, const void __user *from, unsigned len);
10637
10638 static __always_inline __must_check
10639-int __copy_from_user(void *dst, const void __user *src, unsigned size)
10640+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10641 {
10642- int ret = 0;
10643+ unsigned ret = 0;
10644
10645 might_fault();
10646- if (!__builtin_constant_p(size))
10647- return copy_user_generic(dst, (__force void *)src, size);
10648+
10649+ if ((int)size < 0)
10650+ return size;
10651+
10652+#ifdef CONFIG_PAX_MEMORY_UDEREF
10653+ if (!__access_ok(VERIFY_READ, src, size))
10654+ return size;
10655+#endif
10656+
10657+ if (!__builtin_constant_p(size)) {
10658+ check_object_size(dst, size, false);
10659+
10660+#ifdef CONFIG_PAX_MEMORY_UDEREF
10661+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10662+ src += PAX_USER_SHADOW_BASE;
10663+#endif
10664+
10665+ return copy_user_generic(dst, (__force const void *)src, size);
10666+ }
10667 switch (size) {
10668- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10669+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10670 ret, "b", "b", "=q", 1);
10671 return ret;
10672- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10673+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10674 ret, "w", "w", "=r", 2);
10675 return ret;
10676- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10677+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10678 ret, "l", "k", "=r", 4);
10679 return ret;
10680- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10681+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10682 ret, "q", "", "=r", 8);
10683 return ret;
10684 case 10:
10685- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10686+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10687 ret, "q", "", "=r", 10);
10688 if (unlikely(ret))
10689 return ret;
10690 __get_user_asm(*(u16 *)(8 + (char *)dst),
10691- (u16 __user *)(8 + (char __user *)src),
10692+ (const u16 __user *)(8 + (const char __user *)src),
10693 ret, "w", "w", "=r", 2);
10694 return ret;
10695 case 16:
10696- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10697+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10698 ret, "q", "", "=r", 16);
10699 if (unlikely(ret))
10700 return ret;
10701 __get_user_asm(*(u64 *)(8 + (char *)dst),
10702- (u64 __user *)(8 + (char __user *)src),
10703+ (const u64 __user *)(8 + (const char __user *)src),
10704 ret, "q", "", "=r", 8);
10705 return ret;
10706 default:
10707- return copy_user_generic(dst, (__force void *)src, size);
10708+
10709+#ifdef CONFIG_PAX_MEMORY_UDEREF
10710+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10711+ src += PAX_USER_SHADOW_BASE;
10712+#endif
10713+
10714+ return copy_user_generic(dst, (__force const void *)src, size);
10715 }
10716 }
10717
10718 static __always_inline __must_check
10719-int __copy_to_user(void __user *dst, const void *src, unsigned size)
10720+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10721 {
10722- int ret = 0;
10723+ unsigned ret = 0;
10724
10725 might_fault();
10726- if (!__builtin_constant_p(size))
10727+
10728+ pax_track_stack();
10729+
10730+ if ((int)size < 0)
10731+ return size;
10732+
10733+#ifdef CONFIG_PAX_MEMORY_UDEREF
10734+ if (!__access_ok(VERIFY_WRITE, dst, size))
10735+ return size;
10736+#endif
10737+
10738+ if (!__builtin_constant_p(size)) {
10739+ check_object_size(src, size, true);
10740+
10741+#ifdef CONFIG_PAX_MEMORY_UDEREF
10742+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10743+ dst += PAX_USER_SHADOW_BASE;
10744+#endif
10745+
10746 return copy_user_generic((__force void *)dst, src, size);
10747+ }
10748 switch (size) {
10749- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10750+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10751 ret, "b", "b", "iq", 1);
10752 return ret;
10753- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10754+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10755 ret, "w", "w", "ir", 2);
10756 return ret;
10757- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
10758+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
10759 ret, "l", "k", "ir", 4);
10760 return ret;
10761- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
10762+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10763 ret, "q", "", "er", 8);
10764 return ret;
10765 case 10:
10766- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10767+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10768 ret, "q", "", "er", 10);
10769 if (unlikely(ret))
10770 return ret;
10771 asm("":::"memory");
10772- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
10773+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
10774 ret, "w", "w", "ir", 2);
10775 return ret;
10776 case 16:
10777- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10778+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10779 ret, "q", "", "er", 16);
10780 if (unlikely(ret))
10781 return ret;
10782 asm("":::"memory");
10783- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
10784+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
10785 ret, "q", "", "er", 8);
10786 return ret;
10787 default:
10788+
10789+#ifdef CONFIG_PAX_MEMORY_UDEREF
10790+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10791+ dst += PAX_USER_SHADOW_BASE;
10792+#endif
10793+
10794 return copy_user_generic((__force void *)dst, src, size);
10795 }
10796 }
10797
10798 static __always_inline __must_check
10799-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10800+unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
10801+{
10802+ if (access_ok(VERIFY_WRITE, to, len))
10803+ len = __copy_to_user(to, from, len);
10804+ return len;
10805+}
10806+
10807+static __always_inline __must_check
10808+unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
10809+{
10810+ if ((int)len < 0)
10811+ return len;
10812+
10813+ if (access_ok(VERIFY_READ, from, len))
10814+ len = __copy_from_user(to, from, len);
10815+ else if ((int)len > 0) {
10816+ if (!__builtin_constant_p(len))
10817+ check_object_size(to, len, false);
10818+ memset(to, 0, len);
10819+ }
10820+ return len;
10821+}
10822+
10823+static __always_inline __must_check
10824+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10825 {
10826- int ret = 0;
10827+ unsigned ret = 0;
10828
10829 might_fault();
10830- if (!__builtin_constant_p(size))
10831+
10832+ pax_track_stack();
10833+
10834+ if ((int)size < 0)
10835+ return size;
10836+
10837+#ifdef CONFIG_PAX_MEMORY_UDEREF
10838+ if (!__access_ok(VERIFY_READ, src, size))
10839+ return size;
10840+ if (!__access_ok(VERIFY_WRITE, dst, size))
10841+ return size;
10842+#endif
10843+
10844+ if (!__builtin_constant_p(size)) {
10845+
10846+#ifdef CONFIG_PAX_MEMORY_UDEREF
10847+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10848+ src += PAX_USER_SHADOW_BASE;
10849+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10850+ dst += PAX_USER_SHADOW_BASE;
10851+#endif
10852+
10853 return copy_user_generic((__force void *)dst,
10854- (__force void *)src, size);
10855+ (__force const void *)src, size);
10856+ }
10857 switch (size) {
10858 case 1: {
10859 u8 tmp;
10860- __get_user_asm(tmp, (u8 __user *)src,
10861+ __get_user_asm(tmp, (const u8 __user *)src,
10862 ret, "b", "b", "=q", 1);
10863 if (likely(!ret))
10864 __put_user_asm(tmp, (u8 __user *)dst,
10865@@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, con
10866 }
10867 case 2: {
10868 u16 tmp;
10869- __get_user_asm(tmp, (u16 __user *)src,
10870+ __get_user_asm(tmp, (const u16 __user *)src,
10871 ret, "w", "w", "=r", 2);
10872 if (likely(!ret))
10873 __put_user_asm(tmp, (u16 __user *)dst,
10874@@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, con
10875
10876 case 4: {
10877 u32 tmp;
10878- __get_user_asm(tmp, (u32 __user *)src,
10879+ __get_user_asm(tmp, (const u32 __user *)src,
10880 ret, "l", "k", "=r", 4);
10881 if (likely(!ret))
10882 __put_user_asm(tmp, (u32 __user *)dst,
10883@@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, con
10884 }
10885 case 8: {
10886 u64 tmp;
10887- __get_user_asm(tmp, (u64 __user *)src,
10888+ __get_user_asm(tmp, (const u64 __user *)src,
10889 ret, "q", "", "=r", 8);
10890 if (likely(!ret))
10891 __put_user_asm(tmp, (u64 __user *)dst,
10892@@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, con
10893 return ret;
10894 }
10895 default:
10896+
10897+#ifdef CONFIG_PAX_MEMORY_UDEREF
10898+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10899+ src += PAX_USER_SHADOW_BASE;
10900+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10901+ dst += PAX_USER_SHADOW_BASE;
10902+#endif
10903+
10904 return copy_user_generic((__force void *)dst,
10905- (__force void *)src, size);
10906+ (__force const void *)src, size);
10907 }
10908 }
10909
10910@@ -176,33 +277,75 @@ __must_check long strlen_user(const char
10911 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
10912 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
10913
10914-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
10915- unsigned size);
10916+static __must_check __always_inline unsigned long
10917+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
10918+{
10919+ pax_track_stack();
10920+
10921+ if ((int)size < 0)
10922+ return size;
10923
10924-static __must_check __always_inline int
10925+#ifdef CONFIG_PAX_MEMORY_UDEREF
10926+ if (!__access_ok(VERIFY_READ, src, size))
10927+ return size;
10928+
10929+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10930+ src += PAX_USER_SHADOW_BASE;
10931+#endif
10932+
10933+ return copy_user_generic(dst, (__force const void *)src, size);
10934+}
10935+
10936+static __must_check __always_inline unsigned long
10937 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
10938 {
10939+ if ((int)size < 0)
10940+ return size;
10941+
10942+#ifdef CONFIG_PAX_MEMORY_UDEREF
10943+ if (!__access_ok(VERIFY_WRITE, dst, size))
10944+ return size;
10945+
10946+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10947+ dst += PAX_USER_SHADOW_BASE;
10948+#endif
10949+
10950 return copy_user_generic((__force void *)dst, src, size);
10951 }
10952
10953-extern long __copy_user_nocache(void *dst, const void __user *src,
10954+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
10955 unsigned size, int zerorest);
10956
10957-static inline int
10958-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10959+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
10960 {
10961 might_sleep();
10962+
10963+ if ((int)size < 0)
10964+ return size;
10965+
10966+#ifdef CONFIG_PAX_MEMORY_UDEREF
10967+ if (!__access_ok(VERIFY_READ, src, size))
10968+ return size;
10969+#endif
10970+
10971 return __copy_user_nocache(dst, src, size, 1);
10972 }
10973
10974-static inline int
10975-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10976+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
10977 unsigned size)
10978 {
10979+ if ((int)size < 0)
10980+ return size;
10981+
10982+#ifdef CONFIG_PAX_MEMORY_UDEREF
10983+ if (!__access_ok(VERIFY_READ, src, size))
10984+ return size;
10985+#endif
10986+
10987 return __copy_user_nocache(dst, src, size, 0);
10988 }
10989
10990-unsigned long
10991+extern unsigned long
10992 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
10993
10994 #endif /* _ASM_X86_UACCESS_64_H */
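
[Illustration, not part of the patch] Under CONFIG_PAX_MEMORY_UDEREF the uaccess_64.h hunks above rebase user pointers by PAX_USER_SHADOW_BASE before calling copy_user_generic(), so userland is only reachable from kernel copies through a shifted alias. A sketch of that pointer adjustment on a 64-bit build, assuming TASK_SIZE_MAX_SHIFT == 47 (in the patch it comes from the Kconfig value CONFIG_TASK_SIZE_MAX_SHIFT introduced in the pgtable.h hunk earlier):

/* rebase a user pointer into the assumed UDEREF shadow region */
#include <stdio.h>

#define TASK_SIZE_MAX_SHIFT  47
#define PAX_USER_SHADOW_BASE (1UL << TASK_SIZE_MAX_SHIFT)

static const void *uderef_adjust(const void *uptr)
{
    unsigned long addr = (unsigned long)uptr;

    if (addr < PAX_USER_SHADOW_BASE)     /* looks like a userland address */
        addr += PAX_USER_SHADOW_BASE;
    return (const void *)addr;
}

int main(void)
{
    const void *user_ptr = (const void *)0x00007f0012345000UL;

    printf("user %p is accessed through shadow %p\n",
           user_ptr, (void *)uderef_adjust(user_ptr));
    return 0;
}
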
10995diff -urNp linux-2.6.32.43/arch/x86/include/asm/uaccess.h linux-2.6.32.43/arch/x86/include/asm/uaccess.h
10996--- linux-2.6.32.43/arch/x86/include/asm/uaccess.h 2011-06-25 12:55:34.000000000 -0400
10997+++ linux-2.6.32.43/arch/x86/include/asm/uaccess.h 2011-06-25 12:56:37.000000000 -0400
10998@@ -8,12 +8,15 @@
10999 #include <linux/thread_info.h>
11000 #include <linux/prefetch.h>
11001 #include <linux/string.h>
11002+#include <linux/sched.h>
11003 #include <asm/asm.h>
11004 #include <asm/page.h>
11005
11006 #define VERIFY_READ 0
11007 #define VERIFY_WRITE 1
11008
11009+extern void check_object_size(const void *ptr, unsigned long n, bool to);
11010+
11011 /*
11012 * The fs value determines whether argument validity checking should be
11013 * performed or not. If get_fs() == USER_DS, checking is performed, with
11014@@ -29,7 +32,12 @@
11015
11016 #define get_ds() (KERNEL_DS)
11017 #define get_fs() (current_thread_info()->addr_limit)
11018+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11019+void __set_fs(mm_segment_t x);
11020+void set_fs(mm_segment_t x);
11021+#else
11022 #define set_fs(x) (current_thread_info()->addr_limit = (x))
11023+#endif
11024
11025 #define segment_eq(a, b) ((a).seg == (b).seg)
11026
11027@@ -77,7 +85,33 @@
11028 * checks that the pointer is in the user space range - after calling
11029 * this function, memory access functions may still return -EFAULT.
11030 */
11031-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11032+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11033+#define access_ok(type, addr, size) \
11034+({ \
11035+ long __size = size; \
11036+ unsigned long __addr = (unsigned long)addr; \
11037+ unsigned long __addr_ao = __addr & PAGE_MASK; \
11038+ unsigned long __end_ao = __addr + __size - 1; \
11039+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
11040+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
11041+ while(__addr_ao <= __end_ao) { \
11042+ char __c_ao; \
11043+ __addr_ao += PAGE_SIZE; \
11044+ if (__size > PAGE_SIZE) \
11045+ cond_resched(); \
11046+ if (__get_user(__c_ao, (char __user *)__addr)) \
11047+ break; \
11048+ if (type != VERIFY_WRITE) { \
11049+ __addr = __addr_ao; \
11050+ continue; \
11051+ } \
11052+ if (__put_user(__c_ao, (char __user *)__addr)) \
11053+ break; \
11054+ __addr = __addr_ao; \
11055+ } \
11056+ } \
11057+ __ret_ao; \
11058+})
11059
11060 /*
11061 * The exception table consists of pairs of addresses: the first is the
11062@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
11063 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
11064 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
11065
11066-
11067+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11068+#define __copyuser_seg "gs;"
11069+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
11070+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
11071+#else
11072+#define __copyuser_seg
11073+#define __COPYUSER_SET_ES
11074+#define __COPYUSER_RESTORE_ES
11075+#endif
11076
11077 #ifdef CONFIG_X86_32
11078 #define __put_user_asm_u64(x, addr, err, errret) \
11079- asm volatile("1: movl %%eax,0(%2)\n" \
11080- "2: movl %%edx,4(%2)\n" \
11081+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
11082+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
11083 "3:\n" \
11084 ".section .fixup,\"ax\"\n" \
11085 "4: movl %3,%0\n" \
11086@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
11087 : "A" (x), "r" (addr), "i" (errret), "0" (err))
11088
11089 #define __put_user_asm_ex_u64(x, addr) \
11090- asm volatile("1: movl %%eax,0(%1)\n" \
11091- "2: movl %%edx,4(%1)\n" \
11092+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
11093+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
11094 "3:\n" \
11095 _ASM_EXTABLE(1b, 2b - 1b) \
11096 _ASM_EXTABLE(2b, 3b - 2b) \
11097@@ -374,7 +416,7 @@ do { \
11098 } while (0)
11099
11100 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11101- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
11102+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
11103 "2:\n" \
11104 ".section .fixup,\"ax\"\n" \
11105 "3: mov %3,%0\n" \
11106@@ -382,7 +424,7 @@ do { \
11107 " jmp 2b\n" \
11108 ".previous\n" \
11109 _ASM_EXTABLE(1b, 3b) \
11110- : "=r" (err), ltype(x) \
11111+ : "=r" (err), ltype (x) \
11112 : "m" (__m(addr)), "i" (errret), "0" (err))
11113
11114 #define __get_user_size_ex(x, ptr, size) \
11115@@ -407,7 +449,7 @@ do { \
11116 } while (0)
11117
11118 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
11119- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
11120+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
11121 "2:\n" \
11122 _ASM_EXTABLE(1b, 2b - 1b) \
11123 : ltype(x) : "m" (__m(addr)))
11124@@ -424,13 +466,24 @@ do { \
11125 int __gu_err; \
11126 unsigned long __gu_val; \
11127 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
11128- (x) = (__force __typeof__(*(ptr)))__gu_val; \
11129+ (x) = (__typeof__(*(ptr)))__gu_val; \
11130 __gu_err; \
11131 })
11132
11133 /* FIXME: this hack is definitely wrong -AK */
11134 struct __large_struct { unsigned long buf[100]; };
11135-#define __m(x) (*(struct __large_struct __user *)(x))
11136+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11137+#define ____m(x) \
11138+({ \
11139+ unsigned long ____x = (unsigned long)(x); \
11140+ if (____x < PAX_USER_SHADOW_BASE) \
11141+ ____x += PAX_USER_SHADOW_BASE; \
11142+ (void __user *)____x; \
11143+})
11144+#else
11145+#define ____m(x) (x)
11146+#endif
11147+#define __m(x) (*(struct __large_struct __user *)____m(x))
11148
11149 /*
11150 * Tell gcc we read from memory instead of writing: this is because
11151@@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
11152 * aliasing issues.
11153 */
11154 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11155- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
11156+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
11157 "2:\n" \
11158 ".section .fixup,\"ax\"\n" \
11159 "3: mov %3,%0\n" \
11160@@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
11161 ".previous\n" \
11162 _ASM_EXTABLE(1b, 3b) \
11163 : "=r"(err) \
11164- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
11165+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
11166
11167 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
11168- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
11169+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
11170 "2:\n" \
11171 _ASM_EXTABLE(1b, 2b - 1b) \
11172 : : ltype(x), "m" (__m(addr)))
11173@@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
11174 * On error, the variable @x is set to zero.
11175 */
11176
11177+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11178+#define __get_user(x, ptr) get_user((x), (ptr))
11179+#else
11180 #define __get_user(x, ptr) \
11181 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
11182+#endif
11183
11184 /**
11185 * __put_user: - Write a simple value into user space, with less checking.
11186@@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
11187 * Returns zero on success, or -EFAULT on error.
11188 */
11189
11190+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11191+#define __put_user(x, ptr) put_user((x), (ptr))
11192+#else
11193 #define __put_user(x, ptr) \
11194 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
11195+#endif
11196
11197 #define __get_user_unaligned __get_user
11198 #define __put_user_unaligned __put_user
11199@@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
11200 #define get_user_ex(x, ptr) do { \
11201 unsigned long __gue_val; \
11202 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
11203- (x) = (__force __typeof__(*(ptr)))__gue_val; \
11204+ (x) = (__typeof__(*(ptr)))__gue_val; \
11205 } while (0)
11206
11207 #ifdef CONFIG_X86_WP_WORKS_OK
11208@@ -567,6 +628,7 @@ extern struct movsl_mask {
11209
11210 #define ARCH_HAS_NOCACHE_UACCESS 1
11211
11212+#define ARCH_HAS_SORT_EXTABLE
11213 #ifdef CONFIG_X86_32
11214 # include "uaccess_32.h"
11215 #else
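
The access_ok() rewrite above adds prefaulting on top of the plain range check: when the range crosses a page boundary it walks it one page at a time, reading a byte from each page (and writing it back for VERIFY_WRITE) so the pages are resident before the real copy runs. A rough user-space analogue of that per-page touch loop, with prefault_range() and the buffer size invented for the sketch:

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/* Touch one byte in every page of [addr, addr + size) so the pages are
 * faulted in before a later bulk access; 'writable' mirrors the
 * VERIFY_WRITE case, where the byte is also written back. */
static void prefault_range(void *addr, size_t size, int writable)
{
	size_t page = (size_t)sysconf(_SC_PAGESIZE);
	volatile char *p = addr;
	size_t off;

	for (off = 0; off < size; off += page) {
		char c = p[off];	/* read faults the page in */
		if (writable)
			p[off] = c;	/* write back to dirty it */
	}
}

int main(void)
{
	size_t len = 8 * 65536;		/* spans many pages on any page size */
	char *buf = malloc(len);

	if (!buf)
		return 1;
	prefault_range(buf, len, 1);
	printf("touched %zu bytes page by page\n", len);
	free(buf);
	return 0;
}
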
11216diff -urNp linux-2.6.32.43/arch/x86/include/asm/vgtod.h linux-2.6.32.43/arch/x86/include/asm/vgtod.h
11217--- linux-2.6.32.43/arch/x86/include/asm/vgtod.h 2011-03-27 14:31:47.000000000 -0400
11218+++ linux-2.6.32.43/arch/x86/include/asm/vgtod.h 2011-04-17 15:56:46.000000000 -0400
11219@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
11220 int sysctl_enabled;
11221 struct timezone sys_tz;
11222 struct { /* extract of a clocksource struct */
11223+ char name[8];
11224 cycle_t (*vread)(void);
11225 cycle_t cycle_last;
11226 cycle_t mask;
11227diff -urNp linux-2.6.32.43/arch/x86/include/asm/vmi.h linux-2.6.32.43/arch/x86/include/asm/vmi.h
11228--- linux-2.6.32.43/arch/x86/include/asm/vmi.h 2011-03-27 14:31:47.000000000 -0400
11229+++ linux-2.6.32.43/arch/x86/include/asm/vmi.h 2011-04-17 15:56:46.000000000 -0400
11230@@ -191,6 +191,7 @@ struct vrom_header {
11231 u8 reserved[96]; /* Reserved for headers */
11232 char vmi_init[8]; /* VMI_Init jump point */
11233 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
11234+ char rom_data[8048]; /* rest of the option ROM */
11235 } __attribute__((packed));
11236
11237 struct pnp_header {
11238diff -urNp linux-2.6.32.43/arch/x86/include/asm/vsyscall.h linux-2.6.32.43/arch/x86/include/asm/vsyscall.h
11239--- linux-2.6.32.43/arch/x86/include/asm/vsyscall.h 2011-03-27 14:31:47.000000000 -0400
11240+++ linux-2.6.32.43/arch/x86/include/asm/vsyscall.h 2011-04-17 15:56:46.000000000 -0400
11241@@ -15,9 +15,10 @@ enum vsyscall_num {
11242
11243 #ifdef __KERNEL__
11244 #include <linux/seqlock.h>
11245+#include <linux/getcpu.h>
11246+#include <linux/time.h>
11247
11248 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
11249-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
11250
11251 /* Definitions for CONFIG_GENERIC_TIME definitions */
11252 #define __section_vsyscall_gtod_data __attribute__ \
11253@@ -31,7 +32,6 @@ enum vsyscall_num {
11254 #define VGETCPU_LSL 2
11255
11256 extern int __vgetcpu_mode;
11257-extern volatile unsigned long __jiffies;
11258
11259 /* kernel space (writeable) */
11260 extern int vgetcpu_mode;
11261@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
11262
11263 extern void map_vsyscall(void);
11264
11265+extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
11266+extern time_t vtime(time_t *t);
11267+extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
11268 #endif /* __KERNEL__ */
11269
11270 #endif /* _ASM_X86_VSYSCALL_H */
11271diff -urNp linux-2.6.32.43/arch/x86/include/asm/xsave.h linux-2.6.32.43/arch/x86/include/asm/xsave.h
11272--- linux-2.6.32.43/arch/x86/include/asm/xsave.h 2011-03-27 14:31:47.000000000 -0400
11273+++ linux-2.6.32.43/arch/x86/include/asm/xsave.h 2011-04-17 15:56:46.000000000 -0400
11274@@ -56,6 +56,12 @@ static inline int xrstor_checking(struct
11275 static inline int xsave_user(struct xsave_struct __user *buf)
11276 {
11277 int err;
11278+
11279+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11280+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11281+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11282+#endif
11283+
11284 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
11285 "2:\n"
11286 ".section .fixup,\"ax\"\n"
11287@@ -82,6 +88,11 @@ static inline int xrestore_user(struct x
11288 u32 lmask = mask;
11289 u32 hmask = mask >> 32;
11290
11291+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11292+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11293+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11294+#endif
11295+
11296 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11297 "2:\n"
11298 ".section .fixup,\"ax\"\n"
11299diff -urNp linux-2.6.32.43/arch/x86/Kconfig linux-2.6.32.43/arch/x86/Kconfig
11300--- linux-2.6.32.43/arch/x86/Kconfig 2011-03-27 14:31:47.000000000 -0400
11301+++ linux-2.6.32.43/arch/x86/Kconfig 2011-04-17 15:56:46.000000000 -0400
11302@@ -223,7 +223,7 @@ config X86_TRAMPOLINE
11303
11304 config X86_32_LAZY_GS
11305 def_bool y
11306- depends on X86_32 && !CC_STACKPROTECTOR
11307+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11308
11309 config KTIME_SCALAR
11310 def_bool X86_32
11311@@ -1008,7 +1008,7 @@ choice
11312
11313 config NOHIGHMEM
11314 bool "off"
11315- depends on !X86_NUMAQ
11316+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11317 ---help---
11318 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11319 However, the address space of 32-bit x86 processors is only 4
11320@@ -1045,7 +1045,7 @@ config NOHIGHMEM
11321
11322 config HIGHMEM4G
11323 bool "4GB"
11324- depends on !X86_NUMAQ
11325+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11326 ---help---
11327 Select this if you have a 32-bit processor and between 1 and 4
11328 gigabytes of physical RAM.
11329@@ -1099,7 +1099,7 @@ config PAGE_OFFSET
11330 hex
11331 default 0xB0000000 if VMSPLIT_3G_OPT
11332 default 0x80000000 if VMSPLIT_2G
11333- default 0x78000000 if VMSPLIT_2G_OPT
11334+ default 0x70000000 if VMSPLIT_2G_OPT
11335 default 0x40000000 if VMSPLIT_1G
11336 default 0xC0000000
11337 depends on X86_32
11338@@ -1430,7 +1430,7 @@ config ARCH_USES_PG_UNCACHED
11339
11340 config EFI
11341 bool "EFI runtime service support"
11342- depends on ACPI
11343+ depends on ACPI && !PAX_KERNEXEC
11344 ---help---
11345 This enables the kernel to use EFI runtime services that are
11346 available (such as the EFI variable services).
11347@@ -1460,6 +1460,7 @@ config SECCOMP
11348
11349 config CC_STACKPROTECTOR
11350 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
11351+ depends on X86_64 || !PAX_MEMORY_UDEREF
11352 ---help---
11353 This option turns on the -fstack-protector GCC feature. This
11354 feature puts, at the beginning of functions, a canary value on
11355@@ -1517,6 +1518,7 @@ config KEXEC_JUMP
11356 config PHYSICAL_START
11357 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
11358 default "0x1000000"
11359+ range 0x400000 0x40000000
11360 ---help---
11361 This gives the physical address where the kernel is loaded.
11362
11363@@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
11364 hex
11365 prompt "Alignment value to which kernel should be aligned" if X86_32
11366 default "0x1000000"
11367+ range 0x400000 0x1000000 if PAX_KERNEXEC
11368 range 0x2000 0x1000000
11369 ---help---
11370 This value puts the alignment restrictions on physical address
11371@@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
11372 Say N if you want to disable CPU hotplug.
11373
11374 config COMPAT_VDSO
11375- def_bool y
11376+ def_bool n
11377 prompt "Compat VDSO support"
11378 depends on X86_32 || IA32_EMULATION
11379+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
11380 ---help---
11381 Map the 32-bit VDSO to the predictable old-style address too.
11382 ---help---
11383diff -urNp linux-2.6.32.43/arch/x86/Kconfig.cpu linux-2.6.32.43/arch/x86/Kconfig.cpu
11384--- linux-2.6.32.43/arch/x86/Kconfig.cpu 2011-03-27 14:31:47.000000000 -0400
11385+++ linux-2.6.32.43/arch/x86/Kconfig.cpu 2011-04-17 15:56:46.000000000 -0400
11386@@ -340,7 +340,7 @@ config X86_PPRO_FENCE
11387
11388 config X86_F00F_BUG
11389 def_bool y
11390- depends on M586MMX || M586TSC || M586 || M486 || M386
11391+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
11392
11393 config X86_WP_WORKS_OK
11394 def_bool y
11395@@ -360,7 +360,7 @@ config X86_POPAD_OK
11396
11397 config X86_ALIGNMENT_16
11398 def_bool y
11399- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11400+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11401
11402 config X86_INTEL_USERCOPY
11403 def_bool y
11404@@ -406,7 +406,7 @@ config X86_CMPXCHG64
11405 # generates cmov.
11406 config X86_CMOV
11407 def_bool y
11408- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11409+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11410
11411 config X86_MINIMUM_CPU_FAMILY
11412 int
11413diff -urNp linux-2.6.32.43/arch/x86/Kconfig.debug linux-2.6.32.43/arch/x86/Kconfig.debug
11414--- linux-2.6.32.43/arch/x86/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
11415+++ linux-2.6.32.43/arch/x86/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
11416@@ -99,7 +99,7 @@ config X86_PTDUMP
11417 config DEBUG_RODATA
11418 bool "Write protect kernel read-only data structures"
11419 default y
11420- depends on DEBUG_KERNEL
11421+ depends on DEBUG_KERNEL && BROKEN
11422 ---help---
11423 Mark the kernel read-only data as write-protected in the pagetables,
11424 in order to catch accidental (and incorrect) writes to such const
11425diff -urNp linux-2.6.32.43/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.32.43/arch/x86/kernel/acpi/realmode/wakeup.S
11426--- linux-2.6.32.43/arch/x86/kernel/acpi/realmode/wakeup.S 2011-03-27 14:31:47.000000000 -0400
11427+++ linux-2.6.32.43/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-01 18:53:40.000000000 -0400
11428@@ -91,6 +91,9 @@ _start:
11429 /* Do any other stuff... */
11430
11431 #ifndef CONFIG_64BIT
11432+ /* Recheck NX bit overrides (64bit path does this in trampoline) */
11433+ call verify_cpu
11434+
11435 /* This could also be done in C code... */
11436 movl pmode_cr3, %eax
11437 movl %eax, %cr3
11438@@ -104,7 +107,7 @@ _start:
11439 movl %eax, %ecx
11440 orl %edx, %ecx
11441 jz 1f
11442- movl $0xc0000080, %ecx
11443+ mov $MSR_EFER, %ecx
11444 wrmsr
11445 1:
11446
11447@@ -114,6 +117,7 @@ _start:
11448 movl pmode_cr0, %eax
11449 movl %eax, %cr0
11450 jmp pmode_return
11451+# include "../../verify_cpu.S"
11452 #else
11453 pushw $0
11454 pushw trampoline_segment
11455diff -urNp linux-2.6.32.43/arch/x86/kernel/acpi/sleep.c linux-2.6.32.43/arch/x86/kernel/acpi/sleep.c
11456--- linux-2.6.32.43/arch/x86/kernel/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
11457+++ linux-2.6.32.43/arch/x86/kernel/acpi/sleep.c 2011-07-01 19:01:34.000000000 -0400
11458@@ -11,11 +11,12 @@
11459 #include <linux/cpumask.h>
11460 #include <asm/segment.h>
11461 #include <asm/desc.h>
11462+#include <asm/e820.h>
11463
11464 #include "realmode/wakeup.h"
11465 #include "sleep.h"
11466
11467-unsigned long acpi_wakeup_address;
11468+unsigned long acpi_wakeup_address = 0x2000;
11469 unsigned long acpi_realmode_flags;
11470
11471 /* address in low memory of the wakeup routine. */
11472@@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
11473 #else /* CONFIG_64BIT */
11474 header->trampoline_segment = setup_trampoline() >> 4;
11475 #ifdef CONFIG_SMP
11476- stack_start.sp = temp_stack + sizeof(temp_stack);
11477+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11478+
11479+ pax_open_kernel();
11480 early_gdt_descr.address =
11481 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11482+ pax_close_kernel();
11483+
11484 initial_gs = per_cpu_offset(smp_processor_id());
11485 #endif
11486 initial_code = (unsigned long)wakeup_long64;
11487@@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
11488 return;
11489 }
11490
11491- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
11492-
11493- if (!acpi_realmode) {
11494- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
11495- return;
11496- }
11497-
11498- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
11499+ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
11500+ acpi_realmode = (unsigned long)__va(acpi_wakeup_address);

11501 }
11502
11503
11504diff -urNp linux-2.6.32.43/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.32.43/arch/x86/kernel/acpi/wakeup_32.S
11505--- linux-2.6.32.43/arch/x86/kernel/acpi/wakeup_32.S 2011-03-27 14:31:47.000000000 -0400
11506+++ linux-2.6.32.43/arch/x86/kernel/acpi/wakeup_32.S 2011-04-17 15:56:46.000000000 -0400
11507@@ -30,13 +30,11 @@ wakeup_pmode_return:
11508 # and restore the stack ... but you need gdt for this to work
11509 movl saved_context_esp, %esp
11510
11511- movl %cs:saved_magic, %eax
11512- cmpl $0x12345678, %eax
11513+ cmpl $0x12345678, saved_magic
11514 jne bogus_magic
11515
11516 # jump to place where we left off
11517- movl saved_eip, %eax
11518- jmp *%eax
11519+ jmp *(saved_eip)
11520
11521 bogus_magic:
11522 jmp bogus_magic
11523diff -urNp linux-2.6.32.43/arch/x86/kernel/alternative.c linux-2.6.32.43/arch/x86/kernel/alternative.c
11524--- linux-2.6.32.43/arch/x86/kernel/alternative.c 2011-03-27 14:31:47.000000000 -0400
11525+++ linux-2.6.32.43/arch/x86/kernel/alternative.c 2011-04-17 15:56:46.000000000 -0400
11526@@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(str
11527
11528 BUG_ON(p->len > MAX_PATCH_LEN);
11529 /* prep the buffer with the original instructions */
11530- memcpy(insnbuf, p->instr, p->len);
11531+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11532 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11533 (unsigned long)p->instr, p->len);
11534
11535@@ -475,7 +475,7 @@ void __init alternative_instructions(voi
11536 if (smp_alt_once)
11537 free_init_pages("SMP alternatives",
11538 (unsigned long)__smp_locks,
11539- (unsigned long)__smp_locks_end);
11540+ PAGE_ALIGN((unsigned long)__smp_locks_end));
11541
11542 restart_nmi();
11543 }
11544@@ -492,13 +492,17 @@ void __init alternative_instructions(voi
11545 * instructions. And on the local CPU you need to be protected again NMI or MCE
11546 * handlers seeing an inconsistent instruction while you patch.
11547 */
11548-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
11549+static void *__kprobes text_poke_early(void *addr, const void *opcode,
11550 size_t len)
11551 {
11552 unsigned long flags;
11553 local_irq_save(flags);
11554- memcpy(addr, opcode, len);
11555+
11556+ pax_open_kernel();
11557+ memcpy(ktla_ktva(addr), opcode, len);
11558 sync_core();
11559+ pax_close_kernel();
11560+
11561 local_irq_restore(flags);
11562 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11563 that causes hangs on some VIA CPUs. */
11564@@ -520,35 +524,21 @@ static void *__init_or_module text_poke_
11565 */
11566 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11567 {
11568- unsigned long flags;
11569- char *vaddr;
11570+ unsigned char *vaddr = ktla_ktva(addr);
11571 struct page *pages[2];
11572- int i;
11573+ size_t i;
11574
11575 if (!core_kernel_text((unsigned long)addr)) {
11576- pages[0] = vmalloc_to_page(addr);
11577- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11578+ pages[0] = vmalloc_to_page(vaddr);
11579+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11580 } else {
11581- pages[0] = virt_to_page(addr);
11582+ pages[0] = virt_to_page(vaddr);
11583 WARN_ON(!PageReserved(pages[0]));
11584- pages[1] = virt_to_page(addr + PAGE_SIZE);
11585+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11586 }
11587 BUG_ON(!pages[0]);
11588- local_irq_save(flags);
11589- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11590- if (pages[1])
11591- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11592- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11593- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11594- clear_fixmap(FIX_TEXT_POKE0);
11595- if (pages[1])
11596- clear_fixmap(FIX_TEXT_POKE1);
11597- local_flush_tlb();
11598- sync_core();
11599- /* Could also do a CLFLUSH here to speed up CPU recovery; but
11600- that causes hangs on some VIA CPUs. */
11601+ text_poke_early(addr, opcode, len);
11602 for (i = 0; i < len; i++)
11603- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11604- local_irq_restore(flags);
11605+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11606 return addr;
11607 }
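
The text_poke() rewrite above stops remapping the target through fixmaps and instead writes through the ktla_ktva() alias under pax_open_kernel(), then verifies the result byte by byte. As a loose user-space analogue (not the kernel mechanism), the sketch below patches a read-only view of a page by writing through a second, writable mapping of the same memfd-backed memory and then verifies through the read-only view; memfd_create() is Linux/glibc-specific.

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	int fd = memfd_create("poke-demo", 0);	/* anonymous, fd-backed memory */
	unsigned char *ro_view, *rw_alias;
	static const unsigned char opcode[] = { 0x90, 0x90, 0x90 };	/* NOPs */
	size_t i;

	if (fd < 0 || ftruncate(fd, page) < 0)
		return 1;

	/* 'ro_view' stands in for the protected kernel image, 'rw_alias'
	 * for the writable view the patched text_poke() goes through. */
	ro_view  = mmap(NULL, page, PROT_READ,              MAP_SHARED, fd, 0);
	rw_alias = mmap(NULL, page, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (ro_view == MAP_FAILED || rw_alias == MAP_FAILED)
		return 1;

	memcpy(rw_alias, opcode, sizeof(opcode));	/* "poke" via the alias */

	for (i = 0; i < sizeof(opcode); i++) {		/* verify via the RO view */
		if (ro_view[i] != opcode[i]) {
			fprintf(stderr, "mismatch at byte %zu\n", i);
			return 1;
		}
	}
	printf("patched %zu bytes through the writable alias\n", sizeof(opcode));
	return 0;
}
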
11608diff -urNp linux-2.6.32.43/arch/x86/kernel/amd_iommu.c linux-2.6.32.43/arch/x86/kernel/amd_iommu.c
11609--- linux-2.6.32.43/arch/x86/kernel/amd_iommu.c 2011-03-27 14:31:47.000000000 -0400
11610+++ linux-2.6.32.43/arch/x86/kernel/amd_iommu.c 2011-04-17 15:56:46.000000000 -0400
11611@@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(
11612 }
11613 }
11614
11615-static struct dma_map_ops amd_iommu_dma_ops = {
11616+static const struct dma_map_ops amd_iommu_dma_ops = {
11617 .alloc_coherent = alloc_coherent,
11618 .free_coherent = free_coherent,
11619 .map_page = map_page,
11620diff -urNp linux-2.6.32.43/arch/x86/kernel/apic/apic.c linux-2.6.32.43/arch/x86/kernel/apic/apic.c
11621--- linux-2.6.32.43/arch/x86/kernel/apic/apic.c 2011-03-27 14:31:47.000000000 -0400
11622+++ linux-2.6.32.43/arch/x86/kernel/apic/apic.c 2011-05-16 21:46:57.000000000 -0400
11623@@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs
11624 apic_write(APIC_ESR, 0);
11625 v1 = apic_read(APIC_ESR);
11626 ack_APIC_irq();
11627- atomic_inc(&irq_err_count);
11628+ atomic_inc_unchecked(&irq_err_count);
11629
11630 /*
11631 * Here is what the APIC error bits mean:
11632@@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(vo
11633 u16 *bios_cpu_apicid;
11634 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11635
11636+ pax_track_stack();
11637+
11638 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11639 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11640
11641diff -urNp linux-2.6.32.43/arch/x86/kernel/apic/io_apic.c linux-2.6.32.43/arch/x86/kernel/apic/io_apic.c
11642--- linux-2.6.32.43/arch/x86/kernel/apic/io_apic.c 2011-03-27 14:31:47.000000000 -0400
11643+++ linux-2.6.32.43/arch/x86/kernel/apic/io_apic.c 2011-05-04 17:56:20.000000000 -0400
11644@@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapi
11645 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
11646 GFP_ATOMIC);
11647 if (!ioapic_entries)
11648- return 0;
11649+ return NULL;
11650
11651 for (apic = 0; apic < nr_ioapics; apic++) {
11652 ioapic_entries[apic] =
11653@@ -733,7 +733,7 @@ nomem:
11654 kfree(ioapic_entries[apic]);
11655 kfree(ioapic_entries);
11656
11657- return 0;
11658+ return NULL;
11659 }
11660
11661 /*
11662@@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
11663 }
11664 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11665
11666-void lock_vector_lock(void)
11667+void lock_vector_lock(void) __acquires(vector_lock)
11668 {
11669 /* Used to the online set of cpus does not change
11670 * during assign_irq_vector.
11671@@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
11672 spin_lock(&vector_lock);
11673 }
11674
11675-void unlock_vector_lock(void)
11676+void unlock_vector_lock(void) __releases(vector_lock)
11677 {
11678 spin_unlock(&vector_lock);
11679 }
11680@@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i
11681 ack_APIC_irq();
11682 }
11683
11684-atomic_t irq_mis_count;
11685+atomic_unchecked_t irq_mis_count;
11686
11687 static void ack_apic_level(unsigned int irq)
11688 {
11689@@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int
11690
11691 /* Tail end of version 0x11 I/O APIC bug workaround */
11692 if (!(v & (1 << (i & 0x1f)))) {
11693- atomic_inc(&irq_mis_count);
11694+ atomic_inc_unchecked(&irq_mis_count);
11695 spin_lock(&ioapic_lock);
11696 __mask_and_edge_IO_APIC_irq(cfg);
11697 __unmask_and_level_IO_APIC_irq(cfg);
11698diff -urNp linux-2.6.32.43/arch/x86/kernel/apm_32.c linux-2.6.32.43/arch/x86/kernel/apm_32.c
11699--- linux-2.6.32.43/arch/x86/kernel/apm_32.c 2011-03-27 14:31:47.000000000 -0400
11700+++ linux-2.6.32.43/arch/x86/kernel/apm_32.c 2011-04-23 12:56:10.000000000 -0400
11701@@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
11702 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11703 * even though they are called in protected mode.
11704 */
11705-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11706+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11707 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11708
11709 static const char driver_version[] = "1.16ac"; /* no spaces */
11710@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
11711 BUG_ON(cpu != 0);
11712 gdt = get_cpu_gdt_table(cpu);
11713 save_desc_40 = gdt[0x40 / 8];
11714+
11715+ pax_open_kernel();
11716 gdt[0x40 / 8] = bad_bios_desc;
11717+ pax_close_kernel();
11718
11719 apm_irq_save(flags);
11720 APM_DO_SAVE_SEGS;
11721@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
11722 &call->esi);
11723 APM_DO_RESTORE_SEGS;
11724 apm_irq_restore(flags);
11725+
11726+ pax_open_kernel();
11727 gdt[0x40 / 8] = save_desc_40;
11728+ pax_close_kernel();
11729+
11730 put_cpu();
11731
11732 return call->eax & 0xff;
11733@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
11734 BUG_ON(cpu != 0);
11735 gdt = get_cpu_gdt_table(cpu);
11736 save_desc_40 = gdt[0x40 / 8];
11737+
11738+ pax_open_kernel();
11739 gdt[0x40 / 8] = bad_bios_desc;
11740+ pax_close_kernel();
11741
11742 apm_irq_save(flags);
11743 APM_DO_SAVE_SEGS;
11744@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
11745 &call->eax);
11746 APM_DO_RESTORE_SEGS;
11747 apm_irq_restore(flags);
11748+
11749+ pax_open_kernel();
11750 gdt[0x40 / 8] = save_desc_40;
11751+ pax_close_kernel();
11752+
11753 put_cpu();
11754 return error;
11755 }
11756@@ -975,7 +989,7 @@ recalc:
11757
11758 static void apm_power_off(void)
11759 {
11760- unsigned char po_bios_call[] = {
11761+ const unsigned char po_bios_call[] = {
11762 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
11763 0x8e, 0xd0, /* movw ax,ss */
11764 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
11765@@ -2357,12 +2371,15 @@ static int __init apm_init(void)
11766 * code to that CPU.
11767 */
11768 gdt = get_cpu_gdt_table(0);
11769+
11770+ pax_open_kernel();
11771 set_desc_base(&gdt[APM_CS >> 3],
11772 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11773 set_desc_base(&gdt[APM_CS_16 >> 3],
11774 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11775 set_desc_base(&gdt[APM_DS >> 3],
11776 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11777+ pax_close_kernel();
11778
11779 proc_create("apm", 0, NULL, &apm_file_ops);
11780
11781diff -urNp linux-2.6.32.43/arch/x86/kernel/asm-offsets_32.c linux-2.6.32.43/arch/x86/kernel/asm-offsets_32.c
11782--- linux-2.6.32.43/arch/x86/kernel/asm-offsets_32.c 2011-03-27 14:31:47.000000000 -0400
11783+++ linux-2.6.32.43/arch/x86/kernel/asm-offsets_32.c 2011-05-16 21:46:57.000000000 -0400
11784@@ -51,7 +51,6 @@ void foo(void)
11785 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
11786 BLANK();
11787
11788- OFFSET(TI_task, thread_info, task);
11789 OFFSET(TI_exec_domain, thread_info, exec_domain);
11790 OFFSET(TI_flags, thread_info, flags);
11791 OFFSET(TI_status, thread_info, status);
11792@@ -60,6 +59,8 @@ void foo(void)
11793 OFFSET(TI_restart_block, thread_info, restart_block);
11794 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
11795 OFFSET(TI_cpu, thread_info, cpu);
11796+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11797+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11798 BLANK();
11799
11800 OFFSET(GDS_size, desc_ptr, size);
11801@@ -99,6 +100,7 @@ void foo(void)
11802
11803 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11804 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
11805+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11806 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
11807 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
11808 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
11809@@ -115,6 +117,11 @@ void foo(void)
11810 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
11811 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11812 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11813+
11814+#ifdef CONFIG_PAX_KERNEXEC
11815+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11816+#endif
11817+
11818 #endif
11819
11820 #ifdef CONFIG_XEN
11821diff -urNp linux-2.6.32.43/arch/x86/kernel/asm-offsets_64.c linux-2.6.32.43/arch/x86/kernel/asm-offsets_64.c
11822--- linux-2.6.32.43/arch/x86/kernel/asm-offsets_64.c 2011-03-27 14:31:47.000000000 -0400
11823+++ linux-2.6.32.43/arch/x86/kernel/asm-offsets_64.c 2011-05-16 21:46:57.000000000 -0400
11824@@ -44,6 +44,8 @@ int main(void)
11825 ENTRY(addr_limit);
11826 ENTRY(preempt_count);
11827 ENTRY(status);
11828+ ENTRY(lowest_stack);
11829+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11830 #ifdef CONFIG_IA32_EMULATION
11831 ENTRY(sysenter_return);
11832 #endif
11833@@ -63,6 +65,18 @@ int main(void)
11834 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
11835 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
11836 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
11837+
11838+#ifdef CONFIG_PAX_KERNEXEC
11839+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
11840+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
11841+#endif
11842+
11843+#ifdef CONFIG_PAX_MEMORY_UDEREF
11844+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
11845+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
11846+ OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
11847+#endif
11848+
11849 #endif
11850
11851
11852@@ -115,6 +129,7 @@ int main(void)
11853 ENTRY(cr8);
11854 BLANK();
11855 #undef ENTRY
11856+ DEFINE(TSS_size, sizeof(struct tss_struct));
11857 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
11858 BLANK();
11859 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
11860@@ -130,6 +145,7 @@ int main(void)
11861
11862 BLANK();
11863 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11864+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
11865 #ifdef CONFIG_XEN
11866 BLANK();
11867 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
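
The asm-offsets hunks above add constants such as TI_task_thread_sp0, computed as the difference of two offsetof() expressions, so assembly code can reach thread.sp0 from a pointer to the task's embedded thread_info. A small sketch of that offset arithmetic with invented demo structures:

#include <stdio.h>
#include <stddef.h>

/* demo_info/demo_thread/demo_task are invented stand-ins for
 * thread_info, thread_struct and task_struct; only the offset
 * arithmetic is the point. */
struct demo_info   { unsigned long flags, lowest_stack; };
struct demo_thread { unsigned long sp0; };
struct demo_task {
	struct demo_info   tinfo;	/* embedded, like task_struct.tinfo */
	int                state;
	struct demo_thread thread;
};

int main(void)
{
	/* Same shape as the TI_task_thread_sp0 DEFINE() above: distance
	 * from the embedded info block to thread.sp0 inside the task. */
	size_t delta = offsetof(struct demo_task, thread.sp0) -
		       offsetof(struct demo_task, tinfo);

	printf("thread.sp0 sits %zu bytes past the embedded tinfo\n", delta);
	return 0;
}
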
11868diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/amd.c linux-2.6.32.43/arch/x86/kernel/cpu/amd.c
11869--- linux-2.6.32.43/arch/x86/kernel/cpu/amd.c 2011-06-25 12:55:34.000000000 -0400
11870+++ linux-2.6.32.43/arch/x86/kernel/cpu/amd.c 2011-06-25 12:56:37.000000000 -0400
11871@@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_c
11872 unsigned int size)
11873 {
11874 /* AMD errata T13 (order #21922) */
11875- if ((c->x86 == 6)) {
11876+ if (c->x86 == 6) {
11877 /* Duron Rev A0 */
11878 if (c->x86_model == 3 && c->x86_mask == 0)
11879 size = 64;
11880diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/common.c linux-2.6.32.43/arch/x86/kernel/cpu/common.c
11881--- linux-2.6.32.43/arch/x86/kernel/cpu/common.c 2011-03-27 14:31:47.000000000 -0400
11882+++ linux-2.6.32.43/arch/x86/kernel/cpu/common.c 2011-05-11 18:25:15.000000000 -0400
11883@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
11884
11885 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
11886
11887-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
11888-#ifdef CONFIG_X86_64
11889- /*
11890- * We need valid kernel segments for data and code in long mode too
11891- * IRET will check the segment types kkeil 2000/10/28
11892- * Also sysret mandates a special GDT layout
11893- *
11894- * TLS descriptors are currently at a different place compared to i386.
11895- * Hopefully nobody expects them at a fixed place (Wine?)
11896- */
11897- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
11898- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
11899- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
11900- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
11901- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
11902- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
11903-#else
11904- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
11905- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11906- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
11907- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
11908- /*
11909- * Segments used for calling PnP BIOS have byte granularity.
11910- * They code segments and data segments have fixed 64k limits,
11911- * the transfer segment sizes are set at run time.
11912- */
11913- /* 32-bit code */
11914- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11915- /* 16-bit code */
11916- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11917- /* 16-bit data */
11918- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
11919- /* 16-bit data */
11920- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
11921- /* 16-bit data */
11922- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
11923- /*
11924- * The APM segments have byte granularity and their bases
11925- * are set at run time. All have 64k limits.
11926- */
11927- /* 32-bit code */
11928- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
11929- /* 16-bit code */
11930- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
11931- /* data */
11932- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
11933-
11934- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11935- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
11936- GDT_STACK_CANARY_INIT
11937-#endif
11938-} };
11939-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
11940-
11941 static int __init x86_xsave_setup(char *s)
11942 {
11943 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
11944@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
11945 {
11946 struct desc_ptr gdt_descr;
11947
11948- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
11949+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
11950 gdt_descr.size = GDT_SIZE - 1;
11951 load_gdt(&gdt_descr);
11952 /* Reload the per-cpu base */
11953@@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struc
11954 /* Filter out anything that depends on CPUID levels we don't have */
11955 filter_cpuid_features(c, true);
11956
11957+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
11958+ setup_clear_cpu_cap(X86_FEATURE_SEP);
11959+#endif
11960+
11961 /* If the model name is still unset, do table lookup. */
11962 if (!c->x86_model_id[0]) {
11963 const char *p;
11964@@ -980,6 +930,9 @@ static __init int setup_disablecpuid(cha
11965 }
11966 __setup("clearcpuid=", setup_disablecpuid);
11967
11968+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
11969+EXPORT_PER_CPU_SYMBOL(current_tinfo);
11970+
11971 #ifdef CONFIG_X86_64
11972 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
11973
11974@@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
11975 EXPORT_PER_CPU_SYMBOL(current_task);
11976
11977 DEFINE_PER_CPU(unsigned long, kernel_stack) =
11978- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
11979+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
11980 EXPORT_PER_CPU_SYMBOL(kernel_stack);
11981
11982 DEFINE_PER_CPU(char *, irq_stack_ptr) =
11983@@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(str
11984 {
11985 memset(regs, 0, sizeof(struct pt_regs));
11986 regs->fs = __KERNEL_PERCPU;
11987- regs->gs = __KERNEL_STACK_CANARY;
11988+ savesegment(gs, regs->gs);
11989
11990 return regs;
11991 }
11992@@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
11993 int i;
11994
11995 cpu = stack_smp_processor_id();
11996- t = &per_cpu(init_tss, cpu);
11997+ t = init_tss + cpu;
11998 orig_ist = &per_cpu(orig_ist, cpu);
11999
12000 #ifdef CONFIG_NUMA
12001@@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
12002 switch_to_new_gdt(cpu);
12003 loadsegment(fs, 0);
12004
12005- load_idt((const struct desc_ptr *)&idt_descr);
12006+ load_idt(&idt_descr);
12007
12008 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12009 syscall_init();
12010@@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
12011 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12012 barrier();
12013
12014- check_efer();
12015 if (cpu != 0)
12016 enable_x2apic();
12017
12018@@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
12019 {
12020 int cpu = smp_processor_id();
12021 struct task_struct *curr = current;
12022- struct tss_struct *t = &per_cpu(init_tss, cpu);
12023+ struct tss_struct *t = init_tss + cpu;
12024 struct thread_struct *thread = &curr->thread;
12025
12026 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12027diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/intel.c linux-2.6.32.43/arch/x86/kernel/cpu/intel.c
12028--- linux-2.6.32.43/arch/x86/kernel/cpu/intel.c 2011-03-27 14:31:47.000000000 -0400
12029+++ linux-2.6.32.43/arch/x86/kernel/cpu/intel.c 2011-04-17 15:56:46.000000000 -0400
12030@@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug
12031 * Update the IDT descriptor and reload the IDT so that
12032 * it uses the read-only mapped virtual address.
12033 */
12034- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12035+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12036 load_idt(&idt_descr);
12037 }
12038 #endif
12039diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.32.43/arch/x86/kernel/cpu/intel_cacheinfo.c
12040--- linux-2.6.32.43/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
12041+++ linux-2.6.32.43/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-04-17 15:56:46.000000000 -0400
12042@@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kob
12043 return ret;
12044 }
12045
12046-static struct sysfs_ops sysfs_ops = {
12047+static const struct sysfs_ops sysfs_ops = {
12048 .show = show,
12049 .store = store,
12050 };
12051diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/Makefile linux-2.6.32.43/arch/x86/kernel/cpu/Makefile
12052--- linux-2.6.32.43/arch/x86/kernel/cpu/Makefile 2011-03-27 14:31:47.000000000 -0400
12053+++ linux-2.6.32.43/arch/x86/kernel/cpu/Makefile 2011-04-17 15:56:46.000000000 -0400
12054@@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
12055 CFLAGS_REMOVE_common.o = -pg
12056 endif
12057
12058-# Make sure load_percpu_segment has no stackprotector
12059-nostackp := $(call cc-option, -fno-stack-protector)
12060-CFLAGS_common.o := $(nostackp)
12061-
12062 obj-y := intel_cacheinfo.o addon_cpuid_features.o
12063 obj-y += proc.o capflags.o powerflags.o common.o
12064 obj-y += vmware.o hypervisor.o sched.o
12065diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce_amd.c
12066--- linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:56:59.000000000 -0400
12067+++ linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:57:13.000000000 -0400
12068@@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kob
12069 return ret;
12070 }
12071
12072-static struct sysfs_ops threshold_ops = {
12073+static const struct sysfs_ops threshold_ops = {
12074 .show = show,
12075 .store = store,
12076 };
12077diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce.c
12078--- linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce.c 2011-03-27 14:31:47.000000000 -0400
12079+++ linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-04 17:56:20.000000000 -0400
12080@@ -43,6 +43,7 @@
12081 #include <asm/ipi.h>
12082 #include <asm/mce.h>
12083 #include <asm/msr.h>
12084+#include <asm/local.h>
12085
12086 #include "mce-internal.h"
12087
12088@@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
12089 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12090 m->cs, m->ip);
12091
12092- if (m->cs == __KERNEL_CS)
12093+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12094 print_symbol("{%s}", m->ip);
12095 pr_cont("\n");
12096 }
12097@@ -221,10 +222,10 @@ static void print_mce_tail(void)
12098
12099 #define PANIC_TIMEOUT 5 /* 5 seconds */
12100
12101-static atomic_t mce_paniced;
12102+static atomic_unchecked_t mce_paniced;
12103
12104 static int fake_panic;
12105-static atomic_t mce_fake_paniced;
12106+static atomic_unchecked_t mce_fake_paniced;
12107
12108 /* Panic in progress. Enable interrupts and wait for final IPI */
12109 static void wait_for_panic(void)
12110@@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct
12111 /*
12112 * Make sure only one CPU runs in machine check panic
12113 */
12114- if (atomic_inc_return(&mce_paniced) > 1)
12115+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12116 wait_for_panic();
12117 barrier();
12118
12119@@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct
12120 console_verbose();
12121 } else {
12122 /* Don't log too much for fake panic */
12123- if (atomic_inc_return(&mce_fake_paniced) > 1)
12124+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12125 return;
12126 }
12127 print_mce_head();
12128@@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
12129 * might have been modified by someone else.
12130 */
12131 rmb();
12132- if (atomic_read(&mce_paniced))
12133+ if (atomic_read_unchecked(&mce_paniced))
12134 wait_for_panic();
12135 if (!monarch_timeout)
12136 goto out;
12137@@ -1429,14 +1430,14 @@ void __cpuinit mcheck_init(struct cpuinf
12138 */
12139
12140 static DEFINE_SPINLOCK(mce_state_lock);
12141-static int open_count; /* #times opened */
12142+static local_t open_count; /* #times opened */
12143 static int open_exclu; /* already open exclusive? */
12144
12145 static int mce_open(struct inode *inode, struct file *file)
12146 {
12147 spin_lock(&mce_state_lock);
12148
12149- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
12150+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
12151 spin_unlock(&mce_state_lock);
12152
12153 return -EBUSY;
12154@@ -1444,7 +1445,7 @@ static int mce_open(struct inode *inode,
12155
12156 if (file->f_flags & O_EXCL)
12157 open_exclu = 1;
12158- open_count++;
12159+ local_inc(&open_count);
12160
12161 spin_unlock(&mce_state_lock);
12162
12163@@ -1455,7 +1456,7 @@ static int mce_release(struct inode *ino
12164 {
12165 spin_lock(&mce_state_lock);
12166
12167- open_count--;
12168+ local_dec(&open_count);
12169 open_exclu = 0;
12170
12171 spin_unlock(&mce_state_lock);
12172@@ -2082,7 +2083,7 @@ struct dentry *mce_get_debugfs_dir(void)
12173 static void mce_reset(void)
12174 {
12175 cpu_missing = 0;
12176- atomic_set(&mce_fake_paniced, 0);
12177+ atomic_set_unchecked(&mce_fake_paniced, 0);
12178 atomic_set(&mce_executing, 0);
12179 atomic_set(&mce_callin, 0);
12180 atomic_set(&global_nwo, 0);
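
The mce.c hunk above turns the plain int open_count into a local_t while keeping the exclusive-open rule: an O_EXCL opener is refused while the device is already open, and any opener is refused while an exclusive one holds it. A single-threaded C11 sketch of that bookkeeping; in the kernel the checks run under mce_state_lock, which this sketch deliberately omits, and demo_open()/demo_release() are invented names.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Single-threaded demo only: without the spinlock the check-then-set
 * below would race in real multi-threaded code. */
static atomic_int  open_count;	/* stands in for the local_t counter */
static atomic_bool open_exclu;

static bool demo_open(bool exclusive)
{
	if (atomic_load(&open_exclu) ||
	    (exclusive && atomic_load(&open_count) > 0))
		return false;		/* -EBUSY in the real driver */
	if (exclusive)
		atomic_store(&open_exclu, true);
	atomic_fetch_add(&open_count, 1);
	return true;
}

static void demo_release(void)
{
	atomic_fetch_sub(&open_count, 1);
	atomic_store(&open_exclu, false);
}

int main(void)
{
	printf("plain open:           %d\n", demo_open(false));
	printf("exclusive while busy: %d\n", demo_open(true));
	demo_release();
	printf("exclusive when idle:  %d\n", demo_open(true));
	return 0;
}
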
12181diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/amd.c
12182--- linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/amd.c 2011-03-27 14:31:47.000000000 -0400
12183+++ linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/amd.c 2011-04-17 15:56:46.000000000 -0400
12184@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base
12185 return 0;
12186 }
12187
12188-static struct mtrr_ops amd_mtrr_ops = {
12189+static const struct mtrr_ops amd_mtrr_ops = {
12190 .vendor = X86_VENDOR_AMD,
12191 .set = amd_set_mtrr,
12192 .get = amd_get_mtrr,
12193diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/centaur.c
12194--- linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/centaur.c 2011-03-27 14:31:47.000000000 -0400
12195+++ linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/centaur.c 2011-04-17 15:56:46.000000000 -0400
12196@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long
12197 return 0;
12198 }
12199
12200-static struct mtrr_ops centaur_mtrr_ops = {
12201+static const struct mtrr_ops centaur_mtrr_ops = {
12202 .vendor = X86_VENDOR_CENTAUR,
12203 .set = centaur_set_mcr,
12204 .get = centaur_get_mcr,
12205diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/cyrix.c
12206--- linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-03-27 14:31:47.000000000 -0400
12207+++ linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-04-17 15:56:46.000000000 -0400
12208@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
12209 post_set();
12210 }
12211
12212-static struct mtrr_ops cyrix_mtrr_ops = {
12213+static const struct mtrr_ops cyrix_mtrr_ops = {
12214 .vendor = X86_VENDOR_CYRIX,
12215 .set_all = cyrix_set_all,
12216 .set = cyrix_set_arr,
12217diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/generic.c
12218--- linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/generic.c 2011-03-27 14:31:47.000000000 -0400
12219+++ linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/generic.c 2011-04-23 12:56:10.000000000 -0400
12220@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
12221 /*
12222 * Generic structure...
12223 */
12224-struct mtrr_ops generic_mtrr_ops = {
12225+const struct mtrr_ops generic_mtrr_ops = {
12226 .use_intel_if = 1,
12227 .set_all = generic_set_all,
12228 .get = generic_get_mtrr,
12229diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/main.c
12230--- linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:00:52.000000000 -0400
12231+++ linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:03:05.000000000 -0400
12232@@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
12233 u64 size_or_mask, size_and_mask;
12234 static bool mtrr_aps_delayed_init;
12235
12236-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12237+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12238
12239-struct mtrr_ops *mtrr_if;
12240+const struct mtrr_ops *mtrr_if;
12241
12242 static void set_mtrr(unsigned int reg, unsigned long base,
12243 unsigned long size, mtrr_type type);
12244
12245-void set_mtrr_ops(struct mtrr_ops *ops)
12246+void set_mtrr_ops(const struct mtrr_ops *ops)
12247 {
12248 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
12249 mtrr_ops[ops->vendor] = ops;
12250diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/mtrr.h
12251--- linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-03-27 14:31:47.000000000 -0400
12252+++ linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-04-17 15:56:46.000000000 -0400
12253@@ -12,19 +12,19 @@
12254 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
12255
12256 struct mtrr_ops {
12257- u32 vendor;
12258- u32 use_intel_if;
12259- void (*set)(unsigned int reg, unsigned long base,
12260+ const u32 vendor;
12261+ const u32 use_intel_if;
12262+ void (* const set)(unsigned int reg, unsigned long base,
12263 unsigned long size, mtrr_type type);
12264- void (*set_all)(void);
12265+ void (* const set_all)(void);
12266
12267- void (*get)(unsigned int reg, unsigned long *base,
12268+ void (* const get)(unsigned int reg, unsigned long *base,
12269 unsigned long *size, mtrr_type *type);
12270- int (*get_free_region)(unsigned long base, unsigned long size,
12271+ int (* const get_free_region)(unsigned long base, unsigned long size,
12272 int replace_reg);
12273- int (*validate_add_page)(unsigned long base, unsigned long size,
12274+ int (* const validate_add_page)(unsigned long base, unsigned long size,
12275 unsigned int type);
12276- int (*have_wrcomb)(void);
12277+ int (* const have_wrcomb)(void);
12278 };
12279
12280 extern int generic_get_free_region(unsigned long base, unsigned long size,
12281@@ -32,7 +32,7 @@ extern int generic_get_free_region(unsig
12282 extern int generic_validate_add_page(unsigned long base, unsigned long size,
12283 unsigned int type);
12284
12285-extern struct mtrr_ops generic_mtrr_ops;
12286+extern const struct mtrr_ops generic_mtrr_ops;
12287
12288 extern int positive_have_wrcomb(void);
12289
12290@@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int in
12291 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
12292 void get_mtrr_state(void);
12293
12294-extern void set_mtrr_ops(struct mtrr_ops *ops);
12295+extern void set_mtrr_ops(const struct mtrr_ops *ops);
12296
12297 extern u64 size_or_mask, size_and_mask;
12298-extern struct mtrr_ops *mtrr_if;
12299+extern const struct mtrr_ops *mtrr_if;
12300
12301 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
12302 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
12303diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.32.43/arch/x86/kernel/cpu/perfctr-watchdog.c
12304--- linux-2.6.32.43/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-03-27 14:31:47.000000000 -0400
12305+++ linux-2.6.32.43/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-04-17 15:56:46.000000000 -0400
12306@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
12307
12308 /* Interface defining a CPU specific perfctr watchdog */
12309 struct wd_ops {
12310- int (*reserve)(void);
12311- void (*unreserve)(void);
12312- int (*setup)(unsigned nmi_hz);
12313- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12314- void (*stop)(void);
12315+ int (* const reserve)(void);
12316+ void (* const unreserve)(void);
12317+ int (* const setup)(unsigned nmi_hz);
12318+ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12319+ void (* const stop)(void);
12320 unsigned perfctr;
12321 unsigned evntsel;
12322 u64 checkbit;
12323@@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
12324 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
12325 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
12326
12327+/* cannot be const */
12328 static struct wd_ops intel_arch_wd_ops;
12329
12330 static int setup_intel_arch_watchdog(unsigned nmi_hz)
12331@@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(uns
12332 return 1;
12333 }
12334
12335+/* cannot be const */
12336 static struct wd_ops intel_arch_wd_ops __read_mostly = {
12337 .reserve = single_msr_reserve,
12338 .unreserve = single_msr_unreserve,
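
A recurring change above (amd_iommu_dma_ops, the sysfs and threshold ops, the mtrr tables, wd_ops) is constifying ops structures so their function pointers live in read-only data and cannot be silently retargeted at runtime. A minimal sketch of a const ops table with const function-pointer members; the demo_* names are invented:

#include <stdio.h>

/* With the struct itself const and the members '* const', the function
 * pointers end up in read-only data; a stray write is rejected instead
 * of silently redirecting the driver. */
struct demo_ops {
	int  (* const setup)(unsigned hz);
	void (* const stop)(void);
};

static int  demo_setup(unsigned hz) { printf("setup @ %u Hz\n", hz); return 0; }
static void demo_stop(void)         { printf("stop\n"); }

static const struct demo_ops ops = {
	.setup = demo_setup,
	.stop  = demo_stop,
};

int main(void)
{
	ops.setup(1000);
	ops.stop();
	/* ops.setup = NULL;  -- rejected at compile time */
	return 0;
}
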
12339diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/perf_event.c linux-2.6.32.43/arch/x86/kernel/cpu/perf_event.c
12340--- linux-2.6.32.43/arch/x86/kernel/cpu/perf_event.c 2011-03-27 14:31:47.000000000 -0400
12341+++ linux-2.6.32.43/arch/x86/kernel/cpu/perf_event.c 2011-05-04 17:56:20.000000000 -0400
12342@@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event
12343 * count to the generic event atomically:
12344 */
12345 again:
12346- prev_raw_count = atomic64_read(&hwc->prev_count);
12347+ prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
12348 rdmsrl(hwc->event_base + idx, new_raw_count);
12349
12350- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
12351+ if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
12352 new_raw_count) != prev_raw_count)
12353 goto again;
12354
12355@@ -741,7 +741,7 @@ again:
12356 delta = (new_raw_count << shift) - (prev_raw_count << shift);
12357 delta >>= shift;
12358
12359- atomic64_add(delta, &event->count);
12360+ atomic64_add_unchecked(delta, &event->count);
12361 atomic64_sub(delta, &hwc->period_left);
12362
12363 return new_raw_count;
12364@@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev
12365 * The hw event starts counting from this event offset,
12366 * mark it to be able to extra future deltas:
12367 */
12368- atomic64_set(&hwc->prev_count, (u64)-left);
12369+ atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
12370
12371 err = checking_wrmsrl(hwc->event_base + idx,
12372 (u64)(-left) & x86_pmu.event_mask);
12373@@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs
12374 break;
12375
12376 callchain_store(entry, frame.return_address);
12377- fp = frame.next_frame;
12378+ fp = (__force const void __user *)frame.next_frame;
12379 }
12380 }
12381
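
x86_perf_event_update() above keeps its lock-free update loop and only swaps the atomic type: sample the previous raw count, read the hardware counter, publish the new value with a compare-and-exchange, retry if another path got there first, then credit the delta exactly once. A user-space sketch of that retry pattern using C11 atomics; read_hw_counter() is a stand-in for rdmsrl():

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t prev_count;	/* last published raw counter value */

static uint64_t read_hw_counter(void)	/* stand-in for rdmsrl() */
{
	static uint64_t fake = 1000;
	return fake += 137;
}

static void update(_Atomic uint64_t *accum)
{
	uint64_t prev, now;

	do {
		prev = atomic_load(&prev_count);
		now  = read_hw_counter();
		/* publish 'now' only if nobody raced in since we sampled */
	} while (!atomic_compare_exchange_weak(&prev_count, &prev, now));

	atomic_fetch_add(accum, now - prev);	/* credit the delta exactly once */
}

int main(void)
{
	_Atomic uint64_t total = 0;

	update(&total);
	update(&total);
	printf("accumulated delta: %llu\n", (unsigned long long)total);
	return 0;
}
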
12382diff -urNp linux-2.6.32.43/arch/x86/kernel/crash.c linux-2.6.32.43/arch/x86/kernel/crash.c
12383--- linux-2.6.32.43/arch/x86/kernel/crash.c 2011-03-27 14:31:47.000000000 -0400
12384+++ linux-2.6.32.43/arch/x86/kernel/crash.c 2011-04-17 15:56:46.000000000 -0400
12385@@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
12386 regs = args->regs;
12387
12388 #ifdef CONFIG_X86_32
12389- if (!user_mode_vm(regs)) {
12390+ if (!user_mode(regs)) {
12391 crash_fixup_ss_esp(&fixed_regs, regs);
12392 regs = &fixed_regs;
12393 }
12394diff -urNp linux-2.6.32.43/arch/x86/kernel/doublefault_32.c linux-2.6.32.43/arch/x86/kernel/doublefault_32.c
12395--- linux-2.6.32.43/arch/x86/kernel/doublefault_32.c 2011-03-27 14:31:47.000000000 -0400
12396+++ linux-2.6.32.43/arch/x86/kernel/doublefault_32.c 2011-04-17 15:56:46.000000000 -0400
12397@@ -11,7 +11,7 @@
12398
12399 #define DOUBLEFAULT_STACKSIZE (1024)
12400 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12401-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12402+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12403
12404 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12405
12406@@ -21,7 +21,7 @@ static void doublefault_fn(void)
12407 unsigned long gdt, tss;
12408
12409 store_gdt(&gdt_desc);
12410- gdt = gdt_desc.address;
12411+ gdt = (unsigned long)gdt_desc.address;
12412
12413 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12414
12415@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
12416 /* 0x2 bit is always set */
12417 .flags = X86_EFLAGS_SF | 0x2,
12418 .sp = STACK_START,
12419- .es = __USER_DS,
12420+ .es = __KERNEL_DS,
12421 .cs = __KERNEL_CS,
12422 .ss = __KERNEL_DS,
12423- .ds = __USER_DS,
12424+ .ds = __KERNEL_DS,
12425 .fs = __KERNEL_PERCPU,
12426
12427 .__cr3 = __pa_nodebug(swapper_pg_dir),
12428diff -urNp linux-2.6.32.43/arch/x86/kernel/dumpstack_32.c linux-2.6.32.43/arch/x86/kernel/dumpstack_32.c
12429--- linux-2.6.32.43/arch/x86/kernel/dumpstack_32.c 2011-03-27 14:31:47.000000000 -0400
12430+++ linux-2.6.32.43/arch/x86/kernel/dumpstack_32.c 2011-04-17 15:56:46.000000000 -0400
12431@@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task
12432 #endif
12433
12434 for (;;) {
12435- struct thread_info *context;
12436+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12437+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12438
12439- context = (struct thread_info *)
12440- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12441- bp = print_context_stack(context, stack, bp, ops,
12442- data, NULL, &graph);
12443-
12444- stack = (unsigned long *)context->previous_esp;
12445- if (!stack)
12446+ if (stack_start == task_stack_page(task))
12447 break;
12448+ stack = *(unsigned long **)stack_start;
12449 if (ops->stack(data, "IRQ") < 0)
12450 break;
12451 touch_nmi_watchdog();
12452@@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs
12453 * When in-kernel, we also print out the stack and code at the
12454 * time of the fault..
12455 */
12456- if (!user_mode_vm(regs)) {
12457+ if (!user_mode(regs)) {
12458 unsigned int code_prologue = code_bytes * 43 / 64;
12459 unsigned int code_len = code_bytes;
12460 unsigned char c;
12461 u8 *ip;
12462+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12463
12464 printk(KERN_EMERG "Stack:\n");
12465 show_stack_log_lvl(NULL, regs, &regs->sp,
12466@@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs
12467
12468 printk(KERN_EMERG "Code: ");
12469
12470- ip = (u8 *)regs->ip - code_prologue;
12471+ ip = (u8 *)regs->ip - code_prologue + cs_base;
12472 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12473 /* try starting at IP */
12474- ip = (u8 *)regs->ip;
12475+ ip = (u8 *)regs->ip + cs_base;
12476 code_len = code_len - code_prologue + 1;
12477 }
12478 for (i = 0; i < code_len; i++, ip++) {
12479@@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs
12480 printk(" Bad EIP value.");
12481 break;
12482 }
12483- if (ip == (u8 *)regs->ip)
12484+ if (ip == (u8 *)regs->ip + cs_base)
12485 printk("<%02x> ", c);
12486 else
12487 printk("%02x ", c);
12488@@ -149,6 +146,7 @@ int is_valid_bugaddr(unsigned long ip)
12489 {
12490 unsigned short ud2;
12491
12492+ ip = ktla_ktva(ip);
12493 if (ip < PAGE_OFFSET)
12494 return 0;
12495 if (probe_kernel_address((unsigned short *)ip, ud2))
12496diff -urNp linux-2.6.32.43/arch/x86/kernel/dumpstack_64.c linux-2.6.32.43/arch/x86/kernel/dumpstack_64.c
12497--- linux-2.6.32.43/arch/x86/kernel/dumpstack_64.c 2011-03-27 14:31:47.000000000 -0400
12498+++ linux-2.6.32.43/arch/x86/kernel/dumpstack_64.c 2011-04-17 15:56:46.000000000 -0400
12499@@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task
12500 unsigned long *irq_stack_end =
12501 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12502 unsigned used = 0;
12503- struct thread_info *tinfo;
12504 int graph = 0;
12505+ void *stack_start;
12506
12507 if (!task)
12508 task = current;
12509@@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task
12510 * current stack address. If the stacks consist of nested
12511 * exceptions
12512 */
12513- tinfo = task_thread_info(task);
12514 for (;;) {
12515 char *id;
12516 unsigned long *estack_end;
12517+
12518 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12519 &used, &id);
12520
12521@@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task
12522 if (ops->stack(data, id) < 0)
12523 break;
12524
12525- bp = print_context_stack(tinfo, stack, bp, ops,
12526+ bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12527 data, estack_end, &graph);
12528 ops->stack(data, "<EOE>");
12529 /*
12530@@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task
12531 if (stack >= irq_stack && stack < irq_stack_end) {
12532 if (ops->stack(data, "IRQ") < 0)
12533 break;
12534- bp = print_context_stack(tinfo, stack, bp,
12535+ bp = print_context_stack(task, irq_stack, stack, bp,
12536 ops, data, irq_stack_end, &graph);
12537 /*
12538 * We link to the next stack (which would be
12539@@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task
12540 /*
12541 * This handles the process stack:
12542 */
12543- bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12544+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12545+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12546 put_cpu();
12547 }
12548 EXPORT_SYMBOL(dump_trace);
12549diff -urNp linux-2.6.32.43/arch/x86/kernel/dumpstack.c linux-2.6.32.43/arch/x86/kernel/dumpstack.c
12550--- linux-2.6.32.43/arch/x86/kernel/dumpstack.c 2011-03-27 14:31:47.000000000 -0400
12551+++ linux-2.6.32.43/arch/x86/kernel/dumpstack.c 2011-04-17 15:56:46.000000000 -0400
12552@@ -2,6 +2,9 @@
12553 * Copyright (C) 1991, 1992 Linus Torvalds
12554 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12555 */
12556+#ifdef CONFIG_GRKERNSEC_HIDESYM
12557+#define __INCLUDED_BY_HIDESYM 1
12558+#endif
12559 #include <linux/kallsyms.h>
12560 #include <linux/kprobes.h>
12561 #include <linux/uaccess.h>
12562@@ -28,7 +31,7 @@ static int die_counter;
12563
12564 void printk_address(unsigned long address, int reliable)
12565 {
12566- printk(" [<%p>] %s%pS\n", (void *) address,
12567+ printk(" [<%p>] %s%pA\n", (void *) address,
12568 reliable ? "" : "? ", (void *) address);
12569 }
12570
12571@@ -36,9 +39,8 @@ void printk_address(unsigned long addres
12572 static void
12573 print_ftrace_graph_addr(unsigned long addr, void *data,
12574 const struct stacktrace_ops *ops,
12575- struct thread_info *tinfo, int *graph)
12576+ struct task_struct *task, int *graph)
12577 {
12578- struct task_struct *task = tinfo->task;
12579 unsigned long ret_addr;
12580 int index = task->curr_ret_stack;
12581
12582@@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long ad
12583 static inline void
12584 print_ftrace_graph_addr(unsigned long addr, void *data,
12585 const struct stacktrace_ops *ops,
12586- struct thread_info *tinfo, int *graph)
12587+ struct task_struct *task, int *graph)
12588 { }
12589 #endif
12590
12591@@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long ad
12592 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12593 */
12594
12595-static inline int valid_stack_ptr(struct thread_info *tinfo,
12596- void *p, unsigned int size, void *end)
12597+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12598 {
12599- void *t = tinfo;
12600 if (end) {
12601 if (p < end && p >= (end-THREAD_SIZE))
12602 return 1;
12603@@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct
12604 }
12605
12606 unsigned long
12607-print_context_stack(struct thread_info *tinfo,
12608+print_context_stack(struct task_struct *task, void *stack_start,
12609 unsigned long *stack, unsigned long bp,
12610 const struct stacktrace_ops *ops, void *data,
12611 unsigned long *end, int *graph)
12612 {
12613 struct stack_frame *frame = (struct stack_frame *)bp;
12614
12615- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12616+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12617 unsigned long addr;
12618
12619 addr = *stack;
12620@@ -103,7 +103,7 @@ print_context_stack(struct thread_info *
12621 } else {
12622 ops->address(data, addr, 0);
12623 }
12624- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12625+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12626 }
12627 stack++;
12628 }
12629@@ -180,7 +180,7 @@ void dump_stack(void)
12630 #endif
12631
12632 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12633- current->pid, current->comm, print_tainted(),
12634+ task_pid_nr(current), current->comm, print_tainted(),
12635 init_utsname()->release,
12636 (int)strcspn(init_utsname()->version, " "),
12637 init_utsname()->version);
12638@@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
12639 return flags;
12640 }
12641
12642+extern void gr_handle_kernel_exploit(void);
12643+
12644 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12645 {
12646 if (regs && kexec_should_crash(current))
12647@@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long fl
12648 panic("Fatal exception in interrupt");
12649 if (panic_on_oops)
12650 panic("Fatal exception");
12651- do_exit(signr);
12652+
12653+ gr_handle_kernel_exploit();
12654+
12655+ do_group_exit(signr);
12656 }
12657
12658 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12659@@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs
12660 unsigned long flags = oops_begin();
12661 int sig = SIGSEGV;
12662
12663- if (!user_mode_vm(regs))
12664+ if (!user_mode(regs))
12665 report_bug(regs->ip, regs);
12666
12667 if (__die(str, regs, err))
12668diff -urNp linux-2.6.32.43/arch/x86/kernel/dumpstack.h linux-2.6.32.43/arch/x86/kernel/dumpstack.h
12669--- linux-2.6.32.43/arch/x86/kernel/dumpstack.h 2011-03-27 14:31:47.000000000 -0400
12670+++ linux-2.6.32.43/arch/x86/kernel/dumpstack.h 2011-04-23 13:25:26.000000000 -0400
12671@@ -15,7 +15,7 @@
12672 #endif
12673
12674 extern unsigned long
12675-print_context_stack(struct thread_info *tinfo,
12676+print_context_stack(struct task_struct *task, void *stack_start,
12677 unsigned long *stack, unsigned long bp,
12678 const struct stacktrace_ops *ops, void *data,
12679 unsigned long *end, int *graph);
12680diff -urNp linux-2.6.32.43/arch/x86/kernel/e820.c linux-2.6.32.43/arch/x86/kernel/e820.c
12681--- linux-2.6.32.43/arch/x86/kernel/e820.c 2011-03-27 14:31:47.000000000 -0400
12682+++ linux-2.6.32.43/arch/x86/kernel/e820.c 2011-04-17 15:56:46.000000000 -0400
12683@@ -733,7 +733,7 @@ struct early_res {
12684 };
12685 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
12686 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
12687- {}
12688+ { 0, 0, {0}, 0 }
12689 };
12690
12691 static int __init find_overlapped_early(u64 start, u64 end)
12692diff -urNp linux-2.6.32.43/arch/x86/kernel/early_printk.c linux-2.6.32.43/arch/x86/kernel/early_printk.c
12693--- linux-2.6.32.43/arch/x86/kernel/early_printk.c 2011-03-27 14:31:47.000000000 -0400
12694+++ linux-2.6.32.43/arch/x86/kernel/early_printk.c 2011-05-16 21:46:57.000000000 -0400
12695@@ -7,6 +7,7 @@
12696 #include <linux/pci_regs.h>
12697 #include <linux/pci_ids.h>
12698 #include <linux/errno.h>
12699+#include <linux/sched.h>
12700 #include <asm/io.h>
12701 #include <asm/processor.h>
12702 #include <asm/fcntl.h>
12703@@ -170,6 +171,8 @@ asmlinkage void early_printk(const char
12704 int n;
12705 va_list ap;
12706
12707+ pax_track_stack();
12708+
12709 va_start(ap, fmt);
12710 n = vscnprintf(buf, sizeof(buf), fmt, ap);
12711 early_console->write(early_console, buf, n);
12712diff -urNp linux-2.6.32.43/arch/x86/kernel/efi_32.c linux-2.6.32.43/arch/x86/kernel/efi_32.c
12713--- linux-2.6.32.43/arch/x86/kernel/efi_32.c 2011-03-27 14:31:47.000000000 -0400
12714+++ linux-2.6.32.43/arch/x86/kernel/efi_32.c 2011-04-17 15:56:46.000000000 -0400
12715@@ -38,70 +38,38 @@
12716 */
12717
12718 static unsigned long efi_rt_eflags;
12719-static pgd_t efi_bak_pg_dir_pointer[2];
12720+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
12721
12722-void efi_call_phys_prelog(void)
12723+void __init efi_call_phys_prelog(void)
12724 {
12725- unsigned long cr4;
12726- unsigned long temp;
12727 struct desc_ptr gdt_descr;
12728
12729 local_irq_save(efi_rt_eflags);
12730
12731- /*
12732- * If I don't have PAE, I should just duplicate two entries in page
12733- * directory. If I have PAE, I just need to duplicate one entry in
12734- * page directory.
12735- */
12736- cr4 = read_cr4_safe();
12737
12738- if (cr4 & X86_CR4_PAE) {
12739- efi_bak_pg_dir_pointer[0].pgd =
12740- swapper_pg_dir[pgd_index(0)].pgd;
12741- swapper_pg_dir[0].pgd =
12742- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12743- } else {
12744- efi_bak_pg_dir_pointer[0].pgd =
12745- swapper_pg_dir[pgd_index(0)].pgd;
12746- efi_bak_pg_dir_pointer[1].pgd =
12747- swapper_pg_dir[pgd_index(0x400000)].pgd;
12748- swapper_pg_dir[pgd_index(0)].pgd =
12749- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12750- temp = PAGE_OFFSET + 0x400000;
12751- swapper_pg_dir[pgd_index(0x400000)].pgd =
12752- swapper_pg_dir[pgd_index(temp)].pgd;
12753- }
12754+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
12755+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
12756+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
12757
12758 /*
12759 * After the lock is released, the original page table is restored.
12760 */
12761 __flush_tlb_all();
12762
12763- gdt_descr.address = __pa(get_cpu_gdt_table(0));
12764+ gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
12765 gdt_descr.size = GDT_SIZE - 1;
12766 load_gdt(&gdt_descr);
12767 }
12768
12769-void efi_call_phys_epilog(void)
12770+void __init efi_call_phys_epilog(void)
12771 {
12772- unsigned long cr4;
12773 struct desc_ptr gdt_descr;
12774
12775- gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
12776+ gdt_descr.address = get_cpu_gdt_table(0);
12777 gdt_descr.size = GDT_SIZE - 1;
12778 load_gdt(&gdt_descr);
12779
12780- cr4 = read_cr4_safe();
12781-
12782- if (cr4 & X86_CR4_PAE) {
12783- swapper_pg_dir[pgd_index(0)].pgd =
12784- efi_bak_pg_dir_pointer[0].pgd;
12785- } else {
12786- swapper_pg_dir[pgd_index(0)].pgd =
12787- efi_bak_pg_dir_pointer[0].pgd;
12788- swapper_pg_dir[pgd_index(0x400000)].pgd =
12789- efi_bak_pg_dir_pointer[1].pgd;
12790- }
12791+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
12792
12793 /*
12794 * After the lock is released, the original page table is restored.
12795diff -urNp linux-2.6.32.43/arch/x86/kernel/efi_stub_32.S linux-2.6.32.43/arch/x86/kernel/efi_stub_32.S
12796--- linux-2.6.32.43/arch/x86/kernel/efi_stub_32.S 2011-03-27 14:31:47.000000000 -0400
12797+++ linux-2.6.32.43/arch/x86/kernel/efi_stub_32.S 2011-04-17 15:56:46.000000000 -0400
12798@@ -6,6 +6,7 @@
12799 */
12800
12801 #include <linux/linkage.h>
12802+#include <linux/init.h>
12803 #include <asm/page_types.h>
12804
12805 /*
12806@@ -20,7 +21,7 @@
12807 * service functions will comply with gcc calling convention, too.
12808 */
12809
12810-.text
12811+__INIT
12812 ENTRY(efi_call_phys)
12813 /*
12814 * 0. The function can only be called in Linux kernel. So CS has been
12815@@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
12816 * The mapping of lower virtual memory has been created in prelog and
12817 * epilog.
12818 */
12819- movl $1f, %edx
12820- subl $__PAGE_OFFSET, %edx
12821- jmp *%edx
12822+ jmp 1f-__PAGE_OFFSET
12823 1:
12824
12825 /*
12826@@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
12827 * parameter 2, ..., param n. To make things easy, we save the return
12828 * address of efi_call_phys in a global variable.
12829 */
12830- popl %edx
12831- movl %edx, saved_return_addr
12832- /* get the function pointer into ECX*/
12833- popl %ecx
12834- movl %ecx, efi_rt_function_ptr
12835- movl $2f, %edx
12836- subl $__PAGE_OFFSET, %edx
12837- pushl %edx
12838+ popl (saved_return_addr)
12839+ popl (efi_rt_function_ptr)
12840
12841 /*
12842 * 3. Clear PG bit in %CR0.
12843@@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
12844 /*
12845 * 5. Call the physical function.
12846 */
12847- jmp *%ecx
12848+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
12849
12850-2:
12851 /*
12852 * 6. After EFI runtime service returns, control will return to
12853 * following instruction. We'd better readjust stack pointer first.
12854@@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
12855 movl %cr0, %edx
12856 orl $0x80000000, %edx
12857 movl %edx, %cr0
12858- jmp 1f
12859-1:
12860+
12861 /*
12862 * 8. Now restore the virtual mode from flat mode by
12863 * adding EIP with PAGE_OFFSET.
12864 */
12865- movl $1f, %edx
12866- jmp *%edx
12867+ jmp 1f+__PAGE_OFFSET
12868 1:
12869
12870 /*
12871 * 9. Balance the stack. And because EAX contain the return value,
12872 * we'd better not clobber it.
12873 */
12874- leal efi_rt_function_ptr, %edx
12875- movl (%edx), %ecx
12876- pushl %ecx
12877+ pushl (efi_rt_function_ptr)
12878
12879 /*
12880- * 10. Push the saved return address onto the stack and return.
12881+ * 10. Return to the saved return address.
12882 */
12883- leal saved_return_addr, %edx
12884- movl (%edx), %ecx
12885- pushl %ecx
12886- ret
12887+ jmpl *(saved_return_addr)
12888 ENDPROC(efi_call_phys)
12889 .previous
12890
12891-.data
12892+__INITDATA
12893 saved_return_addr:
12894 .long 0
12895 efi_rt_function_ptr:
12896diff -urNp linux-2.6.32.43/arch/x86/kernel/entry_32.S linux-2.6.32.43/arch/x86/kernel/entry_32.S
12897--- linux-2.6.32.43/arch/x86/kernel/entry_32.S 2011-03-27 14:31:47.000000000 -0400
12898+++ linux-2.6.32.43/arch/x86/kernel/entry_32.S 2011-05-22 23:02:03.000000000 -0400
12899@@ -185,13 +185,146 @@
12900 /*CFI_REL_OFFSET gs, PT_GS*/
12901 .endm
12902 .macro SET_KERNEL_GS reg
12903+
12904+#ifdef CONFIG_CC_STACKPROTECTOR
12905 movl $(__KERNEL_STACK_CANARY), \reg
12906+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
12907+ movl $(__USER_DS), \reg
12908+#else
12909+ xorl \reg, \reg
12910+#endif
12911+
12912 movl \reg, %gs
12913 .endm
12914
12915 #endif /* CONFIG_X86_32_LAZY_GS */
12916
12917-.macro SAVE_ALL
12918+.macro pax_enter_kernel
12919+#ifdef CONFIG_PAX_KERNEXEC
12920+ call pax_enter_kernel
12921+#endif
12922+.endm
12923+
12924+.macro pax_exit_kernel
12925+#ifdef CONFIG_PAX_KERNEXEC
12926+ call pax_exit_kernel
12927+#endif
12928+.endm
12929+
12930+#ifdef CONFIG_PAX_KERNEXEC
12931+ENTRY(pax_enter_kernel)
12932+#ifdef CONFIG_PARAVIRT
12933+ pushl %eax
12934+ pushl %ecx
12935+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
12936+ mov %eax, %esi
12937+#else
12938+ mov %cr0, %esi
12939+#endif
12940+ bts $16, %esi
12941+ jnc 1f
12942+ mov %cs, %esi
12943+ cmp $__KERNEL_CS, %esi
12944+ jz 3f
12945+ ljmp $__KERNEL_CS, $3f
12946+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
12947+2:
12948+#ifdef CONFIG_PARAVIRT
12949+ mov %esi, %eax
12950+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
12951+#else
12952+ mov %esi, %cr0
12953+#endif
12954+3:
12955+#ifdef CONFIG_PARAVIRT
12956+ popl %ecx
12957+ popl %eax
12958+#endif
12959+ ret
12960+ENDPROC(pax_enter_kernel)
12961+
12962+ENTRY(pax_exit_kernel)
12963+#ifdef CONFIG_PARAVIRT
12964+ pushl %eax
12965+ pushl %ecx
12966+#endif
12967+ mov %cs, %esi
12968+ cmp $__KERNEXEC_KERNEL_CS, %esi
12969+ jnz 2f
12970+#ifdef CONFIG_PARAVIRT
12971+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
12972+ mov %eax, %esi
12973+#else
12974+ mov %cr0, %esi
12975+#endif
12976+ btr $16, %esi
12977+ ljmp $__KERNEL_CS, $1f
12978+1:
12979+#ifdef CONFIG_PARAVIRT
12980+ mov %esi, %eax
12981+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
12982+#else
12983+ mov %esi, %cr0
12984+#endif
12985+2:
12986+#ifdef CONFIG_PARAVIRT
12987+ popl %ecx
12988+ popl %eax
12989+#endif
12990+ ret
12991+ENDPROC(pax_exit_kernel)
12992+#endif
12993+
12994+.macro pax_erase_kstack
12995+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
12996+ call pax_erase_kstack
12997+#endif
12998+.endm
12999+
13000+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13001+/*
13002+ * ebp: thread_info
13003+ * ecx, edx: can be clobbered
13004+ */
13005+ENTRY(pax_erase_kstack)
13006+ pushl %edi
13007+ pushl %eax
13008+
13009+ mov TI_lowest_stack(%ebp), %edi
13010+ mov $-0xBEEF, %eax
13011+ std
13012+
13013+1: mov %edi, %ecx
13014+ and $THREAD_SIZE_asm - 1, %ecx
13015+ shr $2, %ecx
13016+ repne scasl
13017+ jecxz 2f
13018+
13019+ cmp $2*16, %ecx
13020+ jc 2f
13021+
13022+ mov $2*16, %ecx
13023+ repe scasl
13024+ jecxz 2f
13025+ jne 1b
13026+
13027+2: cld
13028+ mov %esp, %ecx
13029+ sub %edi, %ecx
13030+ shr $2, %ecx
13031+ rep stosl
13032+
13033+ mov TI_task_thread_sp0(%ebp), %edi
13034+ sub $128, %edi
13035+ mov %edi, TI_lowest_stack(%ebp)
13036+
13037+ popl %eax
13038+ popl %edi
13039+ ret
13040+ENDPROC(pax_erase_kstack)
13041+#endif
13042+
13043+.macro __SAVE_ALL _DS
13044 cld
13045 PUSH_GS
13046 pushl %fs
13047@@ -224,7 +357,7 @@
13048 pushl %ebx
13049 CFI_ADJUST_CFA_OFFSET 4
13050 CFI_REL_OFFSET ebx, 0
13051- movl $(__USER_DS), %edx
13052+ movl $\_DS, %edx
13053 movl %edx, %ds
13054 movl %edx, %es
13055 movl $(__KERNEL_PERCPU), %edx
13056@@ -232,6 +365,15 @@
13057 SET_KERNEL_GS %edx
13058 .endm
13059
13060+.macro SAVE_ALL
13061+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
13062+ __SAVE_ALL __KERNEL_DS
13063+ pax_enter_kernel
13064+#else
13065+ __SAVE_ALL __USER_DS
13066+#endif
13067+.endm
13068+
13069 .macro RESTORE_INT_REGS
13070 popl %ebx
13071 CFI_ADJUST_CFA_OFFSET -4
13072@@ -352,7 +494,15 @@ check_userspace:
13073 movb PT_CS(%esp), %al
13074 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
13075 cmpl $USER_RPL, %eax
13076+
13077+#ifdef CONFIG_PAX_KERNEXEC
13078+ jae resume_userspace
13079+
13080+ PAX_EXIT_KERNEL
13081+ jmp resume_kernel
13082+#else
13083 jb resume_kernel # not returning to v8086 or userspace
13084+#endif
13085
13086 ENTRY(resume_userspace)
13087 LOCKDEP_SYS_EXIT
13088@@ -364,7 +514,7 @@ ENTRY(resume_userspace)
13089 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
13090 # int/exception return?
13091 jne work_pending
13092- jmp restore_all
13093+ jmp restore_all_pax
13094 END(ret_from_exception)
13095
13096 #ifdef CONFIG_PREEMPT
13097@@ -414,25 +564,36 @@ sysenter_past_esp:
13098 /*CFI_REL_OFFSET cs, 0*/
13099 /*
13100 * Push current_thread_info()->sysenter_return to the stack.
13101- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
13102- * pushed above; +8 corresponds to copy_thread's esp0 setting.
13103 */
13104- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
13105+ pushl $0
13106 CFI_ADJUST_CFA_OFFSET 4
13107 CFI_REL_OFFSET eip, 0
13108
13109 pushl %eax
13110 CFI_ADJUST_CFA_OFFSET 4
13111 SAVE_ALL
13112+ GET_THREAD_INFO(%ebp)
13113+ movl TI_sysenter_return(%ebp),%ebp
13114+ movl %ebp,PT_EIP(%esp)
13115 ENABLE_INTERRUPTS(CLBR_NONE)
13116
13117 /*
13118 * Load the potential sixth argument from user stack.
13119 * Careful about security.
13120 */
13121+ movl PT_OLDESP(%esp),%ebp
13122+
13123+#ifdef CONFIG_PAX_MEMORY_UDEREF
13124+ mov PT_OLDSS(%esp),%ds
13125+1: movl %ds:(%ebp),%ebp
13126+ push %ss
13127+ pop %ds
13128+#else
13129 cmpl $__PAGE_OFFSET-3,%ebp
13130 jae syscall_fault
13131 1: movl (%ebp),%ebp
13132+#endif
13133+
13134 movl %ebp,PT_EBP(%esp)
13135 .section __ex_table,"a"
13136 .align 4
13137@@ -455,12 +616,23 @@ sysenter_do_call:
13138 testl $_TIF_ALLWORK_MASK, %ecx
13139 jne sysexit_audit
13140 sysenter_exit:
13141+
13142+#ifdef CONFIG_PAX_RANDKSTACK
13143+ pushl_cfi %eax
13144+ call pax_randomize_kstack
13145+ popl_cfi %eax
13146+#endif
13147+
13148+ pax_erase_kstack
13149+
13150 /* if something modifies registers it must also disable sysexit */
13151 movl PT_EIP(%esp), %edx
13152 movl PT_OLDESP(%esp), %ecx
13153 xorl %ebp,%ebp
13154 TRACE_IRQS_ON
13155 1: mov PT_FS(%esp), %fs
13156+2: mov PT_DS(%esp), %ds
13157+3: mov PT_ES(%esp), %es
13158 PTGS_TO_GS
13159 ENABLE_INTERRUPTS_SYSEXIT
13160
13161@@ -477,6 +649,9 @@ sysenter_audit:
13162 movl %eax,%edx /* 2nd arg: syscall number */
13163 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13164 call audit_syscall_entry
13165+
13166+ pax_erase_kstack
13167+
13168 pushl %ebx
13169 CFI_ADJUST_CFA_OFFSET 4
13170 movl PT_EAX(%esp),%eax /* reload syscall number */
13171@@ -504,11 +679,17 @@ sysexit_audit:
13172
13173 CFI_ENDPROC
13174 .pushsection .fixup,"ax"
13175-2: movl $0,PT_FS(%esp)
13176+4: movl $0,PT_FS(%esp)
13177+ jmp 1b
13178+5: movl $0,PT_DS(%esp)
13179+ jmp 1b
13180+6: movl $0,PT_ES(%esp)
13181 jmp 1b
13182 .section __ex_table,"a"
13183 .align 4
13184- .long 1b,2b
13185+ .long 1b,4b
13186+ .long 2b,5b
13187+ .long 3b,6b
13188 .popsection
13189 PTGS_TO_GS_EX
13190 ENDPROC(ia32_sysenter_target)
13191@@ -538,6 +719,14 @@ syscall_exit:
13192 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13193 jne syscall_exit_work
13194
13195+restore_all_pax:
13196+
13197+#ifdef CONFIG_PAX_RANDKSTACK
13198+ call pax_randomize_kstack
13199+#endif
13200+
13201+ pax_erase_kstack
13202+
13203 restore_all:
13204 TRACE_IRQS_IRET
13205 restore_all_notrace:
13206@@ -602,7 +791,13 @@ ldt_ss:
13207 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13208 mov %dx, %ax /* eax: new kernel esp */
13209 sub %eax, %edx /* offset (low word is 0) */
13210- PER_CPU(gdt_page, %ebx)
13211+#ifdef CONFIG_SMP
13212+ movl PER_CPU_VAR(cpu_number), %ebx
13213+ shll $PAGE_SHIFT_asm, %ebx
13214+ addl $cpu_gdt_table, %ebx
13215+#else
13216+ movl $cpu_gdt_table, %ebx
13217+#endif
13218 shr $16, %edx
13219 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
13220 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
13221@@ -636,31 +831,25 @@ work_resched:
13222 movl TI_flags(%ebp), %ecx
13223 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13224 # than syscall tracing?
13225- jz restore_all
13226+ jz restore_all_pax
13227 testb $_TIF_NEED_RESCHED, %cl
13228 jnz work_resched
13229
13230 work_notifysig: # deal with pending signals and
13231 # notify-resume requests
13232+ movl %esp, %eax
13233 #ifdef CONFIG_VM86
13234 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13235- movl %esp, %eax
13236- jne work_notifysig_v86 # returning to kernel-space or
13237+ jz 1f # returning to kernel-space or
13238 # vm86-space
13239- xorl %edx, %edx
13240- call do_notify_resume
13241- jmp resume_userspace_sig
13242
13243- ALIGN
13244-work_notifysig_v86:
13245 pushl %ecx # save ti_flags for do_notify_resume
13246 CFI_ADJUST_CFA_OFFSET 4
13247 call save_v86_state # %eax contains pt_regs pointer
13248 popl %ecx
13249 CFI_ADJUST_CFA_OFFSET -4
13250 movl %eax, %esp
13251-#else
13252- movl %esp, %eax
13253+1:
13254 #endif
13255 xorl %edx, %edx
13256 call do_notify_resume
13257@@ -673,6 +862,9 @@ syscall_trace_entry:
13258 movl $-ENOSYS,PT_EAX(%esp)
13259 movl %esp, %eax
13260 call syscall_trace_enter
13261+
13262+ pax_erase_kstack
13263+
13264 /* What it returned is what we'll actually use. */
13265 cmpl $(nr_syscalls), %eax
13266 jnae syscall_call
13267@@ -695,6 +887,10 @@ END(syscall_exit_work)
13268
13269 RING0_INT_FRAME # can't unwind into user space anyway
13270 syscall_fault:
13271+#ifdef CONFIG_PAX_MEMORY_UDEREF
13272+ push %ss
13273+ pop %ds
13274+#endif
13275 GET_THREAD_INFO(%ebp)
13276 movl $-EFAULT,PT_EAX(%esp)
13277 jmp resume_userspace
13278@@ -726,6 +922,33 @@ PTREGSCALL(rt_sigreturn)
13279 PTREGSCALL(vm86)
13280 PTREGSCALL(vm86old)
13281
13282+ ALIGN;
13283+ENTRY(kernel_execve)
13284+ push %ebp
13285+ sub $PT_OLDSS+4,%esp
13286+ push %edi
13287+ push %ecx
13288+ push %eax
13289+ lea 3*4(%esp),%edi
13290+ mov $PT_OLDSS/4+1,%ecx
13291+ xorl %eax,%eax
13292+ rep stosl
13293+ pop %eax
13294+ pop %ecx
13295+ pop %edi
13296+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13297+ mov %eax,PT_EBX(%esp)
13298+ mov %edx,PT_ECX(%esp)
13299+ mov %ecx,PT_EDX(%esp)
13300+ mov %esp,%eax
13301+ call sys_execve
13302+ GET_THREAD_INFO(%ebp)
13303+ test %eax,%eax
13304+ jz syscall_exit
13305+ add $PT_OLDSS+4,%esp
13306+ pop %ebp
13307+ ret
13308+
13309 .macro FIXUP_ESPFIX_STACK
13310 /*
13311 * Switch back for ESPFIX stack to the normal zerobased stack
13312@@ -735,7 +958,13 @@ PTREGSCALL(vm86old)
13313 * normal stack and adjusts ESP with the matching offset.
13314 */
13315 /* fixup the stack */
13316- PER_CPU(gdt_page, %ebx)
13317+#ifdef CONFIG_SMP
13318+ movl PER_CPU_VAR(cpu_number), %ebx
13319+ shll $PAGE_SHIFT_asm, %ebx
13320+ addl $cpu_gdt_table, %ebx
13321+#else
13322+ movl $cpu_gdt_table, %ebx
13323+#endif
13324 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
13325 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
13326 shl $16, %eax
13327@@ -1198,7 +1427,6 @@ return_to_handler:
13328 ret
13329 #endif
13330
13331-.section .rodata,"a"
13332 #include "syscall_table_32.S"
13333
13334 syscall_table_size=(.-sys_call_table)
13335@@ -1255,9 +1483,12 @@ error_code:
13336 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13337 REG_TO_PTGS %ecx
13338 SET_KERNEL_GS %ecx
13339- movl $(__USER_DS), %ecx
13340+ movl $(__KERNEL_DS), %ecx
13341 movl %ecx, %ds
13342 movl %ecx, %es
13343+
13344+ pax_enter_kernel
13345+
13346 TRACE_IRQS_OFF
13347 movl %esp,%eax # pt_regs pointer
13348 call *%edi
13349@@ -1351,6 +1582,9 @@ nmi_stack_correct:
13350 xorl %edx,%edx # zero error code
13351 movl %esp,%eax # pt_regs pointer
13352 call do_nmi
13353+
13354+ pax_exit_kernel
13355+
13356 jmp restore_all_notrace
13357 CFI_ENDPROC
13358
13359@@ -1391,6 +1625,9 @@ nmi_espfix_stack:
13360 FIXUP_ESPFIX_STACK # %eax == %esp
13361 xorl %edx,%edx # zero error code
13362 call do_nmi
13363+
13364+ pax_exit_kernel
13365+
13366 RESTORE_REGS
13367 lss 12+4(%esp), %esp # back to espfix stack
13368 CFI_ADJUST_CFA_OFFSET -24
13369diff -urNp linux-2.6.32.43/arch/x86/kernel/entry_64.S linux-2.6.32.43/arch/x86/kernel/entry_64.S
13370--- linux-2.6.32.43/arch/x86/kernel/entry_64.S 2011-03-27 14:31:47.000000000 -0400
13371+++ linux-2.6.32.43/arch/x86/kernel/entry_64.S 2011-06-04 20:30:53.000000000 -0400
13372@@ -53,6 +53,7 @@
13373 #include <asm/paravirt.h>
13374 #include <asm/ftrace.h>
13375 #include <asm/percpu.h>
13376+#include <asm/pgtable.h>
13377
13378 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13379 #include <linux/elf-em.h>
13380@@ -174,6 +175,257 @@ ENTRY(native_usergs_sysret64)
13381 ENDPROC(native_usergs_sysret64)
13382 #endif /* CONFIG_PARAVIRT */
13383
13384+ .macro ljmpq sel, off
13385+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13386+ .byte 0x48; ljmp *1234f(%rip)
13387+ .pushsection .rodata
13388+ .align 16
13389+ 1234: .quad \off; .word \sel
13390+ .popsection
13391+#else
13392+ pushq $\sel
13393+ pushq $\off
13394+ lretq
13395+#endif
13396+ .endm
13397+
13398+ .macro pax_enter_kernel
13399+#ifdef CONFIG_PAX_KERNEXEC
13400+ call pax_enter_kernel
13401+#endif
13402+ .endm
13403+
13404+ .macro pax_exit_kernel
13405+#ifdef CONFIG_PAX_KERNEXEC
13406+ call pax_exit_kernel
13407+#endif
13408+ .endm
13409+
13410+#ifdef CONFIG_PAX_KERNEXEC
13411+ENTRY(pax_enter_kernel)
13412+ pushq %rdi
13413+
13414+#ifdef CONFIG_PARAVIRT
13415+ PV_SAVE_REGS(CLBR_RDI)
13416+#endif
13417+
13418+ GET_CR0_INTO_RDI
13419+ bts $16,%rdi
13420+ jnc 1f
13421+ mov %cs,%edi
13422+ cmp $__KERNEL_CS,%edi
13423+ jz 3f
13424+ ljmpq __KERNEL_CS,3f
13425+1: ljmpq __KERNEXEC_KERNEL_CS,2f
13426+2: SET_RDI_INTO_CR0
13427+3:
13428+
13429+#ifdef CONFIG_PARAVIRT
13430+ PV_RESTORE_REGS(CLBR_RDI)
13431+#endif
13432+
13433+ popq %rdi
13434+ retq
13435+ENDPROC(pax_enter_kernel)
13436+
13437+ENTRY(pax_exit_kernel)
13438+ pushq %rdi
13439+
13440+#ifdef CONFIG_PARAVIRT
13441+ PV_SAVE_REGS(CLBR_RDI)
13442+#endif
13443+
13444+ mov %cs,%rdi
13445+ cmp $__KERNEXEC_KERNEL_CS,%edi
13446+ jnz 2f
13447+ GET_CR0_INTO_RDI
13448+ btr $16,%rdi
13449+ ljmpq __KERNEL_CS,1f
13450+1: SET_RDI_INTO_CR0
13451+2:
13452+
13453+#ifdef CONFIG_PARAVIRT
13454+ PV_RESTORE_REGS(CLBR_RDI);
13455+#endif
13456+
13457+ popq %rdi
13458+ retq
13459+ENDPROC(pax_exit_kernel)
13460+#endif
13461+
13462+ .macro pax_enter_kernel_user
13463+#ifdef CONFIG_PAX_MEMORY_UDEREF
13464+ call pax_enter_kernel_user
13465+#endif
13466+ .endm
13467+
13468+ .macro pax_exit_kernel_user
13469+#ifdef CONFIG_PAX_MEMORY_UDEREF
13470+ call pax_exit_kernel_user
13471+#endif
13472+#ifdef CONFIG_PAX_RANDKSTACK
13473+ push %rax
13474+ call pax_randomize_kstack
13475+ pop %rax
13476+#endif
13477+ pax_erase_kstack
13478+ .endm
13479+
13480+#ifdef CONFIG_PAX_MEMORY_UDEREF
13481+ENTRY(pax_enter_kernel_user)
13482+ pushq %rdi
13483+ pushq %rbx
13484+
13485+#ifdef CONFIG_PARAVIRT
13486+ PV_SAVE_REGS(CLBR_RDI)
13487+#endif
13488+
13489+ GET_CR3_INTO_RDI
13490+ mov %rdi,%rbx
13491+ add $__START_KERNEL_map,%rbx
13492+ sub phys_base(%rip),%rbx
13493+
13494+#ifdef CONFIG_PARAVIRT
13495+ pushq %rdi
13496+ cmpl $0, pv_info+PARAVIRT_enabled
13497+ jz 1f
13498+ i = 0
13499+ .rept USER_PGD_PTRS
13500+ mov i*8(%rbx),%rsi
13501+ mov $0,%sil
13502+ lea i*8(%rbx),%rdi
13503+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13504+ i = i + 1
13505+ .endr
13506+ jmp 2f
13507+1:
13508+#endif
13509+
13510+ i = 0
13511+ .rept USER_PGD_PTRS
13512+ movb $0,i*8(%rbx)
13513+ i = i + 1
13514+ .endr
13515+
13516+#ifdef CONFIG_PARAVIRT
13517+2: popq %rdi
13518+#endif
13519+ SET_RDI_INTO_CR3
13520+
13521+#ifdef CONFIG_PAX_KERNEXEC
13522+ GET_CR0_INTO_RDI
13523+ bts $16,%rdi
13524+ SET_RDI_INTO_CR0
13525+#endif
13526+
13527+#ifdef CONFIG_PARAVIRT
13528+ PV_RESTORE_REGS(CLBR_RDI)
13529+#endif
13530+
13531+ popq %rbx
13532+ popq %rdi
13533+ retq
13534+ENDPROC(pax_enter_kernel_user)
13535+
13536+ENTRY(pax_exit_kernel_user)
13537+ push %rdi
13538+
13539+#ifdef CONFIG_PARAVIRT
13540+ pushq %rbx
13541+ PV_SAVE_REGS(CLBR_RDI)
13542+#endif
13543+
13544+#ifdef CONFIG_PAX_KERNEXEC
13545+ GET_CR0_INTO_RDI
13546+ btr $16,%rdi
13547+ SET_RDI_INTO_CR0
13548+#endif
13549+
13550+ GET_CR3_INTO_RDI
13551+ add $__START_KERNEL_map,%rdi
13552+ sub phys_base(%rip),%rdi
13553+
13554+#ifdef CONFIG_PARAVIRT
13555+ cmpl $0, pv_info+PARAVIRT_enabled
13556+ jz 1f
13557+ mov %rdi,%rbx
13558+ i = 0
13559+ .rept USER_PGD_PTRS
13560+ mov i*8(%rbx),%rsi
13561+ mov $0x67,%sil
13562+ lea i*8(%rbx),%rdi
13563+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13564+ i = i + 1
13565+ .endr
13566+ jmp 2f
13567+1:
13568+#endif
13569+
13570+ i = 0
13571+ .rept USER_PGD_PTRS
13572+ movb $0x67,i*8(%rdi)
13573+ i = i + 1
13574+ .endr
13575+
13576+#ifdef CONFIG_PARAVIRT
13577+2: PV_RESTORE_REGS(CLBR_RDI)
13578+ popq %rbx
13579+#endif
13580+
13581+ popq %rdi
13582+ retq
13583+ENDPROC(pax_exit_kernel_user)
13584+#endif
13585+
13586+.macro pax_erase_kstack
13587+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13588+ call pax_erase_kstack
13589+#endif
13590+.endm
13591+
13592+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13593+/*
13594+ * r10: thread_info
13595+ * rcx, rdx: can be clobbered
13596+ */
13597+ENTRY(pax_erase_kstack)
13598+ pushq %rdi
13599+ pushq %rax
13600+
13601+ GET_THREAD_INFO(%r10)
13602+ mov TI_lowest_stack(%r10), %rdi
13603+ mov $-0xBEEF, %rax
13604+ std
13605+
13606+1: mov %edi, %ecx
13607+ and $THREAD_SIZE_asm - 1, %ecx
13608+ shr $3, %ecx
13609+ repne scasq
13610+ jecxz 2f
13611+
13612+ cmp $2*8, %ecx
13613+ jc 2f
13614+
13615+ mov $2*8, %ecx
13616+ repe scasq
13617+ jecxz 2f
13618+ jne 1b
13619+
13620+2: cld
13621+ mov %esp, %ecx
13622+ sub %edi, %ecx
13623+ shr $3, %ecx
13624+ rep stosq
13625+
13626+ mov TI_task_thread_sp0(%r10), %rdi
13627+ sub $256, %rdi
13628+ mov %rdi, TI_lowest_stack(%r10)
13629+
13630+ popq %rax
13631+ popq %rdi
13632+ ret
13633+ENDPROC(pax_erase_kstack)
13634+#endif
13635
13636 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13637 #ifdef CONFIG_TRACE_IRQFLAGS
13638@@ -317,7 +569,7 @@ ENTRY(save_args)
13639 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
13640 movq_cfi rbp, 8 /* push %rbp */
13641 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
13642- testl $3, CS(%rdi)
13643+ testb $3, CS(%rdi)
13644 je 1f
13645 SWAPGS
13646 /*
13647@@ -409,7 +661,7 @@ ENTRY(ret_from_fork)
13648
13649 RESTORE_REST
13650
13651- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13652+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13653 je int_ret_from_sys_call
13654
13655 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13656@@ -455,7 +707,7 @@ END(ret_from_fork)
13657 ENTRY(system_call)
13658 CFI_STARTPROC simple
13659 CFI_SIGNAL_FRAME
13660- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13661+ CFI_DEF_CFA rsp,0
13662 CFI_REGISTER rip,rcx
13663 /*CFI_REGISTER rflags,r11*/
13664 SWAPGS_UNSAFE_STACK
13665@@ -468,12 +720,13 @@ ENTRY(system_call_after_swapgs)
13666
13667 movq %rsp,PER_CPU_VAR(old_rsp)
13668 movq PER_CPU_VAR(kernel_stack),%rsp
13669+ pax_enter_kernel_user
13670 /*
13671 * No need to follow this irqs off/on section - it's straight
13672 * and short:
13673 */
13674 ENABLE_INTERRUPTS(CLBR_NONE)
13675- SAVE_ARGS 8,1
13676+ SAVE_ARGS 8*6,1
13677 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13678 movq %rcx,RIP-ARGOFFSET(%rsp)
13679 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13680@@ -502,6 +755,7 @@ sysret_check:
13681 andl %edi,%edx
13682 jnz sysret_careful
13683 CFI_REMEMBER_STATE
13684+ pax_exit_kernel_user
13685 /*
13686 * sysretq will re-enable interrupts:
13687 */
13688@@ -562,6 +816,9 @@ auditsys:
13689 movq %rax,%rsi /* 2nd arg: syscall number */
13690 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13691 call audit_syscall_entry
13692+
13693+ pax_erase_kstack
13694+
13695 LOAD_ARGS 0 /* reload call-clobbered registers */
13696 jmp system_call_fastpath
13697
13698@@ -592,6 +849,9 @@ tracesys:
13699 FIXUP_TOP_OF_STACK %rdi
13700 movq %rsp,%rdi
13701 call syscall_trace_enter
13702+
13703+ pax_erase_kstack
13704+
13705 /*
13706 * Reload arg registers from stack in case ptrace changed them.
13707 * We don't reload %rax because syscall_trace_enter() returned
13708@@ -613,7 +873,7 @@ tracesys:
13709 GLOBAL(int_ret_from_sys_call)
13710 DISABLE_INTERRUPTS(CLBR_NONE)
13711 TRACE_IRQS_OFF
13712- testl $3,CS-ARGOFFSET(%rsp)
13713+ testb $3,CS-ARGOFFSET(%rsp)
13714 je retint_restore_args
13715 movl $_TIF_ALLWORK_MASK,%edi
13716 /* edi: mask to check */
13717@@ -800,6 +1060,16 @@ END(interrupt)
13718 CFI_ADJUST_CFA_OFFSET 10*8
13719 call save_args
13720 PARTIAL_FRAME 0
13721+#ifdef CONFIG_PAX_MEMORY_UDEREF
13722+ testb $3, CS(%rdi)
13723+ jnz 1f
13724+ pax_enter_kernel
13725+ jmp 2f
13726+1: pax_enter_kernel_user
13727+2:
13728+#else
13729+ pax_enter_kernel
13730+#endif
13731 call \func
13732 .endm
13733
13734@@ -822,7 +1092,7 @@ ret_from_intr:
13735 CFI_ADJUST_CFA_OFFSET -8
13736 exit_intr:
13737 GET_THREAD_INFO(%rcx)
13738- testl $3,CS-ARGOFFSET(%rsp)
13739+ testb $3,CS-ARGOFFSET(%rsp)
13740 je retint_kernel
13741
13742 /* Interrupt came from user space */
13743@@ -844,12 +1114,14 @@ retint_swapgs: /* return to user-space
13744 * The iretq could re-enable interrupts:
13745 */
13746 DISABLE_INTERRUPTS(CLBR_ANY)
13747+ pax_exit_kernel_user
13748 TRACE_IRQS_IRETQ
13749 SWAPGS
13750 jmp restore_args
13751
13752 retint_restore_args: /* return to kernel space */
13753 DISABLE_INTERRUPTS(CLBR_ANY)
13754+ pax_exit_kernel
13755 /*
13756 * The iretq could re-enable interrupts:
13757 */
13758@@ -1032,6 +1304,16 @@ ENTRY(\sym)
13759 CFI_ADJUST_CFA_OFFSET 15*8
13760 call error_entry
13761 DEFAULT_FRAME 0
13762+#ifdef CONFIG_PAX_MEMORY_UDEREF
13763+ testb $3, CS(%rsp)
13764+ jnz 1f
13765+ pax_enter_kernel
13766+ jmp 2f
13767+1: pax_enter_kernel_user
13768+2:
13769+#else
13770+ pax_enter_kernel
13771+#endif
13772 movq %rsp,%rdi /* pt_regs pointer */
13773 xorl %esi,%esi /* no error code */
13774 call \do_sym
13775@@ -1049,6 +1331,16 @@ ENTRY(\sym)
13776 subq $15*8, %rsp
13777 call save_paranoid
13778 TRACE_IRQS_OFF
13779+#ifdef CONFIG_PAX_MEMORY_UDEREF
13780+ testb $3, CS(%rsp)
13781+ jnz 1f
13782+ pax_enter_kernel
13783+ jmp 2f
13784+1: pax_enter_kernel_user
13785+2:
13786+#else
13787+ pax_enter_kernel
13788+#endif
13789 movq %rsp,%rdi /* pt_regs pointer */
13790 xorl %esi,%esi /* no error code */
13791 call \do_sym
13792@@ -1066,9 +1358,24 @@ ENTRY(\sym)
13793 subq $15*8, %rsp
13794 call save_paranoid
13795 TRACE_IRQS_OFF
13796+#ifdef CONFIG_PAX_MEMORY_UDEREF
13797+ testb $3, CS(%rsp)
13798+ jnz 1f
13799+ pax_enter_kernel
13800+ jmp 2f
13801+1: pax_enter_kernel_user
13802+2:
13803+#else
13804+ pax_enter_kernel
13805+#endif
13806 movq %rsp,%rdi /* pt_regs pointer */
13807 xorl %esi,%esi /* no error code */
13808- PER_CPU(init_tss, %rbp)
13809+#ifdef CONFIG_SMP
13810+ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
13811+ lea init_tss(%rbp), %rbp
13812+#else
13813+ lea init_tss(%rip), %rbp
13814+#endif
13815 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13816 call \do_sym
13817 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
13818@@ -1085,6 +1392,16 @@ ENTRY(\sym)
13819 CFI_ADJUST_CFA_OFFSET 15*8
13820 call error_entry
13821 DEFAULT_FRAME 0
13822+#ifdef CONFIG_PAX_MEMORY_UDEREF
13823+ testb $3, CS(%rsp)
13824+ jnz 1f
13825+ pax_enter_kernel
13826+ jmp 2f
13827+1: pax_enter_kernel_user
13828+2:
13829+#else
13830+ pax_enter_kernel
13831+#endif
13832 movq %rsp,%rdi /* pt_regs pointer */
13833 movq ORIG_RAX(%rsp),%rsi /* get error code */
13834 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13835@@ -1104,6 +1421,16 @@ ENTRY(\sym)
13836 call save_paranoid
13837 DEFAULT_FRAME 0
13838 TRACE_IRQS_OFF
13839+#ifdef CONFIG_PAX_MEMORY_UDEREF
13840+ testb $3, CS(%rsp)
13841+ jnz 1f
13842+ pax_enter_kernel
13843+ jmp 2f
13844+1: pax_enter_kernel_user
13845+2:
13846+#else
13847+ pax_enter_kernel
13848+#endif
13849 movq %rsp,%rdi /* pt_regs pointer */
13850 movq ORIG_RAX(%rsp),%rsi /* get error code */
13851 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
13852@@ -1405,14 +1732,27 @@ ENTRY(paranoid_exit)
13853 TRACE_IRQS_OFF
13854 testl %ebx,%ebx /* swapgs needed? */
13855 jnz paranoid_restore
13856- testl $3,CS(%rsp)
13857+ testb $3,CS(%rsp)
13858 jnz paranoid_userspace
13859+#ifdef CONFIG_PAX_MEMORY_UDEREF
13860+ pax_exit_kernel
13861+ TRACE_IRQS_IRETQ 0
13862+ SWAPGS_UNSAFE_STACK
13863+ RESTORE_ALL 8
13864+ jmp irq_return
13865+#endif
13866 paranoid_swapgs:
13867+#ifdef CONFIG_PAX_MEMORY_UDEREF
13868+ pax_exit_kernel_user
13869+#else
13870+ pax_exit_kernel
13871+#endif
13872 TRACE_IRQS_IRETQ 0
13873 SWAPGS_UNSAFE_STACK
13874 RESTORE_ALL 8
13875 jmp irq_return
13876 paranoid_restore:
13877+ pax_exit_kernel
13878 TRACE_IRQS_IRETQ 0
13879 RESTORE_ALL 8
13880 jmp irq_return
13881@@ -1470,7 +1810,7 @@ ENTRY(error_entry)
13882 movq_cfi r14, R14+8
13883 movq_cfi r15, R15+8
13884 xorl %ebx,%ebx
13885- testl $3,CS+8(%rsp)
13886+ testb $3,CS+8(%rsp)
13887 je error_kernelspace
13888 error_swapgs:
13889 SWAPGS
13890@@ -1529,6 +1869,16 @@ ENTRY(nmi)
13891 CFI_ADJUST_CFA_OFFSET 15*8
13892 call save_paranoid
13893 DEFAULT_FRAME 0
13894+#ifdef CONFIG_PAX_MEMORY_UDEREF
13895+ testb $3, CS(%rsp)
13896+ jnz 1f
13897+ pax_enter_kernel
13898+ jmp 2f
13899+1: pax_enter_kernel_user
13900+2:
13901+#else
13902+ pax_enter_kernel
13903+#endif
13904 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
13905 movq %rsp,%rdi
13906 movq $-1,%rsi
13907@@ -1539,11 +1889,25 @@ ENTRY(nmi)
13908 DISABLE_INTERRUPTS(CLBR_NONE)
13909 testl %ebx,%ebx /* swapgs needed? */
13910 jnz nmi_restore
13911- testl $3,CS(%rsp)
13912+ testb $3,CS(%rsp)
13913 jnz nmi_userspace
13914+#ifdef CONFIG_PAX_MEMORY_UDEREF
13915+ pax_exit_kernel
13916+ SWAPGS_UNSAFE_STACK
13917+ RESTORE_ALL 8
13918+ jmp irq_return
13919+#endif
13920 nmi_swapgs:
13921+#ifdef CONFIG_PAX_MEMORY_UDEREF
13922+ pax_exit_kernel_user
13923+#else
13924+ pax_exit_kernel
13925+#endif
13926 SWAPGS_UNSAFE_STACK
13927+ RESTORE_ALL 8
13928+ jmp irq_return
13929 nmi_restore:
13930+ pax_exit_kernel
13931 RESTORE_ALL 8
13932 jmp irq_return
13933 nmi_userspace:
13934diff -urNp linux-2.6.32.43/arch/x86/kernel/ftrace.c linux-2.6.32.43/arch/x86/kernel/ftrace.c
13935--- linux-2.6.32.43/arch/x86/kernel/ftrace.c 2011-03-27 14:31:47.000000000 -0400
13936+++ linux-2.6.32.43/arch/x86/kernel/ftrace.c 2011-05-04 17:56:20.000000000 -0400
13937@@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the
13938 static void *mod_code_newcode; /* holds the text to write to the IP */
13939
13940 static unsigned nmi_wait_count;
13941-static atomic_t nmi_update_count = ATOMIC_INIT(0);
13942+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
13943
13944 int ftrace_arch_read_dyn_info(char *buf, int size)
13945 {
13946@@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf,
13947
13948 r = snprintf(buf, size, "%u %u",
13949 nmi_wait_count,
13950- atomic_read(&nmi_update_count));
13951+ atomic_read_unchecked(&nmi_update_count));
13952 return r;
13953 }
13954
13955@@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
13956 {
13957 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
13958 smp_rmb();
13959+ pax_open_kernel();
13960 ftrace_mod_code();
13961- atomic_inc(&nmi_update_count);
13962+ pax_close_kernel();
13963+ atomic_inc_unchecked(&nmi_update_count);
13964 }
13965 /* Must have previous changes seen before executions */
13966 smp_mb();
13967@@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, voi
13968
13969
13970
13971-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
13972+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
13973
13974 static unsigned char *ftrace_nop_replace(void)
13975 {
13976@@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, uns
13977 {
13978 unsigned char replaced[MCOUNT_INSN_SIZE];
13979
13980+ ip = ktla_ktva(ip);
13981+
13982 /*
13983 * Note: Due to modules and __init, code can
13984 * disappear and change, we need to protect against faulting
13985@@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_fun
13986 unsigned char old[MCOUNT_INSN_SIZE], *new;
13987 int ret;
13988
13989- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
13990+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
13991 new = ftrace_call_replace(ip, (unsigned long)func);
13992 ret = ftrace_modify_code(ip, old, new);
13993
13994@@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *da
13995 switch (faulted) {
13996 case 0:
13997 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
13998- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
13999+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
14000 break;
14001 case 1:
14002 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
14003- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
14004+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
14005 break;
14006 case 2:
14007 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
14008- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
14009+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
14010 break;
14011 }
14012
14013@@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long
14014 {
14015 unsigned char code[MCOUNT_INSN_SIZE];
14016
14017+ ip = ktla_ktva(ip);
14018+
14019 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14020 return -EFAULT;
14021
14022diff -urNp linux-2.6.32.43/arch/x86/kernel/head32.c linux-2.6.32.43/arch/x86/kernel/head32.c
14023--- linux-2.6.32.43/arch/x86/kernel/head32.c 2011-03-27 14:31:47.000000000 -0400
14024+++ linux-2.6.32.43/arch/x86/kernel/head32.c 2011-04-17 15:56:46.000000000 -0400
14025@@ -16,6 +16,7 @@
14026 #include <asm/apic.h>
14027 #include <asm/io_apic.h>
14028 #include <asm/bios_ebda.h>
14029+#include <asm/boot.h>
14030
14031 static void __init i386_default_early_setup(void)
14032 {
14033@@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
14034 {
14035 reserve_trampoline_memory();
14036
14037- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14038+ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14039
14040 #ifdef CONFIG_BLK_DEV_INITRD
14041 /* Reserve INITRD */
14042diff -urNp linux-2.6.32.43/arch/x86/kernel/head_32.S linux-2.6.32.43/arch/x86/kernel/head_32.S
14043--- linux-2.6.32.43/arch/x86/kernel/head_32.S 2011-03-27 14:31:47.000000000 -0400
14044+++ linux-2.6.32.43/arch/x86/kernel/head_32.S 2011-07-06 19:53:33.000000000 -0400
14045@@ -19,10 +19,17 @@
14046 #include <asm/setup.h>
14047 #include <asm/processor-flags.h>
14048 #include <asm/percpu.h>
14049+#include <asm/msr-index.h>
14050
14051 /* Physical address */
14052 #define pa(X) ((X) - __PAGE_OFFSET)
14053
14054+#ifdef CONFIG_PAX_KERNEXEC
14055+#define ta(X) (X)
14056+#else
14057+#define ta(X) ((X) - __PAGE_OFFSET)
14058+#endif
14059+
14060 /*
14061 * References to members of the new_cpu_data structure.
14062 */
14063@@ -52,11 +59,7 @@
14064 * and small than max_low_pfn, otherwise will waste some page table entries
14065 */
14066
14067-#if PTRS_PER_PMD > 1
14068-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14069-#else
14070-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14071-#endif
14072+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14073
14074 /* Enough space to fit pagetables for the low memory linear map */
14075 MAPPING_BEYOND_END = \
14076@@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
14077 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14078
14079 /*
14080+ * Real beginning of normal "text" segment
14081+ */
14082+ENTRY(stext)
14083+ENTRY(_stext)
14084+
14085+/*
14086 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14087 * %esi points to the real-mode code as a 32-bit pointer.
14088 * CS and DS must be 4 GB flat segments, but we don't depend on
14089@@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14090 * can.
14091 */
14092 __HEAD
14093+
14094+#ifdef CONFIG_PAX_KERNEXEC
14095+ jmp startup_32
14096+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14097+.fill PAGE_SIZE-5,1,0xcc
14098+#endif
14099+
14100 ENTRY(startup_32)
14101+ movl pa(stack_start),%ecx
14102+
14103 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
14104 us to not reload segments */
14105 testb $(1<<6), BP_loadflags(%esi)
14106@@ -95,7 +113,60 @@ ENTRY(startup_32)
14107 movl %eax,%es
14108 movl %eax,%fs
14109 movl %eax,%gs
14110+ movl %eax,%ss
14111 2:
14112+ leal -__PAGE_OFFSET(%ecx),%esp
14113+
14114+#ifdef CONFIG_SMP
14115+ movl $pa(cpu_gdt_table),%edi
14116+ movl $__per_cpu_load,%eax
14117+ movw %ax,__KERNEL_PERCPU + 2(%edi)
14118+ rorl $16,%eax
14119+ movb %al,__KERNEL_PERCPU + 4(%edi)
14120+ movb %ah,__KERNEL_PERCPU + 7(%edi)
14121+ movl $__per_cpu_end - 1,%eax
14122+ subl $__per_cpu_start,%eax
14123+ movw %ax,__KERNEL_PERCPU + 0(%edi)
14124+#endif
14125+
14126+#ifdef CONFIG_PAX_MEMORY_UDEREF
14127+ movl $NR_CPUS,%ecx
14128+ movl $pa(cpu_gdt_table),%edi
14129+1:
14130+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14131+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14132+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14133+ addl $PAGE_SIZE_asm,%edi
14134+ loop 1b
14135+#endif
14136+
14137+#ifdef CONFIG_PAX_KERNEXEC
14138+ movl $pa(boot_gdt),%edi
14139+ movl $__LOAD_PHYSICAL_ADDR,%eax
14140+ movw %ax,__BOOT_CS + 2(%edi)
14141+ rorl $16,%eax
14142+ movb %al,__BOOT_CS + 4(%edi)
14143+ movb %ah,__BOOT_CS + 7(%edi)
14144+ rorl $16,%eax
14145+
14146+ ljmp $(__BOOT_CS),$1f
14147+1:
14148+
14149+ movl $NR_CPUS,%ecx
14150+ movl $pa(cpu_gdt_table),%edi
14151+ addl $__PAGE_OFFSET,%eax
14152+1:
14153+ movw %ax,__KERNEL_CS + 2(%edi)
14154+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14155+ rorl $16,%eax
14156+ movb %al,__KERNEL_CS + 4(%edi)
14157+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14158+ movb %ah,__KERNEL_CS + 7(%edi)
14159+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14160+ rorl $16,%eax
14161+ addl $PAGE_SIZE_asm,%edi
14162+ loop 1b
14163+#endif
14164
14165 /*
14166 * Clear BSS first so that there are no surprises...
14167@@ -140,9 +211,7 @@ ENTRY(startup_32)
14168 cmpl $num_subarch_entries, %eax
14169 jae bad_subarch
14170
14171- movl pa(subarch_entries)(,%eax,4), %eax
14172- subl $__PAGE_OFFSET, %eax
14173- jmp *%eax
14174+ jmp *pa(subarch_entries)(,%eax,4)
14175
14176 bad_subarch:
14177 WEAK(lguest_entry)
14178@@ -154,10 +223,10 @@ WEAK(xen_entry)
14179 __INITDATA
14180
14181 subarch_entries:
14182- .long default_entry /* normal x86/PC */
14183- .long lguest_entry /* lguest hypervisor */
14184- .long xen_entry /* Xen hypervisor */
14185- .long default_entry /* Moorestown MID */
14186+ .long ta(default_entry) /* normal x86/PC */
14187+ .long ta(lguest_entry) /* lguest hypervisor */
14188+ .long ta(xen_entry) /* Xen hypervisor */
14189+ .long ta(default_entry) /* Moorestown MID */
14190 num_subarch_entries = (. - subarch_entries) / 4
14191 .previous
14192 #endif /* CONFIG_PARAVIRT */
14193@@ -218,8 +287,11 @@ default_entry:
14194 movl %eax, pa(max_pfn_mapped)
14195
14196 /* Do early initialization of the fixmap area */
14197- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14198- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14199+#ifdef CONFIG_COMPAT_VDSO
14200+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14201+#else
14202+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14203+#endif
14204 #else /* Not PAE */
14205
14206 page_pde_offset = (__PAGE_OFFSET >> 20);
14207@@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14208 movl %eax, pa(max_pfn_mapped)
14209
14210 /* Do early initialization of the fixmap area */
14211- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14212- movl %eax,pa(swapper_pg_dir+0xffc)
14213+#ifdef CONFIG_COMPAT_VDSO
14214+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
14215+#else
14216+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
14217+#endif
14218 #endif
14219 jmp 3f
14220 /*
14221@@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
14222 movl %eax,%es
14223 movl %eax,%fs
14224 movl %eax,%gs
14225+ movl pa(stack_start),%ecx
14226+ movl %eax,%ss
14227+ leal -__PAGE_OFFSET(%ecx),%esp
14228 #endif /* CONFIG_SMP */
14229 3:
14230
14231@@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
14232 orl %edx,%eax
14233 movl %eax,%cr4
14234
14235+#ifdef CONFIG_X86_PAE
14236 btl $5, %eax # check if PAE is enabled
14237 jnc 6f
14238
14239@@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
14240 cpuid
14241 cmpl $0x80000000, %eax
14242 jbe 6f
14243+
14244+ /* Clear bogus XD_DISABLE bits */
14245+ call verify_cpu
14246+
14247 mov $0x80000001, %eax
14248 cpuid
14249 /* Execute Disable bit supported? */
14250@@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
14251 jnc 6f
14252
14253 /* Setup EFER (Extended Feature Enable Register) */
14254- movl $0xc0000080, %ecx
14255+ movl $MSR_EFER, %ecx
14256 rdmsr
14257
14258 btsl $11, %eax
14259 /* Make changes effective */
14260 wrmsr
14261
14262+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14263+ movl $1,pa(nx_enabled)
14264+#endif
14265+
14266 6:
14267
14268 /*
14269@@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
14270 movl %eax,%cr0 /* ..and set paging (PG) bit */
14271 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
14272 1:
14273- /* Set up the stack pointer */
14274- lss stack_start,%esp
14275+ /* Shift the stack pointer to a virtual address */
14276+ addl $__PAGE_OFFSET, %esp
14277
14278 /*
14279 * Initialize eflags. Some BIOS's leave bits like NT set. This would
14280@@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
14281
14282 #ifdef CONFIG_SMP
14283 cmpb $0, ready
14284- jz 1f /* Initial CPU cleans BSS */
14285- jmp checkCPUtype
14286-1:
14287+ jnz checkCPUtype
14288 #endif /* CONFIG_SMP */
14289
14290 /*
14291@@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
14292 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14293 movl %eax,%ss # after changing gdt.
14294
14295- movl $(__USER_DS),%eax # DS/ES contains default USER segment
14296+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14297 movl %eax,%ds
14298 movl %eax,%es
14299
14300@@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
14301 */
14302 cmpb $0,ready
14303 jne 1f
14304- movl $per_cpu__gdt_page,%eax
14305+ movl $cpu_gdt_table,%eax
14306 movl $per_cpu__stack_canary,%ecx
14307+#ifdef CONFIG_SMP
14308+ addl $__per_cpu_load,%ecx
14309+#endif
14310 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14311 shrl $16, %ecx
14312 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14313 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14314 1:
14315-#endif
14316 movl $(__KERNEL_STACK_CANARY),%eax
14317+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14318+ movl $(__USER_DS),%eax
14319+#else
14320+ xorl %eax,%eax
14321+#endif
14322 movl %eax,%gs
14323
14324 xorl %eax,%eax # Clear LDT
14325@@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
14326
14327 cld # gcc2 wants the direction flag cleared at all times
14328 pushl $0 # fake return address for unwinder
14329-#ifdef CONFIG_SMP
14330- movb ready, %cl
14331 movb $1, ready
14332- cmpb $0,%cl # the first CPU calls start_kernel
14333- je 1f
14334- movl (stack_start), %esp
14335-1:
14336-#endif /* CONFIG_SMP */
14337 jmp *(initial_code)
14338
14339 /*
14340@@ -546,22 +631,22 @@ early_page_fault:
14341 jmp early_fault
14342
14343 early_fault:
14344- cld
14345 #ifdef CONFIG_PRINTK
14346+ cmpl $1,%ss:early_recursion_flag
14347+ je hlt_loop
14348+ incl %ss:early_recursion_flag
14349+ cld
14350 pusha
14351 movl $(__KERNEL_DS),%eax
14352 movl %eax,%ds
14353 movl %eax,%es
14354- cmpl $2,early_recursion_flag
14355- je hlt_loop
14356- incl early_recursion_flag
14357 movl %cr2,%eax
14358 pushl %eax
14359 pushl %edx /* trapno */
14360 pushl $fault_msg
14361 call printk
14362+; call dump_stack
14363 #endif
14364- call dump_stack
14365 hlt_loop:
14366 hlt
14367 jmp hlt_loop
14368@@ -569,8 +654,11 @@ hlt_loop:
14369 /* This is the default interrupt "handler" :-) */
14370 ALIGN
14371 ignore_int:
14372- cld
14373 #ifdef CONFIG_PRINTK
14374+ cmpl $2,%ss:early_recursion_flag
14375+ je hlt_loop
14376+ incl %ss:early_recursion_flag
14377+ cld
14378 pushl %eax
14379 pushl %ecx
14380 pushl %edx
14381@@ -579,9 +667,6 @@ ignore_int:
14382 movl $(__KERNEL_DS),%eax
14383 movl %eax,%ds
14384 movl %eax,%es
14385- cmpl $2,early_recursion_flag
14386- je hlt_loop
14387- incl early_recursion_flag
14388 pushl 16(%esp)
14389 pushl 24(%esp)
14390 pushl 32(%esp)
14391@@ -600,6 +685,8 @@ ignore_int:
14392 #endif
14393 iret
14394
14395+#include "verify_cpu.S"
14396+
14397 __REFDATA
14398 .align 4
14399 ENTRY(initial_code)
14400@@ -610,31 +697,47 @@ ENTRY(initial_page_table)
14401 /*
14402 * BSS section
14403 */
14404-__PAGE_ALIGNED_BSS
14405- .align PAGE_SIZE_asm
14406 #ifdef CONFIG_X86_PAE
14407+.section .swapper_pg_pmd,"a",@progbits
14408 swapper_pg_pmd:
14409 .fill 1024*KPMDS,4,0
14410 #else
14411+.section .swapper_pg_dir,"a",@progbits
14412 ENTRY(swapper_pg_dir)
14413 .fill 1024,4,0
14414 #endif
14415+.section .swapper_pg_fixmap,"a",@progbits
14416 swapper_pg_fixmap:
14417 .fill 1024,4,0
14418 #ifdef CONFIG_X86_TRAMPOLINE
14419+.section .trampoline_pg_dir,"a",@progbits
14420 ENTRY(trampoline_pg_dir)
14421+#ifdef CONFIG_X86_PAE
14422+ .fill 4,8,0
14423+#else
14424 .fill 1024,4,0
14425 #endif
14426+#endif
14427+
14428+.section .empty_zero_page,"a",@progbits
14429 ENTRY(empty_zero_page)
14430 .fill 4096,1,0
14431
14432 /*
14433+ * The IDT has to be page-aligned to simplify the Pentium
14434+ * F0 0F bug workaround.. We have a special link segment
14435+ * for this.
14436+ */
14437+.section .idt,"a",@progbits
14438+ENTRY(idt_table)
14439+ .fill 256,8,0
14440+
14441+/*
14442 * This starts the data section.
14443 */
14444 #ifdef CONFIG_X86_PAE
14445-__PAGE_ALIGNED_DATA
14446- /* Page-aligned for the benefit of paravirt? */
14447- .align PAGE_SIZE_asm
14448+.section .swapper_pg_dir,"a",@progbits
14449+
14450 ENTRY(swapper_pg_dir)
14451 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14452 # if KPMDS == 3
14453@@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
14454 # error "Kernel PMDs should be 1, 2 or 3"
14455 # endif
14456 .align PAGE_SIZE_asm /* needs to be page-sized too */
14457+
14458+#ifdef CONFIG_PAX_PER_CPU_PGD
14459+ENTRY(cpu_pgd)
14460+ .rept NR_CPUS
14461+ .fill 4,8,0
14462+ .endr
14463+#endif
14464+
14465 #endif
14466
14467 .data
14468+.balign 4
14469 ENTRY(stack_start)
14470- .long init_thread_union+THREAD_SIZE
14471- .long __BOOT_DS
14472+ .long init_thread_union+THREAD_SIZE-8
14473
14474 ready: .byte 0
14475
14476+.section .rodata,"a",@progbits
14477 early_recursion_flag:
14478 .long 0
14479
14480@@ -697,7 +809,7 @@ fault_msg:
14481 .word 0 # 32 bit align gdt_desc.address
14482 boot_gdt_descr:
14483 .word __BOOT_DS+7
14484- .long boot_gdt - __PAGE_OFFSET
14485+ .long pa(boot_gdt)
14486
14487 .word 0 # 32-bit align idt_desc.address
14488 idt_descr:
14489@@ -708,7 +820,7 @@ idt_descr:
14490 .word 0 # 32 bit align gdt_desc.address
14491 ENTRY(early_gdt_descr)
14492 .word GDT_ENTRIES*8-1
14493- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
14494+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
14495
14496 /*
14497 * The boot_gdt must mirror the equivalent in setup.S and is
14498@@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
14499 .align L1_CACHE_BYTES
14500 ENTRY(boot_gdt)
14501 .fill GDT_ENTRY_BOOT_CS,8,0
14502- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14503- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14504+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14505+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14506+
14507+ .align PAGE_SIZE_asm
14508+ENTRY(cpu_gdt_table)
14509+ .rept NR_CPUS
14510+ .quad 0x0000000000000000 /* NULL descriptor */
14511+ .quad 0x0000000000000000 /* 0x0b reserved */
14512+ .quad 0x0000000000000000 /* 0x13 reserved */
14513+ .quad 0x0000000000000000 /* 0x1b reserved */
14514+
14515+#ifdef CONFIG_PAX_KERNEXEC
14516+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14517+#else
14518+ .quad 0x0000000000000000 /* 0x20 unused */
14519+#endif
14520+
14521+ .quad 0x0000000000000000 /* 0x28 unused */
14522+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14523+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14524+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14525+ .quad 0x0000000000000000 /* 0x4b reserved */
14526+ .quad 0x0000000000000000 /* 0x53 reserved */
14527+ .quad 0x0000000000000000 /* 0x5b reserved */
14528+
14529+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14530+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14531+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14532+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14533+
14534+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14535+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14536+
14537+ /*
14538+ * Segments used for calling PnP BIOS have byte granularity.
14539+ * The code segments and data segments have fixed 64k limits,
14540+ * the transfer segment sizes are set at run time.
14541+ */
14542+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
14543+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
14544+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
14545+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
14546+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
14547+
14548+ /*
14549+ * The APM segments have byte granularity and their bases
14550+ * are set at run time. All have 64k limits.
14551+ */
14552+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14553+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14554+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
14555+
14556+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14557+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14558+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14559+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14560+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14561+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14562+
14563+ /* Be sure this is zeroed to avoid false validations in Xen */
14564+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14565+ .endr
14566diff -urNp linux-2.6.32.43/arch/x86/kernel/head_64.S linux-2.6.32.43/arch/x86/kernel/head_64.S
14567--- linux-2.6.32.43/arch/x86/kernel/head_64.S 2011-03-27 14:31:47.000000000 -0400
14568+++ linux-2.6.32.43/arch/x86/kernel/head_64.S 2011-04-17 15:56:46.000000000 -0400
14569@@ -19,6 +19,7 @@
14570 #include <asm/cache.h>
14571 #include <asm/processor-flags.h>
14572 #include <asm/percpu.h>
14573+#include <asm/cpufeature.h>
14574
14575 #ifdef CONFIG_PARAVIRT
14576 #include <asm/asm-offsets.h>
14577@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
14578 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14579 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14580 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14581+L4_VMALLOC_START = pgd_index(VMALLOC_START)
14582+L3_VMALLOC_START = pud_index(VMALLOC_START)
14583+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14584+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14585
14586 .text
14587 __HEAD
14588@@ -85,35 +90,22 @@ startup_64:
14589 */
14590 addq %rbp, init_level4_pgt + 0(%rip)
14591 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14592+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14593+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14594 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14595
14596 addq %rbp, level3_ident_pgt + 0(%rip)
14597+#ifndef CONFIG_XEN
14598+ addq %rbp, level3_ident_pgt + 8(%rip)
14599+#endif
14600
14601- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14602- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14603+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14604
14605- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14606+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14607+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14608
14609- /* Add an Identity mapping if I am above 1G */
14610- leaq _text(%rip), %rdi
14611- andq $PMD_PAGE_MASK, %rdi
14612-
14613- movq %rdi, %rax
14614- shrq $PUD_SHIFT, %rax
14615- andq $(PTRS_PER_PUD - 1), %rax
14616- jz ident_complete
14617-
14618- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
14619- leaq level3_ident_pgt(%rip), %rbx
14620- movq %rdx, 0(%rbx, %rax, 8)
14621-
14622- movq %rdi, %rax
14623- shrq $PMD_SHIFT, %rax
14624- andq $(PTRS_PER_PMD - 1), %rax
14625- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
14626- leaq level2_spare_pgt(%rip), %rbx
14627- movq %rdx, 0(%rbx, %rax, 8)
14628-ident_complete:
14629+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14630+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
14631
14632 /*
14633 * Fixup the kernel text+data virtual addresses. Note that
14634@@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
14635 * after the boot processor executes this code.
14636 */
14637
14638- /* Enable PAE mode and PGE */
14639- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
14640+ /* Enable PAE mode and PSE/PGE */
14641+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
14642 movq %rax, %cr4
14643
14644 /* Setup early boot stage 4 level pagetables. */
14645@@ -184,9 +176,13 @@ ENTRY(secondary_startup_64)
14646 movl $MSR_EFER, %ecx
14647 rdmsr
14648 btsl $_EFER_SCE, %eax /* Enable System Call */
14649- btl $20,%edi /* No Execute supported? */
14650+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
14651 jnc 1f
14652 btsl $_EFER_NX, %eax
14653+ leaq init_level4_pgt(%rip), %rdi
14654+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
14655+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
14656+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
14657 1: wrmsr /* Make changes effective */
14658
14659 /* Setup cr0 */
14660@@ -262,16 +258,16 @@ ENTRY(secondary_startup_64)
14661 .quad x86_64_start_kernel
14662 ENTRY(initial_gs)
14663 .quad INIT_PER_CPU_VAR(irq_stack_union)
14664- __FINITDATA
14665
14666 ENTRY(stack_start)
14667 .quad init_thread_union+THREAD_SIZE-8
14668 .word 0
14669+ __FINITDATA
14670
14671 bad_address:
14672 jmp bad_address
14673
14674- .section ".init.text","ax"
14675+ __INIT
14676 #ifdef CONFIG_EARLY_PRINTK
14677 .globl early_idt_handlers
14678 early_idt_handlers:
14679@@ -316,18 +312,23 @@ ENTRY(early_idt_handler)
14680 #endif /* EARLY_PRINTK */
14681 1: hlt
14682 jmp 1b
14683+ .previous
14684
14685 #ifdef CONFIG_EARLY_PRINTK
14686+ __INITDATA
14687 early_recursion_flag:
14688 .long 0
14689+ .previous
14690
14691+ .section .rodata,"a",@progbits
14692 early_idt_msg:
14693 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
14694 early_idt_ripmsg:
14695 .asciz "RIP %s\n"
14696-#endif /* CONFIG_EARLY_PRINTK */
14697 .previous
14698+#endif /* CONFIG_EARLY_PRINTK */
14699
14700+ .section .rodata,"a",@progbits
14701 #define NEXT_PAGE(name) \
14702 .balign PAGE_SIZE; \
14703 ENTRY(name)
14704@@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
14705 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14706 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
14707 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14708+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
14709+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
14710+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
14711+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14712 .org init_level4_pgt + L4_START_KERNEL*8, 0
14713 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
14714 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
14715
14716+#ifdef CONFIG_PAX_PER_CPU_PGD
14717+NEXT_PAGE(cpu_pgd)
14718+ .rept NR_CPUS
14719+ .fill 512,8,0
14720+ .endr
14721+#endif
14722+
14723 NEXT_PAGE(level3_ident_pgt)
14724 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14725+#ifdef CONFIG_XEN
14726 .fill 511,8,0
14727+#else
14728+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
14729+ .fill 510,8,0
14730+#endif
14731+
14732+NEXT_PAGE(level3_vmalloc_pgt)
14733+ .fill 512,8,0
14734+
14735+NEXT_PAGE(level3_vmemmap_pgt)
14736+ .fill L3_VMEMMAP_START,8,0
14737+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14738
14739 NEXT_PAGE(level3_kernel_pgt)
14740 .fill L3_START_KERNEL,8,0
14741@@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
14742 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
14743 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14744
14745+NEXT_PAGE(level2_vmemmap_pgt)
14746+ .fill 512,8,0
14747+
14748 NEXT_PAGE(level2_fixmap_pgt)
14749- .fill 506,8,0
14750- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14751- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
14752- .fill 5,8,0
14753+ .fill 507,8,0
14754+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
14755+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
14756+ .fill 4,8,0
14757
14758-NEXT_PAGE(level1_fixmap_pgt)
14759+NEXT_PAGE(level1_vsyscall_pgt)
14760 .fill 512,8,0
14761
14762-NEXT_PAGE(level2_ident_pgt)
14763- /* Since I easily can, map the first 1G.
14764+ /* Since I easily can, map the first 2G.
14765 * Don't set NX because code runs from these pages.
14766 */
14767- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
14768+NEXT_PAGE(level2_ident_pgt)
14769+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
14770
14771 NEXT_PAGE(level2_kernel_pgt)
14772 /*
14773@@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
14774 * If you want to increase this then increase MODULES_VADDR
14775 * too.)
14776 */
14777- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
14778- KERNEL_IMAGE_SIZE/PMD_SIZE)
14779-
14780-NEXT_PAGE(level2_spare_pgt)
14781- .fill 512, 8, 0
14782+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
14783
14784 #undef PMDS
14785 #undef NEXT_PAGE
14786
14787- .data
14788+ .align PAGE_SIZE
14789+ENTRY(cpu_gdt_table)
14790+ .rept NR_CPUS
14791+ .quad 0x0000000000000000 /* NULL descriptor */
14792+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
14793+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
14794+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
14795+ .quad 0x00cffb000000ffff /* __USER32_CS */
14796+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
14797+ .quad 0x00affb000000ffff /* __USER_CS */
14798+
14799+#ifdef CONFIG_PAX_KERNEXEC
14800+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
14801+#else
14802+ .quad 0x0 /* unused */
14803+#endif
14804+
14805+ .quad 0,0 /* TSS */
14806+ .quad 0,0 /* LDT */
14807+ .quad 0,0,0 /* three TLS descriptors */
14808+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
14809+ /* asm/segment.h:GDT_ENTRIES must match this */
14810+
14811+ /* zero the remaining page */
14812+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
14813+ .endr
14814+
14815 .align 16
14816 .globl early_gdt_descr
14817 early_gdt_descr:
14818 .word GDT_ENTRIES*8-1
14819 early_gdt_descr_base:
14820- .quad INIT_PER_CPU_VAR(gdt_page)
14821+ .quad cpu_gdt_table
14822
14823 ENTRY(phys_base)
14824 /* This must match the first entry in level2_kernel_pgt */
14825 .quad 0x0000000000000000
14826
14827 #include "../../x86/xen/xen-head.S"
14828-
14829- .section .bss, "aw", @nobits
14830+
14831+ .section .rodata,"a",@progbits
14832 .align L1_CACHE_BYTES
14833 ENTRY(idt_table)
14834- .skip IDT_ENTRIES * 16
14835+ .fill 512,8,0
14836
14837 __PAGE_ALIGNED_BSS
14838 .align PAGE_SIZE
14839diff -urNp linux-2.6.32.43/arch/x86/kernel/i386_ksyms_32.c linux-2.6.32.43/arch/x86/kernel/i386_ksyms_32.c
14840--- linux-2.6.32.43/arch/x86/kernel/i386_ksyms_32.c 2011-03-27 14:31:47.000000000 -0400
14841+++ linux-2.6.32.43/arch/x86/kernel/i386_ksyms_32.c 2011-04-17 15:56:46.000000000 -0400
14842@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
14843 EXPORT_SYMBOL(cmpxchg8b_emu);
14844 #endif
14845
14846+EXPORT_SYMBOL_GPL(cpu_gdt_table);
14847+
14848 /* Networking helper routines. */
14849 EXPORT_SYMBOL(csum_partial_copy_generic);
14850+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
14851+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
14852
14853 EXPORT_SYMBOL(__get_user_1);
14854 EXPORT_SYMBOL(__get_user_2);
14855@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
14856
14857 EXPORT_SYMBOL(csum_partial);
14858 EXPORT_SYMBOL(empty_zero_page);
14859+
14860+#ifdef CONFIG_PAX_KERNEXEC
14861+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
14862+#endif
14863diff -urNp linux-2.6.32.43/arch/x86/kernel/i8259.c linux-2.6.32.43/arch/x86/kernel/i8259.c
14864--- linux-2.6.32.43/arch/x86/kernel/i8259.c 2011-03-27 14:31:47.000000000 -0400
14865+++ linux-2.6.32.43/arch/x86/kernel/i8259.c 2011-05-04 17:56:28.000000000 -0400
14866@@ -208,7 +208,7 @@ spurious_8259A_irq:
14867 "spurious 8259A interrupt: IRQ%d.\n", irq);
14868 spurious_irq_mask |= irqmask;
14869 }
14870- atomic_inc(&irq_err_count);
14871+ atomic_inc_unchecked(&irq_err_count);
14872 /*
14873 * Theoretically we do not have to handle this IRQ,
14874 * but in Linux this does not cause problems and is
14875diff -urNp linux-2.6.32.43/arch/x86/kernel/init_task.c linux-2.6.32.43/arch/x86/kernel/init_task.c
14876--- linux-2.6.32.43/arch/x86/kernel/init_task.c 2011-03-27 14:31:47.000000000 -0400
14877+++ linux-2.6.32.43/arch/x86/kernel/init_task.c 2011-04-17 15:56:46.000000000 -0400
14878@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
14879 * way process stacks are handled. This is done by having a special
14880 * "init_task" linker map entry..
14881 */
14882-union thread_union init_thread_union __init_task_data =
14883- { INIT_THREAD_INFO(init_task) };
14884+union thread_union init_thread_union __init_task_data;
14885
14886 /*
14887 * Initial task structure.
14888@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
14889 * section. Since TSS's are completely CPU-local, we want them
14890 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
14891 */
14892-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
14893-
14894+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
14895+EXPORT_SYMBOL(init_tss);
14896diff -urNp linux-2.6.32.43/arch/x86/kernel/ioport.c linux-2.6.32.43/arch/x86/kernel/ioport.c
14897--- linux-2.6.32.43/arch/x86/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
14898+++ linux-2.6.32.43/arch/x86/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
14899@@ -6,6 +6,7 @@
14900 #include <linux/sched.h>
14901 #include <linux/kernel.h>
14902 #include <linux/capability.h>
14903+#include <linux/security.h>
14904 #include <linux/errno.h>
14905 #include <linux/types.h>
14906 #include <linux/ioport.h>
14907@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
14908
14909 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
14910 return -EINVAL;
14911+#ifdef CONFIG_GRKERNSEC_IO
14912+ if (turn_on && grsec_disable_privio) {
14913+ gr_handle_ioperm();
14914+ return -EPERM;
14915+ }
14916+#endif
14917 if (turn_on && !capable(CAP_SYS_RAWIO))
14918 return -EPERM;
14919
14920@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
14921 * because the ->io_bitmap_max value must match the bitmap
14922 * contents:
14923 */
14924- tss = &per_cpu(init_tss, get_cpu());
14925+ tss = init_tss + get_cpu();
14926
14927 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
14928
14929@@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, s
14930 return -EINVAL;
14931 /* Trying to gain more privileges? */
14932 if (level > old) {
14933+#ifdef CONFIG_GRKERNSEC_IO
14934+ if (grsec_disable_privio) {
14935+ gr_handle_iopl();
14936+ return -EPERM;
14937+ }
14938+#endif
14939 if (!capable(CAP_SYS_RAWIO))
14940 return -EPERM;
14941 }
14942diff -urNp linux-2.6.32.43/arch/x86/kernel/irq_32.c linux-2.6.32.43/arch/x86/kernel/irq_32.c
14943--- linux-2.6.32.43/arch/x86/kernel/irq_32.c 2011-03-27 14:31:47.000000000 -0400
14944+++ linux-2.6.32.43/arch/x86/kernel/irq_32.c 2011-07-06 19:53:33.000000000 -0400
14945@@ -35,7 +35,7 @@ static int check_stack_overflow(void)
14946 __asm__ __volatile__("andl %%esp,%0" :
14947 "=r" (sp) : "0" (THREAD_SIZE - 1));
14948
14949- return sp < (sizeof(struct thread_info) + STACK_WARN);
14950+ return sp < STACK_WARN;
14951 }
14952
14953 static void print_stack_overflow(void)
14954@@ -54,9 +54,9 @@ static inline void print_stack_overflow(
14955 * per-CPU IRQ handling contexts (thread information and stack)
14956 */
14957 union irq_ctx {
14958- struct thread_info tinfo;
14959- u32 stack[THREAD_SIZE/sizeof(u32)];
14960-} __attribute__((aligned(PAGE_SIZE)));
14961+ unsigned long previous_esp;
14962+ u32 stack[THREAD_SIZE/sizeof(u32)];
14963+} __attribute__((aligned(THREAD_SIZE)));
14964
14965 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
14966 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
14967@@ -78,10 +78,9 @@ static void call_on_stack(void *func, vo
14968 static inline int
14969 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
14970 {
14971- union irq_ctx *curctx, *irqctx;
14972+ union irq_ctx *irqctx;
14973 u32 *isp, arg1, arg2;
14974
14975- curctx = (union irq_ctx *) current_thread_info();
14976 irqctx = __get_cpu_var(hardirq_ctx);
14977
14978 /*
14979@@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struc
14980 * handler) we can't do that and just have to keep using the
14981 * current stack (which is the irq stack already after all)
14982 */
14983- if (unlikely(curctx == irqctx))
14984+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
14985 return 0;
14986
14987 /* build the stack frame on the IRQ stack */
14988- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
14989- irqctx->tinfo.task = curctx->tinfo.task;
14990- irqctx->tinfo.previous_esp = current_stack_pointer;
14991+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
14992+ irqctx->previous_esp = current_stack_pointer;
14993
14994- /*
14995- * Copy the softirq bits in preempt_count so that the
14996- * softirq checks work in the hardirq context.
14997- */
14998- irqctx->tinfo.preempt_count =
14999- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15000- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15001+#ifdef CONFIG_PAX_MEMORY_UDEREF
15002+ __set_fs(MAKE_MM_SEG(0));
15003+#endif
15004
15005 if (unlikely(overflow))
15006 call_on_stack(print_stack_overflow, isp);
15007@@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struc
15008 : "0" (irq), "1" (desc), "2" (isp),
15009 "D" (desc->handle_irq)
15010 : "memory", "cc", "ecx");
15011+
15012+#ifdef CONFIG_PAX_MEMORY_UDEREF
15013+ __set_fs(current_thread_info()->addr_limit);
15014+#endif
15015+
15016 return 1;
15017 }
15018
15019@@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struc
15020 */
15021 void __cpuinit irq_ctx_init(int cpu)
15022 {
15023- union irq_ctx *irqctx;
15024-
15025 if (per_cpu(hardirq_ctx, cpu))
15026 return;
15027
15028- irqctx = &per_cpu(hardirq_stack, cpu);
15029- irqctx->tinfo.task = NULL;
15030- irqctx->tinfo.exec_domain = NULL;
15031- irqctx->tinfo.cpu = cpu;
15032- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15033- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15034-
15035- per_cpu(hardirq_ctx, cpu) = irqctx;
15036-
15037- irqctx = &per_cpu(softirq_stack, cpu);
15038- irqctx->tinfo.task = NULL;
15039- irqctx->tinfo.exec_domain = NULL;
15040- irqctx->tinfo.cpu = cpu;
15041- irqctx->tinfo.preempt_count = 0;
15042- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15043-
15044- per_cpu(softirq_ctx, cpu) = irqctx;
15045+ per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
15046+ per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
15047
15048 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15049 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15050@@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
15051 asmlinkage void do_softirq(void)
15052 {
15053 unsigned long flags;
15054- struct thread_info *curctx;
15055 union irq_ctx *irqctx;
15056 u32 *isp;
15057
15058@@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
15059 local_irq_save(flags);
15060
15061 if (local_softirq_pending()) {
15062- curctx = current_thread_info();
15063 irqctx = __get_cpu_var(softirq_ctx);
15064- irqctx->tinfo.task = curctx->task;
15065- irqctx->tinfo.previous_esp = current_stack_pointer;
15066+ irqctx->previous_esp = current_stack_pointer;
15067
15068 /* build the stack frame on the softirq stack */
15069- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15070+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15071+
15072+#ifdef CONFIG_PAX_MEMORY_UDEREF
15073+ __set_fs(MAKE_MM_SEG(0));
15074+#endif
15075
15076 call_on_stack(__do_softirq, isp);
15077+
15078+#ifdef CONFIG_PAX_MEMORY_UDEREF
15079+ __set_fs(current_thread_info()->addr_limit);
15080+#endif
15081+
15082 /*
15083 * Shouldnt happen, we returned above if in_interrupt():
15084 */
15085diff -urNp linux-2.6.32.43/arch/x86/kernel/irq.c linux-2.6.32.43/arch/x86/kernel/irq.c
15086--- linux-2.6.32.43/arch/x86/kernel/irq.c 2011-03-27 14:31:47.000000000 -0400
15087+++ linux-2.6.32.43/arch/x86/kernel/irq.c 2011-05-04 17:56:28.000000000 -0400
15088@@ -15,7 +15,7 @@
15089 #include <asm/mce.h>
15090 #include <asm/hw_irq.h>
15091
15092-atomic_t irq_err_count;
15093+atomic_unchecked_t irq_err_count;
15094
15095 /* Function pointer for generic interrupt vector handling */
15096 void (*generic_interrupt_extension)(void) = NULL;
15097@@ -114,9 +114,9 @@ static int show_other_interrupts(struct
15098 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15099 seq_printf(p, " Machine check polls\n");
15100 #endif
15101- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15102+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15103 #if defined(CONFIG_X86_IO_APIC)
15104- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15105+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15106 #endif
15107 return 0;
15108 }
15109@@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15110
15111 u64 arch_irq_stat(void)
15112 {
15113- u64 sum = atomic_read(&irq_err_count);
15114+ u64 sum = atomic_read_unchecked(&irq_err_count);
15115
15116 #ifdef CONFIG_X86_IO_APIC
15117- sum += atomic_read(&irq_mis_count);
15118+ sum += atomic_read_unchecked(&irq_mis_count);
15119 #endif
15120 return sum;
15121 }
15122diff -urNp linux-2.6.32.43/arch/x86/kernel/kgdb.c linux-2.6.32.43/arch/x86/kernel/kgdb.c
15123--- linux-2.6.32.43/arch/x86/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
15124+++ linux-2.6.32.43/arch/x86/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
15125@@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec
15126
15127 /* clear the trace bit */
15128 linux_regs->flags &= ~X86_EFLAGS_TF;
15129- atomic_set(&kgdb_cpu_doing_single_step, -1);
15130+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15131
15132 /* set the trace bit if we're stepping */
15133 if (remcomInBuffer[0] == 's') {
15134 linux_regs->flags |= X86_EFLAGS_TF;
15135 kgdb_single_step = 1;
15136- atomic_set(&kgdb_cpu_doing_single_step,
15137+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15138 raw_smp_processor_id());
15139 }
15140
15141@@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args
15142 break;
15143
15144 case DIE_DEBUG:
15145- if (atomic_read(&kgdb_cpu_doing_single_step) ==
15146+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
15147 raw_smp_processor_id()) {
15148 if (user_mode(regs))
15149 return single_step_cont(regs, args);
15150@@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
15151 return instruction_pointer(regs);
15152 }
15153
15154-struct kgdb_arch arch_kgdb_ops = {
15155+const struct kgdb_arch arch_kgdb_ops = {
15156 /* Breakpoint instruction: */
15157 .gdb_bpt_instr = { 0xcc },
15158 .flags = KGDB_HW_BREAKPOINT,
15159diff -urNp linux-2.6.32.43/arch/x86/kernel/kprobes.c linux-2.6.32.43/arch/x86/kernel/kprobes.c
15160--- linux-2.6.32.43/arch/x86/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
15161+++ linux-2.6.32.43/arch/x86/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
15162@@ -166,9 +166,13 @@ static void __kprobes set_jmp_op(void *f
15163 char op;
15164 s32 raddr;
15165 } __attribute__((packed)) * jop;
15166- jop = (struct __arch_jmp_op *)from;
15167+
15168+ jop = (struct __arch_jmp_op *)(ktla_ktva(from));
15169+
15170+ pax_open_kernel();
15171 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
15172 jop->op = RELATIVEJUMP_INSTRUCTION;
15173+ pax_close_kernel();
15174 }
15175
15176 /*
15177@@ -193,7 +197,7 @@ static int __kprobes can_boost(kprobe_op
15178 kprobe_opcode_t opcode;
15179 kprobe_opcode_t *orig_opcodes = opcodes;
15180
15181- if (search_exception_tables((unsigned long)opcodes))
15182+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15183 return 0; /* Page fault may occur on this address. */
15184
15185 retry:
15186@@ -337,7 +341,9 @@ static void __kprobes fix_riprel(struct
15187 disp = (u8 *) p->addr + *((s32 *) insn) -
15188 (u8 *) p->ainsn.insn;
15189 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
15190+ pax_open_kernel();
15191 *(s32 *)insn = (s32) disp;
15192+ pax_close_kernel();
15193 }
15194 }
15195 #endif
15196@@ -345,16 +351,18 @@ static void __kprobes fix_riprel(struct
15197
15198 static void __kprobes arch_copy_kprobe(struct kprobe *p)
15199 {
15200- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15201+ pax_open_kernel();
15202+ memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15203+ pax_close_kernel();
15204
15205 fix_riprel(p);
15206
15207- if (can_boost(p->addr))
15208+ if (can_boost(ktla_ktva(p->addr)))
15209 p->ainsn.boostable = 0;
15210 else
15211 p->ainsn.boostable = -1;
15212
15213- p->opcode = *p->addr;
15214+ p->opcode = *(ktla_ktva(p->addr));
15215 }
15216
15217 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15218@@ -432,7 +440,7 @@ static void __kprobes prepare_singlestep
15219 if (p->opcode == BREAKPOINT_INSTRUCTION)
15220 regs->ip = (unsigned long)p->addr;
15221 else
15222- regs->ip = (unsigned long)p->ainsn.insn;
15223+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15224 }
15225
15226 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
15227@@ -453,7 +461,7 @@ static void __kprobes setup_singlestep(s
15228 if (p->ainsn.boostable == 1 && !p->post_handler) {
15229 /* Boost up -- we can execute copied instructions directly */
15230 reset_current_kprobe();
15231- regs->ip = (unsigned long)p->ainsn.insn;
15232+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15233 preempt_enable_no_resched();
15234 return;
15235 }
15236@@ -523,7 +531,7 @@ static int __kprobes kprobe_handler(stru
15237 struct kprobe_ctlblk *kcb;
15238
15239 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
15240- if (*addr != BREAKPOINT_INSTRUCTION) {
15241+ if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15242 /*
15243 * The breakpoint instruction was removed right
15244 * after we hit it. Another cpu has removed
15245@@ -775,7 +783,7 @@ static void __kprobes resume_execution(s
15246 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15247 {
15248 unsigned long *tos = stack_addr(regs);
15249- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15250+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15251 unsigned long orig_ip = (unsigned long)p->addr;
15252 kprobe_opcode_t *insn = p->ainsn.insn;
15253
15254@@ -958,7 +966,7 @@ int __kprobes kprobe_exceptions_notify(s
15255 struct die_args *args = data;
15256 int ret = NOTIFY_DONE;
15257
15258- if (args->regs && user_mode_vm(args->regs))
15259+ if (args->regs && user_mode(args->regs))
15260 return ret;
15261
15262 switch (val) {
15263diff -urNp linux-2.6.32.43/arch/x86/kernel/ldt.c linux-2.6.32.43/arch/x86/kernel/ldt.c
15264--- linux-2.6.32.43/arch/x86/kernel/ldt.c 2011-03-27 14:31:47.000000000 -0400
15265+++ linux-2.6.32.43/arch/x86/kernel/ldt.c 2011-04-17 15:56:46.000000000 -0400
15266@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
15267 if (reload) {
15268 #ifdef CONFIG_SMP
15269 preempt_disable();
15270- load_LDT(pc);
15271+ load_LDT_nolock(pc);
15272 if (!cpumask_equal(mm_cpumask(current->mm),
15273 cpumask_of(smp_processor_id())))
15274 smp_call_function(flush_ldt, current->mm, 1);
15275 preempt_enable();
15276 #else
15277- load_LDT(pc);
15278+ load_LDT_nolock(pc);
15279 #endif
15280 }
15281 if (oldsize) {
15282@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
15283 return err;
15284
15285 for (i = 0; i < old->size; i++)
15286- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15287+ write_ldt_entry(new->ldt, i, old->ldt + i);
15288 return 0;
15289 }
15290
15291@@ -115,6 +115,24 @@ int init_new_context(struct task_struct
15292 retval = copy_ldt(&mm->context, &old_mm->context);
15293 mutex_unlock(&old_mm->context.lock);
15294 }
15295+
15296+ if (tsk == current) {
15297+ mm->context.vdso = 0;
15298+
15299+#ifdef CONFIG_X86_32
15300+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15301+ mm->context.user_cs_base = 0UL;
15302+ mm->context.user_cs_limit = ~0UL;
15303+
15304+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15305+ cpus_clear(mm->context.cpu_user_cs_mask);
15306+#endif
15307+
15308+#endif
15309+#endif
15310+
15311+ }
15312+
15313 return retval;
15314 }
15315
15316@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
15317 }
15318 }
15319
15320+#ifdef CONFIG_PAX_SEGMEXEC
15321+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15322+ error = -EINVAL;
15323+ goto out_unlock;
15324+ }
15325+#endif
15326+
15327 fill_ldt(&ldt, &ldt_info);
15328 if (oldmode)
15329 ldt.avl = 0;
15330diff -urNp linux-2.6.32.43/arch/x86/kernel/machine_kexec_32.c linux-2.6.32.43/arch/x86/kernel/machine_kexec_32.c
15331--- linux-2.6.32.43/arch/x86/kernel/machine_kexec_32.c 2011-03-27 14:31:47.000000000 -0400
15332+++ linux-2.6.32.43/arch/x86/kernel/machine_kexec_32.c 2011-04-17 15:56:46.000000000 -0400
15333@@ -26,7 +26,7 @@
15334 #include <asm/system.h>
15335 #include <asm/cacheflush.h>
15336
15337-static void set_idt(void *newidt, __u16 limit)
15338+static void set_idt(struct desc_struct *newidt, __u16 limit)
15339 {
15340 struct desc_ptr curidt;
15341
15342@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
15343 }
15344
15345
15346-static void set_gdt(void *newgdt, __u16 limit)
15347+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15348 {
15349 struct desc_ptr curgdt;
15350
15351@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15352 }
15353
15354 control_page = page_address(image->control_code_page);
15355- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15356+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15357
15358 relocate_kernel_ptr = control_page;
15359 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15360diff -urNp linux-2.6.32.43/arch/x86/kernel/microcode_amd.c linux-2.6.32.43/arch/x86/kernel/microcode_amd.c
15361--- linux-2.6.32.43/arch/x86/kernel/microcode_amd.c 2011-04-17 17:00:52.000000000 -0400
15362+++ linux-2.6.32.43/arch/x86/kernel/microcode_amd.c 2011-04-17 17:03:05.000000000 -0400
15363@@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int c
15364 uci->mc = NULL;
15365 }
15366
15367-static struct microcode_ops microcode_amd_ops = {
15368+static const struct microcode_ops microcode_amd_ops = {
15369 .request_microcode_user = request_microcode_user,
15370 .request_microcode_fw = request_microcode_fw,
15371 .collect_cpu_info = collect_cpu_info_amd,
15372@@ -372,7 +372,7 @@ static struct microcode_ops microcode_am
15373 .microcode_fini_cpu = microcode_fini_cpu_amd,
15374 };
15375
15376-struct microcode_ops * __init init_amd_microcode(void)
15377+const struct microcode_ops * __init init_amd_microcode(void)
15378 {
15379 return &microcode_amd_ops;
15380 }
15381diff -urNp linux-2.6.32.43/arch/x86/kernel/microcode_core.c linux-2.6.32.43/arch/x86/kernel/microcode_core.c
15382--- linux-2.6.32.43/arch/x86/kernel/microcode_core.c 2011-03-27 14:31:47.000000000 -0400
15383+++ linux-2.6.32.43/arch/x86/kernel/microcode_core.c 2011-04-17 15:56:46.000000000 -0400
15384@@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
15385
15386 #define MICROCODE_VERSION "2.00"
15387
15388-static struct microcode_ops *microcode_ops;
15389+static const struct microcode_ops *microcode_ops;
15390
15391 /*
15392 * Synchronization.
15393diff -urNp linux-2.6.32.43/arch/x86/kernel/microcode_intel.c linux-2.6.32.43/arch/x86/kernel/microcode_intel.c
15394--- linux-2.6.32.43/arch/x86/kernel/microcode_intel.c 2011-03-27 14:31:47.000000000 -0400
15395+++ linux-2.6.32.43/arch/x86/kernel/microcode_intel.c 2011-04-17 15:56:46.000000000 -0400
15396@@ -443,13 +443,13 @@ static enum ucode_state request_microcod
15397
15398 static int get_ucode_user(void *to, const void *from, size_t n)
15399 {
15400- return copy_from_user(to, from, n);
15401+ return copy_from_user(to, (__force const void __user *)from, n);
15402 }
15403
15404 static enum ucode_state
15405 request_microcode_user(int cpu, const void __user *buf, size_t size)
15406 {
15407- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15408+ return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
15409 }
15410
15411 static void microcode_fini_cpu(int cpu)
15412@@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
15413 uci->mc = NULL;
15414 }
15415
15416-static struct microcode_ops microcode_intel_ops = {
15417+static const struct microcode_ops microcode_intel_ops = {
15418 .request_microcode_user = request_microcode_user,
15419 .request_microcode_fw = request_microcode_fw,
15420 .collect_cpu_info = collect_cpu_info,
15421@@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
15422 .microcode_fini_cpu = microcode_fini_cpu,
15423 };
15424
15425-struct microcode_ops * __init init_intel_microcode(void)
15426+const struct microcode_ops * __init init_intel_microcode(void)
15427 {
15428 return &microcode_intel_ops;
15429 }
15430diff -urNp linux-2.6.32.43/arch/x86/kernel/module.c linux-2.6.32.43/arch/x86/kernel/module.c
15431--- linux-2.6.32.43/arch/x86/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
15432+++ linux-2.6.32.43/arch/x86/kernel/module.c 2011-04-17 15:56:46.000000000 -0400
15433@@ -34,7 +34,7 @@
15434 #define DEBUGP(fmt...)
15435 #endif
15436
15437-void *module_alloc(unsigned long size)
15438+static void *__module_alloc(unsigned long size, pgprot_t prot)
15439 {
15440 struct vm_struct *area;
15441
15442@@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
15443 if (!area)
15444 return NULL;
15445
15446- return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
15447- PAGE_KERNEL_EXEC);
15448+ return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
15449+}
15450+
15451+void *module_alloc(unsigned long size)
15452+{
15453+
15454+#ifdef CONFIG_PAX_KERNEXEC
15455+ return __module_alloc(size, PAGE_KERNEL);
15456+#else
15457+ return __module_alloc(size, PAGE_KERNEL_EXEC);
15458+#endif
15459+
15460 }
15461
15462 /* Free memory returned from module_alloc */
15463@@ -58,6 +68,40 @@ void module_free(struct module *mod, voi
15464 vfree(module_region);
15465 }
15466
15467+#ifdef CONFIG_PAX_KERNEXEC
15468+#ifdef CONFIG_X86_32
15469+void *module_alloc_exec(unsigned long size)
15470+{
15471+ struct vm_struct *area;
15472+
15473+ if (size == 0)
15474+ return NULL;
15475+
15476+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15477+ return area ? area->addr : NULL;
15478+}
15479+EXPORT_SYMBOL(module_alloc_exec);
15480+
15481+void module_free_exec(struct module *mod, void *module_region)
15482+{
15483+ vunmap(module_region);
15484+}
15485+EXPORT_SYMBOL(module_free_exec);
15486+#else
15487+void module_free_exec(struct module *mod, void *module_region)
15488+{
15489+ module_free(mod, module_region);
15490+}
15491+EXPORT_SYMBOL(module_free_exec);
15492+
15493+void *module_alloc_exec(unsigned long size)
15494+{
15495+ return __module_alloc(size, PAGE_KERNEL_RX);
15496+}
15497+EXPORT_SYMBOL(module_alloc_exec);
15498+#endif
15499+#endif
15500+
15501 /* We don't need anything special. */
15502 int module_frob_arch_sections(Elf_Ehdr *hdr,
15503 Elf_Shdr *sechdrs,
15504@@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15505 unsigned int i;
15506 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15507 Elf32_Sym *sym;
15508- uint32_t *location;
15509+ uint32_t *plocation, location;
15510
15511 DEBUGP("Applying relocate section %u to %u\n", relsec,
15512 sechdrs[relsec].sh_info);
15513 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15514 /* This is where to make the change */
15515- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15516- + rel[i].r_offset;
15517+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15518+ location = (uint32_t)plocation;
15519+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15520+ plocation = ktla_ktva((void *)plocation);
15521 /* This is the symbol it is referring to. Note that all
15522 undefined symbols have been resolved. */
15523 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15524@@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15525 switch (ELF32_R_TYPE(rel[i].r_info)) {
15526 case R_386_32:
15527 /* We add the value into the location given */
15528- *location += sym->st_value;
15529+ pax_open_kernel();
15530+ *plocation += sym->st_value;
15531+ pax_close_kernel();
15532 break;
15533 case R_386_PC32:
15534 /* Add the value, subtract its postition */
15535- *location += sym->st_value - (uint32_t)location;
15536+ pax_open_kernel();
15537+ *plocation += sym->st_value - location;
15538+ pax_close_kernel();
15539 break;
15540 default:
15541 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15542@@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
15543 case R_X86_64_NONE:
15544 break;
15545 case R_X86_64_64:
15546+ pax_open_kernel();
15547 *(u64 *)loc = val;
15548+ pax_close_kernel();
15549 break;
15550 case R_X86_64_32:
15551+ pax_open_kernel();
15552 *(u32 *)loc = val;
15553+ pax_close_kernel();
15554 if (val != *(u32 *)loc)
15555 goto overflow;
15556 break;
15557 case R_X86_64_32S:
15558+ pax_open_kernel();
15559 *(s32 *)loc = val;
15560+ pax_close_kernel();
15561 if ((s64)val != *(s32 *)loc)
15562 goto overflow;
15563 break;
15564 case R_X86_64_PC32:
15565 val -= (u64)loc;
15566+ pax_open_kernel();
15567 *(u32 *)loc = val;
15568+ pax_close_kernel();
15569+
15570 #if 0
15571 if ((s64)val != *(s32 *)loc)
15572 goto overflow;
15573diff -urNp linux-2.6.32.43/arch/x86/kernel/paravirt.c linux-2.6.32.43/arch/x86/kernel/paravirt.c
15574--- linux-2.6.32.43/arch/x86/kernel/paravirt.c 2011-03-27 14:31:47.000000000 -0400
15575+++ linux-2.6.32.43/arch/x86/kernel/paravirt.c 2011-07-19 18:26:50.000000000 -0400
15576@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
15577 {
15578 return x;
15579 }
15580+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15581+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
15582+#endif
15583
15584 void __init default_banner(void)
15585 {
15586@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
15587 * corresponding structure. */
15588 static void *get_call_destination(u8 type)
15589 {
15590- struct paravirt_patch_template tmpl = {
15591+ const struct paravirt_patch_template tmpl = {
15592 .pv_init_ops = pv_init_ops,
15593 .pv_time_ops = pv_time_ops,
15594 .pv_cpu_ops = pv_cpu_ops,
15595@@ -133,6 +136,9 @@ static void *get_call_destination(u8 typ
15596 .pv_lock_ops = pv_lock_ops,
15597 #endif
15598 };
15599+
15600+ pax_track_stack();
15601+
15602 return *((void **)&tmpl + type);
15603 }
15604
15605@@ -145,15 +151,19 @@ unsigned paravirt_patch_default(u8 type,
15606 if (opfunc == NULL)
15607 /* If there's no function, patch it with a ud2a (BUG) */
15608 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
15609- else if (opfunc == _paravirt_nop)
15610+ else if (opfunc == (void *)_paravirt_nop)
15611 /* If the operation is a nop, then nop the callsite */
15612 ret = paravirt_patch_nop();
15613
15614 /* identity functions just return their single argument */
15615- else if (opfunc == _paravirt_ident_32)
15616+ else if (opfunc == (void *)_paravirt_ident_32)
15617 ret = paravirt_patch_ident_32(insnbuf, len);
15618- else if (opfunc == _paravirt_ident_64)
15619+ else if (opfunc == (void *)_paravirt_ident_64)
15620 ret = paravirt_patch_ident_64(insnbuf, len);
15621+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15622+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
15623+ ret = paravirt_patch_ident_64(insnbuf, len);
15624+#endif
15625
15626 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
15627 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
15628@@ -178,7 +188,7 @@ unsigned paravirt_patch_insns(void *insn
15629 if (insn_len > len || start == NULL)
15630 insn_len = len;
15631 else
15632- memcpy(insnbuf, start, insn_len);
15633+ memcpy(insnbuf, ktla_ktva(start), insn_len);
15634
15635 return insn_len;
15636 }
15637@@ -294,22 +304,22 @@ void arch_flush_lazy_mmu_mode(void)
15638 preempt_enable();
15639 }
15640
15641-struct pv_info pv_info = {
15642+struct pv_info pv_info __read_only = {
15643 .name = "bare hardware",
15644 .paravirt_enabled = 0,
15645 .kernel_rpl = 0,
15646 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
15647 };
15648
15649-struct pv_init_ops pv_init_ops = {
15650+struct pv_init_ops pv_init_ops __read_only = {
15651 .patch = native_patch,
15652 };
15653
15654-struct pv_time_ops pv_time_ops = {
15655+struct pv_time_ops pv_time_ops __read_only = {
15656 .sched_clock = native_sched_clock,
15657 };
15658
15659-struct pv_irq_ops pv_irq_ops = {
15660+struct pv_irq_ops pv_irq_ops __read_only = {
15661 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
15662 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
15663 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
15664@@ -321,7 +331,7 @@ struct pv_irq_ops pv_irq_ops = {
15665 #endif
15666 };
15667
15668-struct pv_cpu_ops pv_cpu_ops = {
15669+struct pv_cpu_ops pv_cpu_ops __read_only = {
15670 .cpuid = native_cpuid,
15671 .get_debugreg = native_get_debugreg,
15672 .set_debugreg = native_set_debugreg,
15673@@ -382,21 +392,26 @@ struct pv_cpu_ops pv_cpu_ops = {
15674 .end_context_switch = paravirt_nop,
15675 };
15676
15677-struct pv_apic_ops pv_apic_ops = {
15678+struct pv_apic_ops pv_apic_ops __read_only = {
15679 #ifdef CONFIG_X86_LOCAL_APIC
15680 .startup_ipi_hook = paravirt_nop,
15681 #endif
15682 };
15683
15684-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
15685+#ifdef CONFIG_X86_32
15686+#ifdef CONFIG_X86_PAE
15687+/* 64-bit pagetable entries */
15688+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
15689+#else
15690 /* 32-bit pagetable entries */
15691 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
15692+#endif
15693 #else
15694 /* 64-bit pagetable entries */
15695 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
15696 #endif
15697
15698-struct pv_mmu_ops pv_mmu_ops = {
15699+struct pv_mmu_ops pv_mmu_ops __read_only = {
15700
15701 .read_cr2 = native_read_cr2,
15702 .write_cr2 = native_write_cr2,
15703@@ -467,6 +482,12 @@ struct pv_mmu_ops pv_mmu_ops = {
15704 },
15705
15706 .set_fixmap = native_set_fixmap,
15707+
15708+#ifdef CONFIG_PAX_KERNEXEC
15709+ .pax_open_kernel = native_pax_open_kernel,
15710+ .pax_close_kernel = native_pax_close_kernel,
15711+#endif
15712+
15713 };
15714
15715 EXPORT_SYMBOL_GPL(pv_time_ops);
15716diff -urNp linux-2.6.32.43/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.32.43/arch/x86/kernel/paravirt-spinlocks.c
15717--- linux-2.6.32.43/arch/x86/kernel/paravirt-spinlocks.c 2011-03-27 14:31:47.000000000 -0400
15718+++ linux-2.6.32.43/arch/x86/kernel/paravirt-spinlocks.c 2011-04-17 15:56:46.000000000 -0400
15719@@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
15720 __raw_spin_lock(lock);
15721 }
15722
15723-struct pv_lock_ops pv_lock_ops = {
15724+struct pv_lock_ops pv_lock_ops __read_only = {
15725 #ifdef CONFIG_SMP
15726 .spin_is_locked = __ticket_spin_is_locked,
15727 .spin_is_contended = __ticket_spin_is_contended,
15728diff -urNp linux-2.6.32.43/arch/x86/kernel/pci-calgary_64.c linux-2.6.32.43/arch/x86/kernel/pci-calgary_64.c
15729--- linux-2.6.32.43/arch/x86/kernel/pci-calgary_64.c 2011-03-27 14:31:47.000000000 -0400
15730+++ linux-2.6.32.43/arch/x86/kernel/pci-calgary_64.c 2011-04-17 15:56:46.000000000 -0400
15731@@ -477,7 +477,7 @@ static void calgary_free_coherent(struct
15732 free_pages((unsigned long)vaddr, get_order(size));
15733 }
15734
15735-static struct dma_map_ops calgary_dma_ops = {
15736+static const struct dma_map_ops calgary_dma_ops = {
15737 .alloc_coherent = calgary_alloc_coherent,
15738 .free_coherent = calgary_free_coherent,
15739 .map_sg = calgary_map_sg,
15740diff -urNp linux-2.6.32.43/arch/x86/kernel/pci-dma.c linux-2.6.32.43/arch/x86/kernel/pci-dma.c
15741--- linux-2.6.32.43/arch/x86/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
15742+++ linux-2.6.32.43/arch/x86/kernel/pci-dma.c 2011-04-17 15:56:46.000000000 -0400
15743@@ -14,7 +14,7 @@
15744
15745 static int forbid_dac __read_mostly;
15746
15747-struct dma_map_ops *dma_ops;
15748+const struct dma_map_ops *dma_ops;
15749 EXPORT_SYMBOL(dma_ops);
15750
15751 static int iommu_sac_force __read_mostly;
15752@@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
15753
15754 int dma_supported(struct device *dev, u64 mask)
15755 {
15756- struct dma_map_ops *ops = get_dma_ops(dev);
15757+ const struct dma_map_ops *ops = get_dma_ops(dev);
15758
15759 #ifdef CONFIG_PCI
15760 if (mask > 0xffffffff && forbid_dac > 0) {
15761diff -urNp linux-2.6.32.43/arch/x86/kernel/pci-gart_64.c linux-2.6.32.43/arch/x86/kernel/pci-gart_64.c
15762--- linux-2.6.32.43/arch/x86/kernel/pci-gart_64.c 2011-03-27 14:31:47.000000000 -0400
15763+++ linux-2.6.32.43/arch/x86/kernel/pci-gart_64.c 2011-04-17 15:56:46.000000000 -0400
15764@@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct ag
15765 return -1;
15766 }
15767
15768-static struct dma_map_ops gart_dma_ops = {
15769+static const struct dma_map_ops gart_dma_ops = {
15770 .map_sg = gart_map_sg,
15771 .unmap_sg = gart_unmap_sg,
15772 .map_page = gart_map_page,
15773diff -urNp linux-2.6.32.43/arch/x86/kernel/pci-nommu.c linux-2.6.32.43/arch/x86/kernel/pci-nommu.c
15774--- linux-2.6.32.43/arch/x86/kernel/pci-nommu.c 2011-03-27 14:31:47.000000000 -0400
15775+++ linux-2.6.32.43/arch/x86/kernel/pci-nommu.c 2011-04-17 15:56:46.000000000 -0400
15776@@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(str
15777 flush_write_buffers();
15778 }
15779
15780-struct dma_map_ops nommu_dma_ops = {
15781+const struct dma_map_ops nommu_dma_ops = {
15782 .alloc_coherent = dma_generic_alloc_coherent,
15783 .free_coherent = nommu_free_coherent,
15784 .map_sg = nommu_map_sg,
15785diff -urNp linux-2.6.32.43/arch/x86/kernel/pci-swiotlb.c linux-2.6.32.43/arch/x86/kernel/pci-swiotlb.c
15786--- linux-2.6.32.43/arch/x86/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
15787+++ linux-2.6.32.43/arch/x86/kernel/pci-swiotlb.c 2011-04-17 15:56:46.000000000 -0400
15788@@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
15789 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
15790 }
15791
15792-static struct dma_map_ops swiotlb_dma_ops = {
15793+static const struct dma_map_ops swiotlb_dma_ops = {
15794 .mapping_error = swiotlb_dma_mapping_error,
15795 .alloc_coherent = x86_swiotlb_alloc_coherent,
15796 .free_coherent = swiotlb_free_coherent,
15797diff -urNp linux-2.6.32.43/arch/x86/kernel/process_32.c linux-2.6.32.43/arch/x86/kernel/process_32.c
15798--- linux-2.6.32.43/arch/x86/kernel/process_32.c 2011-06-25 12:55:34.000000000 -0400
15799+++ linux-2.6.32.43/arch/x86/kernel/process_32.c 2011-06-25 12:56:37.000000000 -0400
15800@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
15801 unsigned long thread_saved_pc(struct task_struct *tsk)
15802 {
15803 return ((unsigned long *)tsk->thread.sp)[3];
15804+//XXX return tsk->thread.eip;
15805 }
15806
15807 #ifndef CONFIG_SMP
15808@@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, i
15809 unsigned short ss, gs;
15810 const char *board;
15811
15812- if (user_mode_vm(regs)) {
15813+ if (user_mode(regs)) {
15814 sp = regs->sp;
15815 ss = regs->ss & 0xffff;
15816- gs = get_user_gs(regs);
15817 } else {
15818 sp = (unsigned long) (&regs->sp);
15819 savesegment(ss, ss);
15820- savesegment(gs, gs);
15821 }
15822+ gs = get_user_gs(regs);
15823
15824 printk("\n");
15825
15826@@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), voi
15827 regs.bx = (unsigned long) fn;
15828 regs.dx = (unsigned long) arg;
15829
15830- regs.ds = __USER_DS;
15831- regs.es = __USER_DS;
15832+ regs.ds = __KERNEL_DS;
15833+ regs.es = __KERNEL_DS;
15834 regs.fs = __KERNEL_PERCPU;
15835- regs.gs = __KERNEL_STACK_CANARY;
15836+ savesegment(gs, regs.gs);
15837 regs.orig_ax = -1;
15838 regs.ip = (unsigned long) kernel_thread_helper;
15839 regs.cs = __KERNEL_CS | get_kernel_rpl();
15840@@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flag
15841 struct task_struct *tsk;
15842 int err;
15843
15844- childregs = task_pt_regs(p);
15845+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
15846 *childregs = *regs;
15847 childregs->ax = 0;
15848 childregs->sp = sp;
15849
15850 p->thread.sp = (unsigned long) childregs;
15851 p->thread.sp0 = (unsigned long) (childregs+1);
15852+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15853
15854 p->thread.ip = (unsigned long) ret_from_fork;
15855
15856@@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p,
15857 struct thread_struct *prev = &prev_p->thread,
15858 *next = &next_p->thread;
15859 int cpu = smp_processor_id();
15860- struct tss_struct *tss = &per_cpu(init_tss, cpu);
15861+ struct tss_struct *tss = init_tss + cpu;
15862 bool preload_fpu;
15863
15864 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
15865@@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p,
15866 */
15867 lazy_save_gs(prev->gs);
15868
15869+#ifdef CONFIG_PAX_MEMORY_UDEREF
15870+ __set_fs(task_thread_info(next_p)->addr_limit);
15871+#endif
15872+
15873 /*
15874 * Load the per-thread Thread-Local Storage descriptor.
15875 */
15876@@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p,
15877 */
15878 arch_end_context_switch(next_p);
15879
15880+ percpu_write(current_task, next_p);
15881+ percpu_write(current_tinfo, &next_p->tinfo);
15882+
15883 if (preload_fpu)
15884 __math_state_restore();
15885
15886@@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p,
15887 if (prev->gs | next->gs)
15888 lazy_load_gs(next->gs);
15889
15890- percpu_write(current_task, next_p);
15891-
15892 return prev_p;
15893 }
15894
15895@@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_stru
15896 } while (count++ < 16);
15897 return 0;
15898 }
15899-
15900diff -urNp linux-2.6.32.43/arch/x86/kernel/process_64.c linux-2.6.32.43/arch/x86/kernel/process_64.c
15901--- linux-2.6.32.43/arch/x86/kernel/process_64.c 2011-06-25 12:55:34.000000000 -0400
15902+++ linux-2.6.32.43/arch/x86/kernel/process_64.c 2011-06-25 12:56:37.000000000 -0400
15903@@ -91,7 +91,7 @@ static void __exit_idle(void)
15904 void exit_idle(void)
15905 {
15906 /* idle loop has pid 0 */
15907- if (current->pid)
15908+ if (task_pid_nr(current))
15909 return;
15910 __exit_idle();
15911 }
15912@@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, i
15913 if (!board)
15914 board = "";
15915 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
15916- current->pid, current->comm, print_tainted(),
15917+ task_pid_nr(current), current->comm, print_tainted(),
15918 init_utsname()->release,
15919 (int)strcspn(init_utsname()->version, " "),
15920 init_utsname()->version, board);
15921@@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flag
15922 struct pt_regs *childregs;
15923 struct task_struct *me = current;
15924
15925- childregs = ((struct pt_regs *)
15926- (THREAD_SIZE + task_stack_page(p))) - 1;
15927+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
15928 *childregs = *regs;
15929
15930 childregs->ax = 0;
15931@@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flag
15932 p->thread.sp = (unsigned long) childregs;
15933 p->thread.sp0 = (unsigned long) (childregs+1);
15934 p->thread.usersp = me->thread.usersp;
15935+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
15936
15937 set_tsk_thread_flag(p, TIF_FORK);
15938
15939@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p,
15940 struct thread_struct *prev = &prev_p->thread;
15941 struct thread_struct *next = &next_p->thread;
15942 int cpu = smp_processor_id();
15943- struct tss_struct *tss = &per_cpu(init_tss, cpu);
15944+ struct tss_struct *tss = init_tss + cpu;
15945 unsigned fsindex, gsindex;
15946 bool preload_fpu;
15947
15948@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p,
15949 prev->usersp = percpu_read(old_rsp);
15950 percpu_write(old_rsp, next->usersp);
15951 percpu_write(current_task, next_p);
15952+ percpu_write(current_tinfo, &next_p->tinfo);
15953
15954- percpu_write(kernel_stack,
15955- (unsigned long)task_stack_page(next_p) +
15956- THREAD_SIZE - KERNEL_STACK_OFFSET);
15957+ percpu_write(kernel_stack, next->sp0);
15958
15959 /*
15960 * Now maybe reload the debug registers and handle I/O bitmaps
15961@@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_stru
15962 if (!p || p == current || p->state == TASK_RUNNING)
15963 return 0;
15964 stack = (unsigned long)task_stack_page(p);
15965- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
15966+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
15967 return 0;
15968 fp = *(u64 *)(p->thread.sp);
15969 do {
15970- if (fp < (unsigned long)stack ||
15971- fp >= (unsigned long)stack+THREAD_SIZE)
15972+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
15973 return 0;
15974 ip = *(u64 *)(fp+8);
15975 if (!in_sched_functions(ip))
15976diff -urNp linux-2.6.32.43/arch/x86/kernel/process.c linux-2.6.32.43/arch/x86/kernel/process.c
15977--- linux-2.6.32.43/arch/x86/kernel/process.c 2011-04-22 19:16:29.000000000 -0400
15978+++ linux-2.6.32.43/arch/x86/kernel/process.c 2011-05-22 23:02:03.000000000 -0400
15979@@ -51,16 +51,33 @@ void free_thread_xstate(struct task_stru
15980
15981 void free_thread_info(struct thread_info *ti)
15982 {
15983- free_thread_xstate(ti->task);
15984 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
15985 }
15986
15987+static struct kmem_cache *task_struct_cachep;
15988+
15989 void arch_task_cache_init(void)
15990 {
15991- task_xstate_cachep =
15992- kmem_cache_create("task_xstate", xstate_size,
15993+ /* create a slab on which task_structs can be allocated */
15994+ task_struct_cachep =
15995+ kmem_cache_create("task_struct", sizeof(struct task_struct),
15996+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
15997+
15998+ task_xstate_cachep =
15999+ kmem_cache_create("task_xstate", xstate_size,
16000 __alignof__(union thread_xstate),
16001- SLAB_PANIC | SLAB_NOTRACK, NULL);
16002+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16003+}
16004+
16005+struct task_struct *alloc_task_struct(void)
16006+{
16007+ return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
16008+}
16009+
16010+void free_task_struct(struct task_struct *task)
16011+{
16012+ free_thread_xstate(task);
16013+ kmem_cache_free(task_struct_cachep, task);
16014 }
16015
16016 /*
16017@@ -73,7 +90,7 @@ void exit_thread(void)
16018 unsigned long *bp = t->io_bitmap_ptr;
16019
16020 if (bp) {
16021- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16022+ struct tss_struct *tss = init_tss + get_cpu();
16023
16024 t->io_bitmap_ptr = NULL;
16025 clear_thread_flag(TIF_IO_BITMAP);
16026@@ -93,6 +110,9 @@ void flush_thread(void)
16027
16028 clear_tsk_thread_flag(tsk, TIF_DEBUG);
16029
16030+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16031+ loadsegment(gs, 0);
16032+#endif
16033 tsk->thread.debugreg0 = 0;
16034 tsk->thread.debugreg1 = 0;
16035 tsk->thread.debugreg2 = 0;
16036@@ -307,7 +327,7 @@ void default_idle(void)
16037 EXPORT_SYMBOL(default_idle);
16038 #endif
16039
16040-void stop_this_cpu(void *dummy)
16041+__noreturn void stop_this_cpu(void *dummy)
16042 {
16043 local_irq_disable();
16044 /*
16045@@ -568,16 +588,35 @@ static int __init idle_setup(char *str)
16046 }
16047 early_param("idle", idle_setup);
16048
16049-unsigned long arch_align_stack(unsigned long sp)
16050+#ifdef CONFIG_PAX_RANDKSTACK
16051+asmlinkage void pax_randomize_kstack(void)
16052 {
16053- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16054- sp -= get_random_int() % 8192;
16055- return sp & ~0xf;
16056-}
16057+ struct thread_struct *thread = &current->thread;
16058+ unsigned long time;
16059
16060-unsigned long arch_randomize_brk(struct mm_struct *mm)
16061-{
16062- unsigned long range_end = mm->brk + 0x02000000;
16063- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16064+ if (!randomize_va_space)
16065+ return;
16066+
16067+ rdtscl(time);
16068+
16069+ /* P4 seems to return a 0 LSB, ignore it */
16070+#ifdef CONFIG_MPENTIUM4
16071+ time &= 0x3EUL;
16072+ time <<= 2;
16073+#elif defined(CONFIG_X86_64)
16074+ time &= 0xFUL;
16075+ time <<= 4;
16076+#else
16077+ time &= 0x1FUL;
16078+ time <<= 3;
16079+#endif
16080+
16081+ thread->sp0 ^= time;
16082+ load_sp0(init_tss + smp_processor_id(), thread);
16083+
16084+#ifdef CONFIG_X86_64
16085+ percpu_write(kernel_stack, thread->sp0);
16086+#endif
16087 }
16088+#endif
16089
16090diff -urNp linux-2.6.32.43/arch/x86/kernel/ptrace.c linux-2.6.32.43/arch/x86/kernel/ptrace.c
16091--- linux-2.6.32.43/arch/x86/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
16092+++ linux-2.6.32.43/arch/x86/kernel/ptrace.c 2011-04-17 15:56:46.000000000 -0400
16093@@ -925,7 +925,7 @@ static const struct user_regset_view use
16094 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
16095 {
16096 int ret;
16097- unsigned long __user *datap = (unsigned long __user *)data;
16098+ unsigned long __user *datap = (__force unsigned long __user *)data;
16099
16100 switch (request) {
16101 /* read the word at location addr in the USER area. */
16102@@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *chi
16103 if (addr < 0)
16104 return -EIO;
16105 ret = do_get_thread_area(child, addr,
16106- (struct user_desc __user *) data);
16107+ (__force struct user_desc __user *) data);
16108 break;
16109
16110 case PTRACE_SET_THREAD_AREA:
16111 if (addr < 0)
16112 return -EIO;
16113 ret = do_set_thread_area(child, addr,
16114- (struct user_desc __user *) data, 0);
16115+ (__force struct user_desc __user *) data, 0);
16116 break;
16117 #endif
16118
16119@@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *chi
16120 #ifdef CONFIG_X86_PTRACE_BTS
16121 case PTRACE_BTS_CONFIG:
16122 ret = ptrace_bts_config
16123- (child, data, (struct ptrace_bts_config __user *)addr);
16124+ (child, data, (__force struct ptrace_bts_config __user *)addr);
16125 break;
16126
16127 case PTRACE_BTS_STATUS:
16128 ret = ptrace_bts_status
16129- (child, data, (struct ptrace_bts_config __user *)addr);
16130+ (child, data, (__force struct ptrace_bts_config __user *)addr);
16131 break;
16132
16133 case PTRACE_BTS_SIZE:
16134@@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *chi
16135
16136 case PTRACE_BTS_GET:
16137 ret = ptrace_bts_read_record
16138- (child, data, (struct bts_struct __user *) addr);
16139+ (child, data, (__force struct bts_struct __user *) addr);
16140 break;
16141
16142 case PTRACE_BTS_CLEAR:
16143@@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
16144
16145 case PTRACE_BTS_DRAIN:
16146 ret = ptrace_bts_drain
16147- (child, data, (struct bts_struct __user *) addr);
16148+ (child, data, (__force struct bts_struct __user *) addr);
16149 break;
16150 #endif /* CONFIG_X86_PTRACE_BTS */
16151
16152@@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *ts
16153 info.si_code = si_code;
16154
16155 /* User-mode ip? */
16156- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
16157+ info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
16158
16159 /* Send us the fake SIGTRAP */
16160 force_sig_info(SIGTRAP, &info, tsk);
16161@@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *ts
16162 * We must return the syscall number to actually look up in the table.
16163 * This can be -1L to skip running any syscall at all.
16164 */
16165-asmregparm long syscall_trace_enter(struct pt_regs *regs)
16166+long syscall_trace_enter(struct pt_regs *regs)
16167 {
16168 long ret = 0;
16169
16170@@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(stru
16171 return ret ?: regs->orig_ax;
16172 }
16173
16174-asmregparm void syscall_trace_leave(struct pt_regs *regs)
16175+void syscall_trace_leave(struct pt_regs *regs)
16176 {
16177 if (unlikely(current->audit_context))
16178 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
16179diff -urNp linux-2.6.32.43/arch/x86/kernel/reboot.c linux-2.6.32.43/arch/x86/kernel/reboot.c
16180--- linux-2.6.32.43/arch/x86/kernel/reboot.c 2011-03-27 14:31:47.000000000 -0400
16181+++ linux-2.6.32.43/arch/x86/kernel/reboot.c 2011-05-22 23:02:03.000000000 -0400
16182@@ -33,7 +33,7 @@ void (*pm_power_off)(void);
16183 EXPORT_SYMBOL(pm_power_off);
16184
16185 static const struct desc_ptr no_idt = {};
16186-static int reboot_mode;
16187+static unsigned short reboot_mode;
16188 enum reboot_type reboot_type = BOOT_KBD;
16189 int reboot_force;
16190
16191@@ -292,12 +292,12 @@ core_initcall(reboot_init);
16192 controller to pulse the CPU reset line, which is more thorough, but
16193 doesn't work with at least one type of 486 motherboard. It is easy
16194 to stop this code working; hence the copious comments. */
16195-static const unsigned long long
16196-real_mode_gdt_entries [3] =
16197+static struct desc_struct
16198+real_mode_gdt_entries [3] __read_only =
16199 {
16200- 0x0000000000000000ULL, /* Null descriptor */
16201- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
16202- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
16203+ GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
16204+ GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
16205+ GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
16206 };
16207
16208 static const struct desc_ptr
16209@@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
16210 * specified by the code and length parameters.
16211 * We assume that length will aways be less that 100!
16212 */
16213-void machine_real_restart(const unsigned char *code, int length)
16214+__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
16215 {
16216 local_irq_disable();
16217
16218@@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
16219 /* Remap the kernel at virtual address zero, as well as offset zero
16220 from the kernel segment. This assumes the kernel segment starts at
16221 virtual address PAGE_OFFSET. */
16222- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16223- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
16224+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16225+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
16226
16227 /*
16228 * Use `swapper_pg_dir' as our page directory.
16229@@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
16230 boot)". This seems like a fairly standard thing that gets set by
16231 REBOOT.COM programs, and the previous reset routine did this
16232 too. */
16233- *((unsigned short *)0x472) = reboot_mode;
16234+ *(unsigned short *)(__va(0x472)) = reboot_mode;
16235
16236 /* For the switch to real mode, copy some code to low memory. It has
16237 to be in the first 64k because it is running in 16-bit mode, and it
16238 has to have the same physical and virtual address, because it turns
16239 off paging. Copy it near the end of the first page, out of the way
16240 of BIOS variables. */
16241- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
16242- real_mode_switch, sizeof (real_mode_switch));
16243- memcpy((void *)(0x1000 - 100), code, length);
16244+ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
16245+ memcpy(__va(0x1000 - 100), code, length);
16246
16247 /* Set up the IDT for real mode. */
16248 load_idt(&real_mode_idt);
16249@@ -416,6 +415,7 @@ void machine_real_restart(const unsigned
16250 __asm__ __volatile__ ("ljmp $0x0008,%0"
16251 :
16252 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
16253+ do { } while (1);
16254 }
16255 #ifdef CONFIG_APM_MODULE
16256 EXPORT_SYMBOL(machine_real_restart);
16257@@ -536,7 +536,7 @@ void __attribute__((weak)) mach_reboot_f
16258 {
16259 }
16260
16261-static void native_machine_emergency_restart(void)
16262+__noreturn static void native_machine_emergency_restart(void)
16263 {
16264 int i;
16265
16266@@ -651,13 +651,13 @@ void native_machine_shutdown(void)
16267 #endif
16268 }
16269
16270-static void __machine_emergency_restart(int emergency)
16271+static __noreturn void __machine_emergency_restart(int emergency)
16272 {
16273 reboot_emergency = emergency;
16274 machine_ops.emergency_restart();
16275 }
16276
16277-static void native_machine_restart(char *__unused)
16278+static __noreturn void native_machine_restart(char *__unused)
16279 {
16280 printk("machine restart\n");
16281
16282@@ -666,7 +666,7 @@ static void native_machine_restart(char
16283 __machine_emergency_restart(0);
16284 }
16285
16286-static void native_machine_halt(void)
16287+static __noreturn void native_machine_halt(void)
16288 {
16289 /* stop other cpus and apics */
16290 machine_shutdown();
16291@@ -677,7 +677,7 @@ static void native_machine_halt(void)
16292 stop_this_cpu(NULL);
16293 }
16294
16295-static void native_machine_power_off(void)
16296+__noreturn static void native_machine_power_off(void)
16297 {
16298 if (pm_power_off) {
16299 if (!reboot_force)
16300@@ -686,6 +686,7 @@ static void native_machine_power_off(voi
16301 }
16302 /* a fallback in case there is no PM info available */
16303 tboot_shutdown(TB_SHUTDOWN_HALT);
16304+ do { } while (1);
16305 }
16306
16307 struct machine_ops machine_ops = {
16308diff -urNp linux-2.6.32.43/arch/x86/kernel/setup.c linux-2.6.32.43/arch/x86/kernel/setup.c
16309--- linux-2.6.32.43/arch/x86/kernel/setup.c 2011-04-17 17:00:52.000000000 -0400
16310+++ linux-2.6.32.43/arch/x86/kernel/setup.c 2011-04-17 17:03:05.000000000 -0400
16311@@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
16312
16313 if (!boot_params.hdr.root_flags)
16314 root_mountflags &= ~MS_RDONLY;
16315- init_mm.start_code = (unsigned long) _text;
16316- init_mm.end_code = (unsigned long) _etext;
16317+ init_mm.start_code = ktla_ktva((unsigned long) _text);
16318+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
16319 init_mm.end_data = (unsigned long) _edata;
16320 init_mm.brk = _brk_end;
16321
16322- code_resource.start = virt_to_phys(_text);
16323- code_resource.end = virt_to_phys(_etext)-1;
16324- data_resource.start = virt_to_phys(_etext);
16325+ code_resource.start = virt_to_phys(ktla_ktva(_text));
16326+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16327+ data_resource.start = virt_to_phys(_sdata);
16328 data_resource.end = virt_to_phys(_edata)-1;
16329 bss_resource.start = virt_to_phys(&__bss_start);
16330 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16331diff -urNp linux-2.6.32.43/arch/x86/kernel/setup_percpu.c linux-2.6.32.43/arch/x86/kernel/setup_percpu.c
16332--- linux-2.6.32.43/arch/x86/kernel/setup_percpu.c 2011-03-27 14:31:47.000000000 -0400
16333+++ linux-2.6.32.43/arch/x86/kernel/setup_percpu.c 2011-06-04 20:36:29.000000000 -0400
16334@@ -25,19 +25,17 @@
16335 # define DBG(x...)
16336 #endif
16337
16338-DEFINE_PER_CPU(int, cpu_number);
16339+#ifdef CONFIG_SMP
16340+DEFINE_PER_CPU(unsigned int, cpu_number);
16341 EXPORT_PER_CPU_SYMBOL(cpu_number);
16342+#endif
16343
16344-#ifdef CONFIG_X86_64
16345 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16346-#else
16347-#define BOOT_PERCPU_OFFSET 0
16348-#endif
16349
16350 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16351 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16352
16353-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16354+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16355 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16356 };
16357 EXPORT_SYMBOL(__per_cpu_offset);
16358@@ -159,10 +157,10 @@ static inline void setup_percpu_segment(
16359 {
16360 #ifdef CONFIG_X86_32
16361 struct desc_struct gdt;
16362+ unsigned long base = per_cpu_offset(cpu);
16363
16364- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16365- 0x2 | DESCTYPE_S, 0x8);
16366- gdt.s = 1;
16367+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16368+ 0x83 | DESCTYPE_S, 0xC);
16369 write_gdt_entry(get_cpu_gdt_table(cpu),
16370 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16371 #endif
16372@@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
16373 /* alrighty, percpu areas up and running */
16374 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16375 for_each_possible_cpu(cpu) {
16376+#ifdef CONFIG_CC_STACKPROTECTOR
16377+#ifdef CONFIG_X86_32
16378+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
16379+#endif
16380+#endif
16381 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16382 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16383 per_cpu(cpu_number, cpu) = cpu;
16384@@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
16385 early_per_cpu_map(x86_cpu_to_node_map, cpu);
16386 #endif
16387 #endif
16388+#ifdef CONFIG_CC_STACKPROTECTOR
16389+#ifdef CONFIG_X86_32
16390+ if (!cpu)
16391+ per_cpu(stack_canary.canary, cpu) = canary;
16392+#endif
16393+#endif
16394 /*
16395 * Up to this point, the boot CPU has been using .data.init
16396 * area. Reload any changed state for the boot CPU.
16397diff -urNp linux-2.6.32.43/arch/x86/kernel/signal.c linux-2.6.32.43/arch/x86/kernel/signal.c
16398--- linux-2.6.32.43/arch/x86/kernel/signal.c 2011-03-27 14:31:47.000000000 -0400
16399+++ linux-2.6.32.43/arch/x86/kernel/signal.c 2011-05-22 23:02:03.000000000 -0400
16400@@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
16401 * Align the stack pointer according to the i386 ABI,
16402 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16403 */
16404- sp = ((sp + 4) & -16ul) - 4;
16405+ sp = ((sp - 12) & -16ul) - 4;
16406 #else /* !CONFIG_X86_32 */
16407 sp = round_down(sp, 16) - 8;
16408 #endif
16409@@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
16410 * Return an always-bogus address instead so we will die with SIGSEGV.
16411 */
16412 if (onsigstack && !likely(on_sig_stack(sp)))
16413- return (void __user *)-1L;
16414+ return (__force void __user *)-1L;
16415
16416 /* save i387 state */
16417 if (used_math() && save_i387_xstate(*fpstate) < 0)
16418- return (void __user *)-1L;
16419+ return (__force void __user *)-1L;
16420
16421 return (void __user *)sp;
16422 }
16423@@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
16424 }
16425
16426 if (current->mm->context.vdso)
16427- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16428+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16429 else
16430- restorer = &frame->retcode;
16431+ restorer = (void __user *)&frame->retcode;
16432 if (ka->sa.sa_flags & SA_RESTORER)
16433 restorer = ka->sa.sa_restorer;
16434
16435@@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
16436 * reasons and because gdb uses it as a signature to notice
16437 * signal handler stack frames.
16438 */
16439- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16440+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16441
16442 if (err)
16443 return -EFAULT;
16444@@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, str
16445 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16446
16447 /* Set up to return from userspace. */
16448- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16449+ if (current->mm->context.vdso)
16450+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16451+ else
16452+ restorer = (void __user *)&frame->retcode;
16453 if (ka->sa.sa_flags & SA_RESTORER)
16454 restorer = ka->sa.sa_restorer;
16455 put_user_ex(restorer, &frame->pretcode);
16456@@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, str
16457 * reasons and because gdb uses it as a signature to notice
16458 * signal handler stack frames.
16459 */
16460- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16461+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16462 } put_user_catch(err);
16463
16464 if (err)
16465@@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *re
16466 int signr;
16467 sigset_t *oldset;
16468
16469+ pax_track_stack();
16470+
16471 /*
16472 * We want the common case to go fast, which is why we may in certain
16473 * cases get here from kernel mode. Just return without doing anything
16474@@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *re
16475 * X86_32: vm86 regs switched out by assembly code before reaching
16476 * here, so testing against kernel CS suffices.
16477 */
16478- if (!user_mode(regs))
16479+ if (!user_mode_novm(regs))
16480 return;
16481
16482 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
16483diff -urNp linux-2.6.32.43/arch/x86/kernel/smpboot.c linux-2.6.32.43/arch/x86/kernel/smpboot.c
16484--- linux-2.6.32.43/arch/x86/kernel/smpboot.c 2011-03-27 14:31:47.000000000 -0400
16485+++ linux-2.6.32.43/arch/x86/kernel/smpboot.c 2011-07-01 19:10:03.000000000 -0400
16486@@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct
16487 */
16488 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
16489
16490-void cpu_hotplug_driver_lock()
16491+void cpu_hotplug_driver_lock(void)
16492 {
16493- mutex_lock(&x86_cpu_hotplug_driver_mutex);
16494+ mutex_lock(&x86_cpu_hotplug_driver_mutex);
16495 }
16496
16497-void cpu_hotplug_driver_unlock()
16498+void cpu_hotplug_driver_unlock(void)
16499 {
16500- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16501+ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16502 }
16503
16504 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
16505@@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_a
16506 * target processor state.
16507 */
16508 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
16509- (unsigned long)stack_start.sp);
16510+ stack_start);
16511
16512 /*
16513 * Run STARTUP IPI loop.
16514@@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int api
16515 set_idle_for_cpu(cpu, c_idle.idle);
16516 do_rest:
16517 per_cpu(current_task, cpu) = c_idle.idle;
16518+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16519 #ifdef CONFIG_X86_32
16520 /* Stack for startup_32 can be just as for start_secondary onwards */
16521 irq_ctx_init(cpu);
16522@@ -750,13 +751,15 @@ do_rest:
16523 #else
16524 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16525 initial_gs = per_cpu_offset(cpu);
16526- per_cpu(kernel_stack, cpu) =
16527- (unsigned long)task_stack_page(c_idle.idle) -
16528- KERNEL_STACK_OFFSET + THREAD_SIZE;
16529+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16530 #endif
16531+
16532+ pax_open_kernel();
16533 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16534+ pax_close_kernel();
16535+
16536 initial_code = (unsigned long)start_secondary;
16537- stack_start.sp = (void *) c_idle.idle->thread.sp;
16538+ stack_start = c_idle.idle->thread.sp;
16539
16540 /* start_ip had better be page-aligned! */
16541 start_ip = setup_trampoline();
16542@@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int
16543
16544 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16545
16546+#ifdef CONFIG_PAX_PER_CPU_PGD
16547+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16548+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16549+ KERNEL_PGD_PTRS);
16550+#endif
16551+
16552 err = do_boot_cpu(apicid, cpu);
16553
16554 if (err) {
16555diff -urNp linux-2.6.32.43/arch/x86/kernel/step.c linux-2.6.32.43/arch/x86/kernel/step.c
16556--- linux-2.6.32.43/arch/x86/kernel/step.c 2011-03-27 14:31:47.000000000 -0400
16557+++ linux-2.6.32.43/arch/x86/kernel/step.c 2011-04-17 15:56:46.000000000 -0400
16558@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
16559 struct desc_struct *desc;
16560 unsigned long base;
16561
16562- seg &= ~7UL;
16563+ seg >>= 3;
16564
16565 mutex_lock(&child->mm->context.lock);
16566- if (unlikely((seg >> 3) >= child->mm->context.size))
16567+ if (unlikely(seg >= child->mm->context.size))
16568 addr = -1L; /* bogus selector, access would fault */
16569 else {
16570 desc = child->mm->context.ldt + seg;
16571@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
16572 addr += base;
16573 }
16574 mutex_unlock(&child->mm->context.lock);
16575- }
16576+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16577+ addr = ktla_ktva(addr);
16578
16579 return addr;
16580 }
16581@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
16582 unsigned char opcode[15];
16583 unsigned long addr = convert_ip_to_linear(child, regs);
16584
16585+ if (addr == -EINVAL)
16586+ return 0;
16587+
16588 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16589 for (i = 0; i < copied; i++) {
16590 switch (opcode[i]) {
16591@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
16592
16593 #ifdef CONFIG_X86_64
16594 case 0x40 ... 0x4f:
16595- if (regs->cs != __USER_CS)
16596+ if ((regs->cs & 0xffff) != __USER_CS)
16597 /* 32-bit mode: register increment */
16598 return 0;
16599 /* 64-bit mode: REX prefix */
16600diff -urNp linux-2.6.32.43/arch/x86/kernel/syscall_table_32.S linux-2.6.32.43/arch/x86/kernel/syscall_table_32.S
16601--- linux-2.6.32.43/arch/x86/kernel/syscall_table_32.S 2011-03-27 14:31:47.000000000 -0400
16602+++ linux-2.6.32.43/arch/x86/kernel/syscall_table_32.S 2011-04-17 15:56:46.000000000 -0400
16603@@ -1,3 +1,4 @@
16604+.section .rodata,"a",@progbits
16605 ENTRY(sys_call_table)
16606 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
16607 .long sys_exit
16608diff -urNp linux-2.6.32.43/arch/x86/kernel/sys_i386_32.c linux-2.6.32.43/arch/x86/kernel/sys_i386_32.c
16609--- linux-2.6.32.43/arch/x86/kernel/sys_i386_32.c 2011-03-27 14:31:47.000000000 -0400
16610+++ linux-2.6.32.43/arch/x86/kernel/sys_i386_32.c 2011-04-17 15:56:46.000000000 -0400
16611@@ -24,6 +24,21 @@
16612
16613 #include <asm/syscalls.h>
16614
16615+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
16616+{
16617+ unsigned long pax_task_size = TASK_SIZE;
16618+
16619+#ifdef CONFIG_PAX_SEGMEXEC
16620+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
16621+ pax_task_size = SEGMEXEC_TASK_SIZE;
16622+#endif
16623+
16624+ if (len > pax_task_size || addr > pax_task_size - len)
16625+ return -EINVAL;
16626+
16627+ return 0;
16628+}
16629+
16630 /*
16631 * Perform the select(nd, in, out, ex, tv) and mmap() system
16632 * calls. Linux/i386 didn't use to be able to handle more than
16633@@ -58,6 +73,212 @@ out:
16634 return err;
16635 }
16636
16637+unsigned long
16638+arch_get_unmapped_area(struct file *filp, unsigned long addr,
16639+ unsigned long len, unsigned long pgoff, unsigned long flags)
16640+{
16641+ struct mm_struct *mm = current->mm;
16642+ struct vm_area_struct *vma;
16643+ unsigned long start_addr, pax_task_size = TASK_SIZE;
16644+
16645+#ifdef CONFIG_PAX_SEGMEXEC
16646+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
16647+ pax_task_size = SEGMEXEC_TASK_SIZE;
16648+#endif
16649+
16650+ pax_task_size -= PAGE_SIZE;
16651+
16652+ if (len > pax_task_size)
16653+ return -ENOMEM;
16654+
16655+ if (flags & MAP_FIXED)
16656+ return addr;
16657+
16658+#ifdef CONFIG_PAX_RANDMMAP
16659+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16660+#endif
16661+
16662+ if (addr) {
16663+ addr = PAGE_ALIGN(addr);
16664+ if (pax_task_size - len >= addr) {
16665+ vma = find_vma(mm, addr);
16666+ if (check_heap_stack_gap(vma, addr, len))
16667+ return addr;
16668+ }
16669+ }
16670+ if (len > mm->cached_hole_size) {
16671+ start_addr = addr = mm->free_area_cache;
16672+ } else {
16673+ start_addr = addr = mm->mmap_base;
16674+ mm->cached_hole_size = 0;
16675+ }
16676+
16677+#ifdef CONFIG_PAX_PAGEEXEC
16678+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
16679+ start_addr = 0x00110000UL;
16680+
16681+#ifdef CONFIG_PAX_RANDMMAP
16682+ if (mm->pax_flags & MF_PAX_RANDMMAP)
16683+ start_addr += mm->delta_mmap & 0x03FFF000UL;
16684+#endif
16685+
16686+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
16687+ start_addr = addr = mm->mmap_base;
16688+ else
16689+ addr = start_addr;
16690+ }
16691+#endif
16692+
16693+full_search:
16694+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
16695+ /* At this point: (!vma || addr < vma->vm_end). */
16696+ if (pax_task_size - len < addr) {
16697+ /*
16698+ * Start a new search - just in case we missed
16699+ * some holes.
16700+ */
16701+ if (start_addr != mm->mmap_base) {
16702+ start_addr = addr = mm->mmap_base;
16703+ mm->cached_hole_size = 0;
16704+ goto full_search;
16705+ }
16706+ return -ENOMEM;
16707+ }
16708+ if (check_heap_stack_gap(vma, addr, len))
16709+ break;
16710+ if (addr + mm->cached_hole_size < vma->vm_start)
16711+ mm->cached_hole_size = vma->vm_start - addr;
16712+ addr = vma->vm_end;
16713+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
16714+ start_addr = addr = mm->mmap_base;
16715+ mm->cached_hole_size = 0;
16716+ goto full_search;
16717+ }
16718+ }
16719+
16720+ /*
16721+ * Remember the place where we stopped the search:
16722+ */
16723+ mm->free_area_cache = addr + len;
16724+ return addr;
16725+}
16726+
16727+unsigned long
16728+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
16729+ const unsigned long len, const unsigned long pgoff,
16730+ const unsigned long flags)
16731+{
16732+ struct vm_area_struct *vma;
16733+ struct mm_struct *mm = current->mm;
16734+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
16735+
16736+#ifdef CONFIG_PAX_SEGMEXEC
16737+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
16738+ pax_task_size = SEGMEXEC_TASK_SIZE;
16739+#endif
16740+
16741+ pax_task_size -= PAGE_SIZE;
16742+
16743+ /* requested length too big for entire address space */
16744+ if (len > pax_task_size)
16745+ return -ENOMEM;
16746+
16747+ if (flags & MAP_FIXED)
16748+ return addr;
16749+
16750+#ifdef CONFIG_PAX_PAGEEXEC
16751+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
16752+ goto bottomup;
16753+#endif
16754+
16755+#ifdef CONFIG_PAX_RANDMMAP
16756+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16757+#endif
16758+
16759+ /* requesting a specific address */
16760+ if (addr) {
16761+ addr = PAGE_ALIGN(addr);
16762+ if (pax_task_size - len >= addr) {
16763+ vma = find_vma(mm, addr);
16764+ if (check_heap_stack_gap(vma, addr, len))
16765+ return addr;
16766+ }
16767+ }
16768+
16769+ /* check if free_area_cache is useful for us */
16770+ if (len <= mm->cached_hole_size) {
16771+ mm->cached_hole_size = 0;
16772+ mm->free_area_cache = mm->mmap_base;
16773+ }
16774+
16775+ /* either no address requested or can't fit in requested address hole */
16776+ addr = mm->free_area_cache;
16777+
16778+ /* make sure it can fit in the remaining address space */
16779+ if (addr > len) {
16780+ vma = find_vma(mm, addr-len);
16781+ if (check_heap_stack_gap(vma, addr - len, len))
16782+ /* remember the address as a hint for next time */
16783+ return (mm->free_area_cache = addr-len);
16784+ }
16785+
16786+ if (mm->mmap_base < len)
16787+ goto bottomup;
16788+
16789+ addr = mm->mmap_base-len;
16790+
16791+ do {
16792+ /*
16793+ * Lookup failure means no vma is above this address,
16794+ * else if new region fits below vma->vm_start,
16795+ * return with success:
16796+ */
16797+ vma = find_vma(mm, addr);
16798+ if (check_heap_stack_gap(vma, addr, len))
16799+ /* remember the address as a hint for next time */
16800+ return (mm->free_area_cache = addr);
16801+
16802+ /* remember the largest hole we saw so far */
16803+ if (addr + mm->cached_hole_size < vma->vm_start)
16804+ mm->cached_hole_size = vma->vm_start - addr;
16805+
16806+ /* try just below the current vma->vm_start */
16807+ addr = skip_heap_stack_gap(vma, len);
16808+ } while (!IS_ERR_VALUE(addr));
16809+
16810+bottomup:
16811+ /*
16812+ * A failed mmap() very likely causes application failure,
16813+ * so fall back to the bottom-up function here. This scenario
16814+ * can happen with large stack limits and large mmap()
16815+ * allocations.
16816+ */
16817+
16818+#ifdef CONFIG_PAX_SEGMEXEC
16819+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
16820+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
16821+ else
16822+#endif
16823+
16824+ mm->mmap_base = TASK_UNMAPPED_BASE;
16825+
16826+#ifdef CONFIG_PAX_RANDMMAP
16827+ if (mm->pax_flags & MF_PAX_RANDMMAP)
16828+ mm->mmap_base += mm->delta_mmap;
16829+#endif
16830+
16831+ mm->free_area_cache = mm->mmap_base;
16832+ mm->cached_hole_size = ~0UL;
16833+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
16834+ /*
16835+ * Restore the topdown base:
16836+ */
16837+ mm->mmap_base = base;
16838+ mm->free_area_cache = base;
16839+ mm->cached_hole_size = ~0UL;
16840+
16841+ return addr;
16842+}
16843
16844 struct sel_arg_struct {
16845 unsigned long n;
16846@@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int fi
16847 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
16848 case SEMTIMEDOP:
16849 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
16850- (const struct timespec __user *)fifth);
16851+ (__force const struct timespec __user *)fifth);
16852
16853 case SEMGET:
16854 return sys_semget(first, second, third);
16855@@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int fi
16856 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
16857 if (ret)
16858 return ret;
16859- return put_user(raddr, (ulong __user *) third);
16860+ return put_user(raddr, (__force ulong __user *) third);
16861 }
16862 case 1: /* iBCS2 emulator entry point */
16863 if (!segment_eq(get_fs(), get_ds()))
16864@@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldol
16865
16866 return error;
16867 }
16868-
16869-
16870-/*
16871- * Do a system call from kernel instead of calling sys_execve so we
16872- * end up with proper pt_regs.
16873- */
16874-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
16875-{
16876- long __res;
16877- asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
16878- : "=a" (__res)
16879- : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
16880- return __res;
16881-}
16882diff -urNp linux-2.6.32.43/arch/x86/kernel/sys_x86_64.c linux-2.6.32.43/arch/x86/kernel/sys_x86_64.c
16883--- linux-2.6.32.43/arch/x86/kernel/sys_x86_64.c 2011-03-27 14:31:47.000000000 -0400
16884+++ linux-2.6.32.43/arch/x86/kernel/sys_x86_64.c 2011-04-17 15:56:46.000000000 -0400
16885@@ -32,8 +32,8 @@ out:
16886 return error;
16887 }
16888
16889-static void find_start_end(unsigned long flags, unsigned long *begin,
16890- unsigned long *end)
16891+static void find_start_end(struct mm_struct *mm, unsigned long flags,
16892+ unsigned long *begin, unsigned long *end)
16893 {
16894 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
16895 unsigned long new_begin;
16896@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
16897 *begin = new_begin;
16898 }
16899 } else {
16900- *begin = TASK_UNMAPPED_BASE;
16901+ *begin = mm->mmap_base;
16902 *end = TASK_SIZE;
16903 }
16904 }
16905@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
16906 if (flags & MAP_FIXED)
16907 return addr;
16908
16909- find_start_end(flags, &begin, &end);
16910+ find_start_end(mm, flags, &begin, &end);
16911
16912 if (len > end)
16913 return -ENOMEM;
16914
16915+#ifdef CONFIG_PAX_RANDMMAP
16916+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16917+#endif
16918+
16919 if (addr) {
16920 addr = PAGE_ALIGN(addr);
16921 vma = find_vma(mm, addr);
16922- if (end - len >= addr &&
16923- (!vma || addr + len <= vma->vm_start))
16924+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
16925 return addr;
16926 }
16927 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
16928@@ -106,7 +109,7 @@ full_search:
16929 }
16930 return -ENOMEM;
16931 }
16932- if (!vma || addr + len <= vma->vm_start) {
16933+ if (check_heap_stack_gap(vma, addr, len)) {
16934 /*
16935 * Remember the place where we stopped the search:
16936 */
16937@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
16938 {
16939 struct vm_area_struct *vma;
16940 struct mm_struct *mm = current->mm;
16941- unsigned long addr = addr0;
16942+ unsigned long base = mm->mmap_base, addr = addr0;
16943
16944 /* requested length too big for entire address space */
16945 if (len > TASK_SIZE)
16946@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
16947 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
16948 goto bottomup;
16949
16950+#ifdef CONFIG_PAX_RANDMMAP
16951+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16952+#endif
16953+
16954 /* requesting a specific address */
16955 if (addr) {
16956 addr = PAGE_ALIGN(addr);
16957- vma = find_vma(mm, addr);
16958- if (TASK_SIZE - len >= addr &&
16959- (!vma || addr + len <= vma->vm_start))
16960- return addr;
16961+ if (TASK_SIZE - len >= addr) {
16962+ vma = find_vma(mm, addr);
16963+ if (check_heap_stack_gap(vma, addr, len))
16964+ return addr;
16965+ }
16966 }
16967
16968 /* check if free_area_cache is useful for us */
16969@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
16970 /* make sure it can fit in the remaining address space */
16971 if (addr > len) {
16972 vma = find_vma(mm, addr-len);
16973- if (!vma || addr <= vma->vm_start)
16974+ if (check_heap_stack_gap(vma, addr - len, len))
16975 /* remember the address as a hint for next time */
16976 return mm->free_area_cache = addr-len;
16977 }
16978@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
16979 * return with success:
16980 */
16981 vma = find_vma(mm, addr);
16982- if (!vma || addr+len <= vma->vm_start)
16983+ if (check_heap_stack_gap(vma, addr, len))
16984 /* remember the address as a hint for next time */
16985 return mm->free_area_cache = addr;
16986
16987@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
16988 mm->cached_hole_size = vma->vm_start - addr;
16989
16990 /* try just below the current vma->vm_start */
16991- addr = vma->vm_start-len;
16992- } while (len < vma->vm_start);
16993+ addr = skip_heap_stack_gap(vma, len);
16994+ } while (!IS_ERR_VALUE(addr));
16995
16996 bottomup:
16997 /*
16998@@ -198,13 +206,21 @@ bottomup:
16999 * can happen with large stack limits and large mmap()
17000 * allocations.
17001 */
17002+ mm->mmap_base = TASK_UNMAPPED_BASE;
17003+
17004+#ifdef CONFIG_PAX_RANDMMAP
17005+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17006+ mm->mmap_base += mm->delta_mmap;
17007+#endif
17008+
17009+ mm->free_area_cache = mm->mmap_base;
17010 mm->cached_hole_size = ~0UL;
17011- mm->free_area_cache = TASK_UNMAPPED_BASE;
17012 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17013 /*
17014 * Restore the topdown base:
17015 */
17016- mm->free_area_cache = mm->mmap_base;
17017+ mm->mmap_base = base;
17018+ mm->free_area_cache = base;
17019 mm->cached_hole_size = ~0UL;
17020
17021 return addr;
17022diff -urNp linux-2.6.32.43/arch/x86/kernel/tboot.c linux-2.6.32.43/arch/x86/kernel/tboot.c
17023--- linux-2.6.32.43/arch/x86/kernel/tboot.c 2011-03-27 14:31:47.000000000 -0400
17024+++ linux-2.6.32.43/arch/x86/kernel/tboot.c 2011-05-22 23:02:03.000000000 -0400
17025@@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
17026
17027 void tboot_shutdown(u32 shutdown_type)
17028 {
17029- void (*shutdown)(void);
17030+ void (* __noreturn shutdown)(void);
17031
17032 if (!tboot_enabled())
17033 return;
17034@@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
17035
17036 switch_to_tboot_pt();
17037
17038- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17039+ shutdown = (void *)tboot->shutdown_entry;
17040 shutdown();
17041
17042 /* should not reach here */
17043@@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
17044 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17045 }
17046
17047-static atomic_t ap_wfs_count;
17048+static atomic_unchecked_t ap_wfs_count;
17049
17050 static int tboot_wait_for_aps(int num_aps)
17051 {
17052@@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(
17053 {
17054 switch (action) {
17055 case CPU_DYING:
17056- atomic_inc(&ap_wfs_count);
17057+ atomic_inc_unchecked(&ap_wfs_count);
17058 if (num_online_cpus() == 1)
17059- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17060+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17061 return NOTIFY_BAD;
17062 break;
17063 }
17064@@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
17065
17066 tboot_create_trampoline();
17067
17068- atomic_set(&ap_wfs_count, 0);
17069+ atomic_set_unchecked(&ap_wfs_count, 0);
17070 register_hotcpu_notifier(&tboot_cpu_notifier);
17071 return 0;
17072 }
17073diff -urNp linux-2.6.32.43/arch/x86/kernel/time.c linux-2.6.32.43/arch/x86/kernel/time.c
17074--- linux-2.6.32.43/arch/x86/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
17075+++ linux-2.6.32.43/arch/x86/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
17076@@ -26,17 +26,13 @@
17077 int timer_ack;
17078 #endif
17079
17080-#ifdef CONFIG_X86_64
17081-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
17082-#endif
17083-
17084 unsigned long profile_pc(struct pt_regs *regs)
17085 {
17086 unsigned long pc = instruction_pointer(regs);
17087
17088- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17089+ if (!user_mode(regs) && in_lock_functions(pc)) {
17090 #ifdef CONFIG_FRAME_POINTER
17091- return *(unsigned long *)(regs->bp + sizeof(long));
17092+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17093 #else
17094 unsigned long *sp =
17095 (unsigned long *)kernel_stack_pointer(regs);
17096@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
17097 * or above a saved flags. Eflags has bits 22-31 zero,
17098 * kernel addresses don't.
17099 */
17100+
17101+#ifdef CONFIG_PAX_KERNEXEC
17102+ return ktla_ktva(sp[0]);
17103+#else
17104 if (sp[0] >> 22)
17105 return sp[0];
17106 if (sp[1] >> 22)
17107 return sp[1];
17108 #endif
17109+
17110+#endif
17111 }
17112 return pc;
17113 }
17114diff -urNp linux-2.6.32.43/arch/x86/kernel/tls.c linux-2.6.32.43/arch/x86/kernel/tls.c
17115--- linux-2.6.32.43/arch/x86/kernel/tls.c 2011-03-27 14:31:47.000000000 -0400
17116+++ linux-2.6.32.43/arch/x86/kernel/tls.c 2011-04-17 15:56:46.000000000 -0400
17117@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
17118 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17119 return -EINVAL;
17120
17121+#ifdef CONFIG_PAX_SEGMEXEC
17122+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17123+ return -EINVAL;
17124+#endif
17125+
17126 set_tls_desc(p, idx, &info, 1);
17127
17128 return 0;
17129diff -urNp linux-2.6.32.43/arch/x86/kernel/trampoline_32.S linux-2.6.32.43/arch/x86/kernel/trampoline_32.S
17130--- linux-2.6.32.43/arch/x86/kernel/trampoline_32.S 2011-03-27 14:31:47.000000000 -0400
17131+++ linux-2.6.32.43/arch/x86/kernel/trampoline_32.S 2011-04-17 15:56:46.000000000 -0400
17132@@ -32,6 +32,12 @@
17133 #include <asm/segment.h>
17134 #include <asm/page_types.h>
17135
17136+#ifdef CONFIG_PAX_KERNEXEC
17137+#define ta(X) (X)
17138+#else
17139+#define ta(X) ((X) - __PAGE_OFFSET)
17140+#endif
17141+
17142 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
17143 __CPUINITRODATA
17144 .code16
17145@@ -60,7 +66,7 @@ r_base = .
17146 inc %ax # protected mode (PE) bit
17147 lmsw %ax # into protected mode
17148 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17149- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17150+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
17151
17152 # These need to be in the same 64K segment as the above;
17153 # hence we don't use the boot_gdt_descr defined in head.S
17154diff -urNp linux-2.6.32.43/arch/x86/kernel/trampoline_64.S linux-2.6.32.43/arch/x86/kernel/trampoline_64.S
17155--- linux-2.6.32.43/arch/x86/kernel/trampoline_64.S 2011-03-27 14:31:47.000000000 -0400
17156+++ linux-2.6.32.43/arch/x86/kernel/trampoline_64.S 2011-07-01 18:53:26.000000000 -0400
17157@@ -91,7 +91,7 @@ startup_32:
17158 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17159 movl %eax, %ds
17160
17161- movl $X86_CR4_PAE, %eax
17162+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17163 movl %eax, %cr4 # Enable PAE mode
17164
17165 # Setup trampoline 4 level pagetables
17166@@ -127,7 +127,7 @@ startup_64:
17167 no_longmode:
17168 hlt
17169 jmp no_longmode
17170-#include "verify_cpu_64.S"
17171+#include "verify_cpu.S"
17172
17173 # Careful these need to be in the same 64K segment as the above;
17174 tidt:
17175@@ -138,7 +138,7 @@ tidt:
17176 # so the kernel can live anywhere
17177 .balign 4
17178 tgdt:
17179- .short tgdt_end - tgdt # gdt limit
17180+ .short tgdt_end - tgdt - 1 # gdt limit
17181 .long tgdt - r_base
17182 .short 0
17183 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17184diff -urNp linux-2.6.32.43/arch/x86/kernel/traps.c linux-2.6.32.43/arch/x86/kernel/traps.c
17185--- linux-2.6.32.43/arch/x86/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
17186+++ linux-2.6.32.43/arch/x86/kernel/traps.c 2011-07-06 19:53:33.000000000 -0400
17187@@ -69,12 +69,6 @@ asmlinkage int system_call(void);
17188
17189 /* Do we ignore FPU interrupts ? */
17190 char ignore_fpu_irq;
17191-
17192-/*
17193- * The IDT has to be page-aligned to simplify the Pentium
17194- * F0 0F bug workaround.
17195- */
17196-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17197 #endif
17198
17199 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17200@@ -112,19 +106,19 @@ static inline void preempt_conditional_c
17201 static inline void
17202 die_if_kernel(const char *str, struct pt_regs *regs, long err)
17203 {
17204- if (!user_mode_vm(regs))
17205+ if (!user_mode(regs))
17206 die(str, regs, err);
17207 }
17208 #endif
17209
17210 static void __kprobes
17211-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17212+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17213 long error_code, siginfo_t *info)
17214 {
17215 struct task_struct *tsk = current;
17216
17217 #ifdef CONFIG_X86_32
17218- if (regs->flags & X86_VM_MASK) {
17219+ if (v8086_mode(regs)) {
17220 /*
17221 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17222 * On nmi (interrupt 2), do_trap should not be called.
17223@@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
17224 }
17225 #endif
17226
17227- if (!user_mode(regs))
17228+ if (!user_mode_novm(regs))
17229 goto kernel_trap;
17230
17231 #ifdef CONFIG_X86_32
17232@@ -158,7 +152,7 @@ trap_signal:
17233 printk_ratelimit()) {
17234 printk(KERN_INFO
17235 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17236- tsk->comm, tsk->pid, str,
17237+ tsk->comm, task_pid_nr(tsk), str,
17238 regs->ip, regs->sp, error_code);
17239 print_vma_addr(" in ", regs->ip);
17240 printk("\n");
17241@@ -175,8 +169,20 @@ kernel_trap:
17242 if (!fixup_exception(regs)) {
17243 tsk->thread.error_code = error_code;
17244 tsk->thread.trap_no = trapnr;
17245+
17246+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17247+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17248+ str = "PAX: suspicious stack segment fault";
17249+#endif
17250+
17251 die(str, regs, error_code);
17252 }
17253+
17254+#ifdef CONFIG_PAX_REFCOUNT
17255+ if (trapnr == 4)
17256+ pax_report_refcount_overflow(regs);
17257+#endif
17258+
17259 return;
17260
17261 #ifdef CONFIG_X86_32
17262@@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
17263 conditional_sti(regs);
17264
17265 #ifdef CONFIG_X86_32
17266- if (regs->flags & X86_VM_MASK)
17267+ if (v8086_mode(regs))
17268 goto gp_in_vm86;
17269 #endif
17270
17271 tsk = current;
17272- if (!user_mode(regs))
17273+ if (!user_mode_novm(regs))
17274 goto gp_in_kernel;
17275
17276+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17277+ if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17278+ struct mm_struct *mm = tsk->mm;
17279+ unsigned long limit;
17280+
17281+ down_write(&mm->mmap_sem);
17282+ limit = mm->context.user_cs_limit;
17283+ if (limit < TASK_SIZE) {
17284+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17285+ up_write(&mm->mmap_sem);
17286+ return;
17287+ }
17288+ up_write(&mm->mmap_sem);
17289+ }
17290+#endif
17291+
17292 tsk->thread.error_code = error_code;
17293 tsk->thread.trap_no = 13;
17294
17295@@ -305,6 +327,13 @@ gp_in_kernel:
17296 if (notify_die(DIE_GPF, "general protection fault", regs,
17297 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17298 return;
17299+
17300+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17301+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17302+ die("PAX: suspicious general protection fault", regs, error_code);
17303+ else
17304+#endif
17305+
17306 die("general protection fault", regs, error_code);
17307 }
17308
17309@@ -435,6 +464,17 @@ static notrace __kprobes void default_do
17310 dotraplinkage notrace __kprobes void
17311 do_nmi(struct pt_regs *regs, long error_code)
17312 {
17313+
17314+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17315+ if (!user_mode(regs)) {
17316+ unsigned long cs = regs->cs & 0xFFFF;
17317+ unsigned long ip = ktva_ktla(regs->ip);
17318+
17319+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17320+ regs->ip = ip;
17321+ }
17322+#endif
17323+
17324 nmi_enter();
17325
17326 inc_irq_stat(__nmi_count);
17327@@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(st
17328 }
17329
17330 #ifdef CONFIG_X86_32
17331- if (regs->flags & X86_VM_MASK)
17332+ if (v8086_mode(regs))
17333 goto debug_vm86;
17334 #endif
17335
17336@@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(st
17337 * kernel space (but re-enable TF when returning to user mode).
17338 */
17339 if (condition & DR_STEP) {
17340- if (!user_mode(regs))
17341+ if (!user_mode_novm(regs))
17342 goto clear_TF_reenable;
17343 }
17344
17345@@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs
17346 * Handle strange cache flush from user space exception
17347 * in all other cases. This is undocumented behaviour.
17348 */
17349- if (regs->flags & X86_VM_MASK) {
17350+ if (v8086_mode(regs)) {
17351 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
17352 return;
17353 }
17354@@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) sm
17355 void __math_state_restore(void)
17356 {
17357 struct thread_info *thread = current_thread_info();
17358- struct task_struct *tsk = thread->task;
17359+ struct task_struct *tsk = current;
17360
17361 /*
17362 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17363@@ -825,8 +865,7 @@ void __math_state_restore(void)
17364 */
17365 asmlinkage void math_state_restore(void)
17366 {
17367- struct thread_info *thread = current_thread_info();
17368- struct task_struct *tsk = thread->task;
17369+ struct task_struct *tsk = current;
17370
17371 if (!tsk_used_math(tsk)) {
17372 local_irq_enable();
17373diff -urNp linux-2.6.32.43/arch/x86/kernel/verify_cpu_64.S linux-2.6.32.43/arch/x86/kernel/verify_cpu_64.S
17374--- linux-2.6.32.43/arch/x86/kernel/verify_cpu_64.S 2011-03-27 14:31:47.000000000 -0400
17375+++ linux-2.6.32.43/arch/x86/kernel/verify_cpu_64.S 1969-12-31 19:00:00.000000000 -0500
17376@@ -1,105 +0,0 @@
17377-/*
17378- *
17379- * verify_cpu.S - Code for cpu long mode and SSE verification. This
17380- * code has been borrowed from boot/setup.S and was introduced by
17381- * Andi Kleen.
17382- *
17383- * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17384- * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17385- * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17386- *
17387- * This source code is licensed under the GNU General Public License,
17388- * Version 2. See the file COPYING for more details.
17389- *
17390- * This is a common code for verification whether CPU supports
17391- * long mode and SSE or not. It is not called directly instead this
17392- * file is included at various places and compiled in that context.
17393- * Following are the current usage.
17394- *
17395- * This file is included by both 16bit and 32bit code.
17396- *
17397- * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
17398- * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
17399- * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
17400- * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
17401- *
17402- * verify_cpu, returns the status of cpu check in register %eax.
17403- * 0: Success 1: Failure
17404- *
17405- * The caller needs to check for the error code and take the action
17406- * appropriately. Either display a message or halt.
17407- */
17408-
17409-#include <asm/cpufeature.h>
17410-
17411-verify_cpu:
17412- pushfl # Save caller passed flags
17413- pushl $0 # Kill any dangerous flags
17414- popfl
17415-
17416- pushfl # standard way to check for cpuid
17417- popl %eax
17418- movl %eax,%ebx
17419- xorl $0x200000,%eax
17420- pushl %eax
17421- popfl
17422- pushfl
17423- popl %eax
17424- cmpl %eax,%ebx
17425- jz verify_cpu_no_longmode # cpu has no cpuid
17426-
17427- movl $0x0,%eax # See if cpuid 1 is implemented
17428- cpuid
17429- cmpl $0x1,%eax
17430- jb verify_cpu_no_longmode # no cpuid 1
17431-
17432- xor %di,%di
17433- cmpl $0x68747541,%ebx # AuthenticAMD
17434- jnz verify_cpu_noamd
17435- cmpl $0x69746e65,%edx
17436- jnz verify_cpu_noamd
17437- cmpl $0x444d4163,%ecx
17438- jnz verify_cpu_noamd
17439- mov $1,%di # cpu is from AMD
17440-
17441-verify_cpu_noamd:
17442- movl $0x1,%eax # Does the cpu have what it takes
17443- cpuid
17444- andl $REQUIRED_MASK0,%edx
17445- xorl $REQUIRED_MASK0,%edx
17446- jnz verify_cpu_no_longmode
17447-
17448- movl $0x80000000,%eax # See if extended cpuid is implemented
17449- cpuid
17450- cmpl $0x80000001,%eax
17451- jb verify_cpu_no_longmode # no extended cpuid
17452-
17453- movl $0x80000001,%eax # Does the cpu have what it takes
17454- cpuid
17455- andl $REQUIRED_MASK1,%edx
17456- xorl $REQUIRED_MASK1,%edx
17457- jnz verify_cpu_no_longmode
17458-
17459-verify_cpu_sse_test:
17460- movl $1,%eax
17461- cpuid
17462- andl $SSE_MASK,%edx
17463- cmpl $SSE_MASK,%edx
17464- je verify_cpu_sse_ok
17465- test %di,%di
17466- jz verify_cpu_no_longmode # only try to force SSE on AMD
17467- movl $0xc0010015,%ecx # HWCR
17468- rdmsr
17469- btr $15,%eax # enable SSE
17470- wrmsr
17471- xor %di,%di # don't loop
17472- jmp verify_cpu_sse_test # try again
17473-
17474-verify_cpu_no_longmode:
17475- popfl # Restore caller passed flags
17476- movl $1,%eax
17477- ret
17478-verify_cpu_sse_ok:
17479- popfl # Restore caller passed flags
17480- xorl %eax, %eax
17481- ret
17482diff -urNp linux-2.6.32.43/arch/x86/kernel/verify_cpu.S linux-2.6.32.43/arch/x86/kernel/verify_cpu.S
17483--- linux-2.6.32.43/arch/x86/kernel/verify_cpu.S 1969-12-31 19:00:00.000000000 -0500
17484+++ linux-2.6.32.43/arch/x86/kernel/verify_cpu.S 2011-07-01 18:28:42.000000000 -0400
17485@@ -0,0 +1,140 @@
17486+/*
17487+ *
17488+ * verify_cpu.S - Code for cpu long mode and SSE verification. This
17489+ * code has been borrowed from boot/setup.S and was introduced by
17490+ * Andi Kleen.
17491+ *
17492+ * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17493+ * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17494+ * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17495+ * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
17496+ *
17497+ * This source code is licensed under the GNU General Public License,
17498+ * Version 2. See the file COPYING for more details.
17499+ *
17500+ * This is a common code for verification whether CPU supports
17501+ * long mode and SSE or not. It is not called directly instead this
17502+ * file is included at various places and compiled in that context.
17503+ * This file is expected to run in 32bit code. Currently:
17504+ *
17505+ * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17506+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
17507+ * arch/x86/kernel/head_32.S: processor startup
17508+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17509+ *
17510+ * verify_cpu, returns the status of longmode and SSE in register %eax.
17511+ * 0: Success 1: Failure
17512+ *
17513+ * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
17514+ *
17515+ * The caller needs to check for the error code and take the action
17516+ * appropriately. Either display a message or halt.
17517+ */
17518+
17519+#include <asm/cpufeature.h>
17520+#include <asm/msr-index.h>
17521+
17522+verify_cpu:
17523+ pushfl # Save caller passed flags
17524+ pushl $0 # Kill any dangerous flags
17525+ popfl
17526+
17527+ pushfl # standard way to check for cpuid
17528+ popl %eax
17529+ movl %eax,%ebx
17530+ xorl $0x200000,%eax
17531+ pushl %eax
17532+ popfl
17533+ pushfl
17534+ popl %eax
17535+ cmpl %eax,%ebx
17536+ jz verify_cpu_no_longmode # cpu has no cpuid
17537+
17538+ movl $0x0,%eax # See if cpuid 1 is implemented
17539+ cpuid
17540+ cmpl $0x1,%eax
17541+ jb verify_cpu_no_longmode # no cpuid 1
17542+
17543+ xor %di,%di
17544+ cmpl $0x68747541,%ebx # AuthenticAMD
17545+ jnz verify_cpu_noamd
17546+ cmpl $0x69746e65,%edx
17547+ jnz verify_cpu_noamd
17548+ cmpl $0x444d4163,%ecx
17549+ jnz verify_cpu_noamd
17550+ mov $1,%di # cpu is from AMD
17551+ jmp verify_cpu_check
17552+
17553+verify_cpu_noamd:
17554+ cmpl $0x756e6547,%ebx # GenuineIntel?
17555+ jnz verify_cpu_check
17556+ cmpl $0x49656e69,%edx
17557+ jnz verify_cpu_check
17558+ cmpl $0x6c65746e,%ecx
17559+ jnz verify_cpu_check
17560+
17561+ # only call IA32_MISC_ENABLE when:
17562+ # family > 6 || (family == 6 && model >= 0xd)
17563+ movl $0x1, %eax # check CPU family and model
17564+ cpuid
17565+ movl %eax, %ecx
17566+
17567+ andl $0x0ff00f00, %eax # mask family and extended family
17568+ shrl $8, %eax
17569+ cmpl $6, %eax
17570+ ja verify_cpu_clear_xd # family > 6, ok
17571+ jb verify_cpu_check # family < 6, skip
17572+
17573+ andl $0x000f00f0, %ecx # mask model and extended model
17574+ shrl $4, %ecx
17575+ cmpl $0xd, %ecx
17576+ jb verify_cpu_check # family == 6, model < 0xd, skip
17577+
17578+verify_cpu_clear_xd:
17579+ movl $MSR_IA32_MISC_ENABLE, %ecx
17580+ rdmsr
17581+ btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
17582+ jnc verify_cpu_check # only write MSR if bit was changed
17583+ wrmsr
17584+
17585+verify_cpu_check:
17586+ movl $0x1,%eax # Does the cpu have what it takes
17587+ cpuid
17588+ andl $REQUIRED_MASK0,%edx
17589+ xorl $REQUIRED_MASK0,%edx
17590+ jnz verify_cpu_no_longmode
17591+
17592+ movl $0x80000000,%eax # See if extended cpuid is implemented
17593+ cpuid
17594+ cmpl $0x80000001,%eax
17595+ jb verify_cpu_no_longmode # no extended cpuid
17596+
17597+ movl $0x80000001,%eax # Does the cpu have what it takes
17598+ cpuid
17599+ andl $REQUIRED_MASK1,%edx
17600+ xorl $REQUIRED_MASK1,%edx
17601+ jnz verify_cpu_no_longmode
17602+
17603+verify_cpu_sse_test:
17604+ movl $1,%eax
17605+ cpuid
17606+ andl $SSE_MASK,%edx
17607+ cmpl $SSE_MASK,%edx
17608+ je verify_cpu_sse_ok
17609+ test %di,%di
17610+ jz verify_cpu_no_longmode # only try to force SSE on AMD
17611+ movl $MSR_K7_HWCR,%ecx
17612+ rdmsr
17613+ btr $15,%eax # enable SSE
17614+ wrmsr
17615+ xor %di,%di # don't loop
17616+ jmp verify_cpu_sse_test # try again
17617+
17618+verify_cpu_no_longmode:
17619+ popfl # Restore caller passed flags
17620+ movl $1,%eax
17621+ ret
17622+verify_cpu_sse_ok:
17623+ popfl # Restore caller passed flags
17624+ xorl %eax, %eax
17625+ ret
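
Note on the new verify_cpu.S above: it only clears the XD_DISABLE bit on Intel parts with family > 6, or family 6 with model >= 0xd, using the masked family/model fields of CPUID leaf 1. A minimal userspace sketch of the same arithmetic, using GCC's <cpuid.h> helper, follows; the standalone program is illustration only and not part of the patch.

#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx, family, model;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return 1;

	/* same masks as the assembly above: base family in bits 8-11 plus
	 * extended family in bits 20-27, base model in bits 4-7 plus
	 * extended model in bits 16-19 */
	family = (eax & 0x0ff00f00) >> 8;
	model  = (eax & 0x000f00f0) >> 4;

	if (family > 6 || (family == 6 && model >= 0xd))
		printf("new enough: would go on to clear XD_DISABLE (family %#x, model %#x)\n", family, model);
	else
		printf("too old: MSR_IA32_MISC_ENABLE left alone (family %#x, model %#x)\n", family, model);
	return 0;
}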
17626diff -urNp linux-2.6.32.43/arch/x86/kernel/vm86_32.c linux-2.6.32.43/arch/x86/kernel/vm86_32.c
17627--- linux-2.6.32.43/arch/x86/kernel/vm86_32.c 2011-03-27 14:31:47.000000000 -0400
17628+++ linux-2.6.32.43/arch/x86/kernel/vm86_32.c 2011-04-17 15:56:46.000000000 -0400
17629@@ -41,6 +41,7 @@
17630 #include <linux/ptrace.h>
17631 #include <linux/audit.h>
17632 #include <linux/stddef.h>
17633+#include <linux/grsecurity.h>
17634
17635 #include <asm/uaccess.h>
17636 #include <asm/io.h>
17637@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
17638 do_exit(SIGSEGV);
17639 }
17640
17641- tss = &per_cpu(init_tss, get_cpu());
17642+ tss = init_tss + get_cpu();
17643 current->thread.sp0 = current->thread.saved_sp0;
17644 current->thread.sysenter_cs = __KERNEL_CS;
17645 load_sp0(tss, &current->thread);
17646@@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
17647 struct task_struct *tsk;
17648 int tmp, ret = -EPERM;
17649
17650+#ifdef CONFIG_GRKERNSEC_VM86
17651+ if (!capable(CAP_SYS_RAWIO)) {
17652+ gr_handle_vm86();
17653+ goto out;
17654+ }
17655+#endif
17656+
17657 tsk = current;
17658 if (tsk->thread.saved_sp0)
17659 goto out;
17660@@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
17661 int tmp, ret;
17662 struct vm86plus_struct __user *v86;
17663
17664+#ifdef CONFIG_GRKERNSEC_VM86
17665+ if (!capable(CAP_SYS_RAWIO)) {
17666+ gr_handle_vm86();
17667+ ret = -EPERM;
17668+ goto out;
17669+ }
17670+#endif
17671+
17672 tsk = current;
17673 switch (regs->bx) {
17674 case VM86_REQUEST_IRQ:
17675@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
17676 tsk->thread.saved_fs = info->regs32->fs;
17677 tsk->thread.saved_gs = get_user_gs(info->regs32);
17678
17679- tss = &per_cpu(init_tss, get_cpu());
17680+ tss = init_tss + get_cpu();
17681 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17682 if (cpu_has_sep)
17683 tsk->thread.sysenter_cs = 0;
17684@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
17685 goto cannot_handle;
17686 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17687 goto cannot_handle;
17688- intr_ptr = (unsigned long __user *) (i << 2);
17689+ intr_ptr = (__force unsigned long __user *) (i << 2);
17690 if (get_user(segoffs, intr_ptr))
17691 goto cannot_handle;
17692 if ((segoffs >> 16) == BIOSSEG)
17693diff -urNp linux-2.6.32.43/arch/x86/kernel/vmi_32.c linux-2.6.32.43/arch/x86/kernel/vmi_32.c
17694--- linux-2.6.32.43/arch/x86/kernel/vmi_32.c 2011-03-27 14:31:47.000000000 -0400
17695+++ linux-2.6.32.43/arch/x86/kernel/vmi_32.c 2011-04-17 15:56:46.000000000 -0400
17696@@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
17697 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
17698
17699 #define call_vrom_func(rom,func) \
17700- (((VROMFUNC *)(rom->func))())
17701+ (((VROMFUNC *)(ktva_ktla(rom.func)))())
17702
17703 #define call_vrom_long_func(rom,func,arg) \
17704- (((VROMLONGFUNC *)(rom->func)) (arg))
17705+({\
17706+ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
17707+ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
17708+ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
17709+ __reloc;\
17710+})
17711
17712-static struct vrom_header *vmi_rom;
17713+static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
17714 static int disable_pge;
17715 static int disable_pse;
17716 static int disable_sep;
17717@@ -76,10 +81,10 @@ static struct {
17718 void (*set_initial_ap_state)(int, int);
17719 void (*halt)(void);
17720 void (*set_lazy_mode)(int mode);
17721-} vmi_ops;
17722+} vmi_ops __read_only;
17723
17724 /* Cached VMI operations */
17725-struct vmi_timer_ops vmi_timer_ops;
17726+struct vmi_timer_ops vmi_timer_ops __read_only;
17727
17728 /*
17729 * VMI patching routines.
17730@@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
17731 static inline void patch_offset(void *insnbuf,
17732 unsigned long ip, unsigned long dest)
17733 {
17734- *(unsigned long *)(insnbuf+1) = dest-ip-5;
17735+ *(unsigned long *)(insnbuf+1) = dest-ip-5;
17736 }
17737
17738 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
17739@@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
17740 {
17741 u64 reloc;
17742 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
17743+
17744 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
17745 switch(rel->type) {
17746 case VMI_RELOCATION_CALL_REL:
17747@@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
17748
17749 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
17750 {
17751- const pte_t pte = { .pte = 0 };
17752+ const pte_t pte = __pte(0ULL);
17753 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
17754 }
17755
17756 static void vmi_pmd_clear(pmd_t *pmd)
17757 {
17758- const pte_t pte = { .pte = 0 };
17759+ const pte_t pte = __pte(0ULL);
17760 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
17761 }
17762 #endif
17763@@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, un
17764 ap.ss = __KERNEL_DS;
17765 ap.esp = (unsigned long) start_esp;
17766
17767- ap.ds = __USER_DS;
17768- ap.es = __USER_DS;
17769+ ap.ds = __KERNEL_DS;
17770+ ap.es = __KERNEL_DS;
17771 ap.fs = __KERNEL_PERCPU;
17772- ap.gs = __KERNEL_STACK_CANARY;
17773+ savesegment(gs, ap.gs);
17774
17775 ap.eflags = 0;
17776
17777@@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
17778 paravirt_leave_lazy_mmu();
17779 }
17780
17781+#ifdef CONFIG_PAX_KERNEXEC
17782+static unsigned long vmi_pax_open_kernel(void)
17783+{
17784+ return 0;
17785+}
17786+
17787+static unsigned long vmi_pax_close_kernel(void)
17788+{
17789+ return 0;
17790+}
17791+#endif
17792+
17793 static inline int __init check_vmi_rom(struct vrom_header *rom)
17794 {
17795 struct pci_header *pci;
17796@@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
17797 return 0;
17798 if (rom->vrom_signature != VMI_SIGNATURE)
17799 return 0;
17800+ if (rom->rom_length * 512 > sizeof(*rom)) {
17801+ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
17802+ return 0;
17803+ }
17804 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
17805 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
17806 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
17807@@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
17808 struct vrom_header *romstart;
17809 romstart = (struct vrom_header *)isa_bus_to_virt(base);
17810 if (check_vmi_rom(romstart)) {
17811- vmi_rom = romstart;
17812+ vmi_rom = *romstart;
17813 return 1;
17814 }
17815 }
17816@@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
17817
17818 para_fill(pv_irq_ops.safe_halt, Halt);
17819
17820+#ifdef CONFIG_PAX_KERNEXEC
17821+ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
17822+ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
17823+#endif
17824+
17825 /*
17826 * Alternative instruction rewriting doesn't happen soon enough
17827 * to convert VMI_IRET to a call instead of a jump; so we have
17828@@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
17829
17830 void __init vmi_init(void)
17831 {
17832- if (!vmi_rom)
17833+ if (!vmi_rom.rom_signature)
17834 probe_vmi_rom();
17835 else
17836- check_vmi_rom(vmi_rom);
17837+ check_vmi_rom(&vmi_rom);
17838
17839 /* In case probing for or validating the ROM failed, basil */
17840- if (!vmi_rom)
17841+ if (!vmi_rom.rom_signature)
17842 return;
17843
17844- reserve_top_address(-vmi_rom->virtual_top);
17845+ reserve_top_address(-vmi_rom.virtual_top);
17846
17847 #ifdef CONFIG_X86_IO_APIC
17848 /* This is virtual hardware; timer routing is wired correctly */
17849@@ -874,7 +901,7 @@ void __init vmi_activate(void)
17850 {
17851 unsigned long flags;
17852
17853- if (!vmi_rom)
17854+ if (!vmi_rom.rom_signature)
17855 return;
17856
17857 local_irq_save(flags);
17858diff -urNp linux-2.6.32.43/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.43/arch/x86/kernel/vmlinux.lds.S
17859--- linux-2.6.32.43/arch/x86/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
17860+++ linux-2.6.32.43/arch/x86/kernel/vmlinux.lds.S 2011-04-17 15:56:46.000000000 -0400
17861@@ -26,6 +26,13 @@
17862 #include <asm/page_types.h>
17863 #include <asm/cache.h>
17864 #include <asm/boot.h>
17865+#include <asm/segment.h>
17866+
17867+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17868+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
17869+#else
17870+#define __KERNEL_TEXT_OFFSET 0
17871+#endif
17872
17873 #undef i386 /* in case the preprocessor is a 32bit one */
17874
17875@@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
17876 #ifdef CONFIG_X86_32
17877 OUTPUT_ARCH(i386)
17878 ENTRY(phys_startup_32)
17879-jiffies = jiffies_64;
17880 #else
17881 OUTPUT_ARCH(i386:x86-64)
17882 ENTRY(phys_startup_64)
17883-jiffies_64 = jiffies;
17884 #endif
17885
17886 PHDRS {
17887 text PT_LOAD FLAGS(5); /* R_E */
17888- data PT_LOAD FLAGS(7); /* RWE */
17889+#ifdef CONFIG_X86_32
17890+ module PT_LOAD FLAGS(5); /* R_E */
17891+#endif
17892+#ifdef CONFIG_XEN
17893+ rodata PT_LOAD FLAGS(5); /* R_E */
17894+#else
17895+ rodata PT_LOAD FLAGS(4); /* R__ */
17896+#endif
17897+ data PT_LOAD FLAGS(6); /* RW_ */
17898 #ifdef CONFIG_X86_64
17899 user PT_LOAD FLAGS(5); /* R_E */
17900+#endif
17901+ init.begin PT_LOAD FLAGS(6); /* RW_ */
17902 #ifdef CONFIG_SMP
17903 percpu PT_LOAD FLAGS(6); /* RW_ */
17904 #endif
17905+ text.init PT_LOAD FLAGS(5); /* R_E */
17906+ text.exit PT_LOAD FLAGS(5); /* R_E */
17907 init PT_LOAD FLAGS(7); /* RWE */
17908-#endif
17909 note PT_NOTE FLAGS(0); /* ___ */
17910 }
17911
17912 SECTIONS
17913 {
17914 #ifdef CONFIG_X86_32
17915- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
17916- phys_startup_32 = startup_32 - LOAD_OFFSET;
17917+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
17918 #else
17919- . = __START_KERNEL;
17920- phys_startup_64 = startup_64 - LOAD_OFFSET;
17921+ . = __START_KERNEL;
17922 #endif
17923
17924 /* Text and read-only data */
17925- .text : AT(ADDR(.text) - LOAD_OFFSET) {
17926- _text = .;
17927+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
17928 /* bootstrapping code */
17929+#ifdef CONFIG_X86_32
17930+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17931+#else
17932+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17933+#endif
17934+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
17935+ _text = .;
17936 HEAD_TEXT
17937 #ifdef CONFIG_X86_32
17938 . = ALIGN(PAGE_SIZE);
17939@@ -82,28 +102,71 @@ SECTIONS
17940 IRQENTRY_TEXT
17941 *(.fixup)
17942 *(.gnu.warning)
17943- /* End of text section */
17944- _etext = .;
17945 } :text = 0x9090
17946
17947- NOTES :text :note
17948+ . += __KERNEL_TEXT_OFFSET;
17949+
17950+#ifdef CONFIG_X86_32
17951+ . = ALIGN(PAGE_SIZE);
17952+ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
17953+ *(.vmi.rom)
17954+ } :module
17955+
17956+ . = ALIGN(PAGE_SIZE);
17957+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
17958+
17959+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
17960+ MODULES_EXEC_VADDR = .;
17961+ BYTE(0)
17962+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
17963+ . = ALIGN(HPAGE_SIZE);
17964+ MODULES_EXEC_END = . - 1;
17965+#endif
17966+
17967+ } :module
17968+#endif
17969
17970- EXCEPTION_TABLE(16) :text = 0x9090
17971+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
17972+ /* End of text section */
17973+ _etext = . - __KERNEL_TEXT_OFFSET;
17974+ }
17975+
17976+#ifdef CONFIG_X86_32
17977+ . = ALIGN(PAGE_SIZE);
17978+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
17979+ *(.idt)
17980+ . = ALIGN(PAGE_SIZE);
17981+ *(.empty_zero_page)
17982+ *(.swapper_pg_fixmap)
17983+ *(.swapper_pg_pmd)
17984+ *(.swapper_pg_dir)
17985+ *(.trampoline_pg_dir)
17986+ } :rodata
17987+#endif
17988+
17989+ . = ALIGN(PAGE_SIZE);
17990+ NOTES :rodata :note
17991+
17992+ EXCEPTION_TABLE(16) :rodata
17993
17994 RO_DATA(PAGE_SIZE)
17995
17996 /* Data */
17997 .data : AT(ADDR(.data) - LOAD_OFFSET) {
17998+
17999+#ifdef CONFIG_PAX_KERNEXEC
18000+ . = ALIGN(HPAGE_SIZE);
18001+#else
18002+ . = ALIGN(PAGE_SIZE);
18003+#endif
18004+
18005 /* Start of data section */
18006 _sdata = .;
18007
18008 /* init_task */
18009 INIT_TASK_DATA(THREAD_SIZE)
18010
18011-#ifdef CONFIG_X86_32
18012- /* 32 bit has nosave before _edata */
18013 NOSAVE_DATA
18014-#endif
18015
18016 PAGE_ALIGNED_DATA(PAGE_SIZE)
18017
18018@@ -112,6 +175,8 @@ SECTIONS
18019 DATA_DATA
18020 CONSTRUCTORS
18021
18022+ jiffies = jiffies_64;
18023+
18024 /* rarely changed data like cpu maps */
18025 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
18026
18027@@ -166,12 +231,6 @@ SECTIONS
18028 }
18029 vgetcpu_mode = VVIRT(.vgetcpu_mode);
18030
18031- . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
18032- .jiffies : AT(VLOAD(.jiffies)) {
18033- *(.jiffies)
18034- }
18035- jiffies = VVIRT(.jiffies);
18036-
18037 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
18038 *(.vsyscall_3)
18039 }
18040@@ -187,12 +246,19 @@ SECTIONS
18041 #endif /* CONFIG_X86_64 */
18042
18043 /* Init code and data - will be freed after init */
18044- . = ALIGN(PAGE_SIZE);
18045 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
18046+ BYTE(0)
18047+
18048+#ifdef CONFIG_PAX_KERNEXEC
18049+ . = ALIGN(HPAGE_SIZE);
18050+#else
18051+ . = ALIGN(PAGE_SIZE);
18052+#endif
18053+
18054 __init_begin = .; /* paired with __init_end */
18055- }
18056+ } :init.begin
18057
18058-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
18059+#ifdef CONFIG_SMP
18060 /*
18061 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
18062 * output PHDR, so the next output section - .init.text - should
18063@@ -201,12 +267,27 @@ SECTIONS
18064 PERCPU_VADDR(0, :percpu)
18065 #endif
18066
18067- INIT_TEXT_SECTION(PAGE_SIZE)
18068-#ifdef CONFIG_X86_64
18069- :init
18070-#endif
18071+ . = ALIGN(PAGE_SIZE);
18072+ init_begin = .;
18073+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
18074+ VMLINUX_SYMBOL(_sinittext) = .;
18075+ INIT_TEXT
18076+ VMLINUX_SYMBOL(_einittext) = .;
18077+ . = ALIGN(PAGE_SIZE);
18078+ } :text.init
18079
18080- INIT_DATA_SECTION(16)
18081+ /*
18082+ * .exit.text is discard at runtime, not link time, to deal with
18083+ * references from .altinstructions and .eh_frame
18084+ */
18085+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18086+ EXIT_TEXT
18087+ . = ALIGN(16);
18088+ } :text.exit
18089+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18090+
18091+ . = ALIGN(PAGE_SIZE);
18092+ INIT_DATA_SECTION(16) :init
18093
18094 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
18095 __x86_cpu_dev_start = .;
18096@@ -232,19 +313,11 @@ SECTIONS
18097 *(.altinstr_replacement)
18098 }
18099
18100- /*
18101- * .exit.text is discard at runtime, not link time, to deal with
18102- * references from .altinstructions and .eh_frame
18103- */
18104- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18105- EXIT_TEXT
18106- }
18107-
18108 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18109 EXIT_DATA
18110 }
18111
18112-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18113+#ifndef CONFIG_SMP
18114 PERCPU(PAGE_SIZE)
18115 #endif
18116
18117@@ -267,12 +340,6 @@ SECTIONS
18118 . = ALIGN(PAGE_SIZE);
18119 }
18120
18121-#ifdef CONFIG_X86_64
18122- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18123- NOSAVE_DATA
18124- }
18125-#endif
18126-
18127 /* BSS */
18128 . = ALIGN(PAGE_SIZE);
18129 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18130@@ -288,6 +355,7 @@ SECTIONS
18131 __brk_base = .;
18132 . += 64 * 1024; /* 64k alignment slop space */
18133 *(.brk_reservation) /* areas brk users have reserved */
18134+ . = ALIGN(HPAGE_SIZE);
18135 __brk_limit = .;
18136 }
18137
18138@@ -316,13 +384,12 @@ SECTIONS
18139 * for the boot processor.
18140 */
18141 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
18142-INIT_PER_CPU(gdt_page);
18143 INIT_PER_CPU(irq_stack_union);
18144
18145 /*
18146 * Build-time check on the image size:
18147 */
18148-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18149+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18150 "kernel image bigger than KERNEL_IMAGE_SIZE");
18151
18152 #ifdef CONFIG_SMP
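
Note on the vmlinux.lds.S hunk above: the old RWE data/init program headers are split into separate R_E and RW_ ones, so no kernel segment stays both writable and executable. As a loose userspace analogue (not kernel code and not part of the patch), the same write-xor-execute property can be checked for a process by scanning its own /proc/self/maps for mappings that carry both w and x:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/self/maps", "r");
	int bad = 0;

	if (!f) {
		perror("fopen");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		char perms[5] = "";

		/* each line: start-end perms offset dev inode path */
		if (sscanf(line, "%*s %4s", perms) == 1 &&
		    strchr(perms, 'w') && strchr(perms, 'x')) {
			printf("W+X mapping: %s", line);
			bad = 1;
		}
	}
	fclose(f);
	return bad;
}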
18153diff -urNp linux-2.6.32.43/arch/x86/kernel/vsyscall_64.c linux-2.6.32.43/arch/x86/kernel/vsyscall_64.c
18154--- linux-2.6.32.43/arch/x86/kernel/vsyscall_64.c 2011-03-27 14:31:47.000000000 -0400
18155+++ linux-2.6.32.43/arch/x86/kernel/vsyscall_64.c 2011-04-23 12:56:10.000000000 -0400
18156@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
18157
18158 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
18159 /* copy vsyscall data */
18160+ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
18161 vsyscall_gtod_data.clock.vread = clock->vread;
18162 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
18163 vsyscall_gtod_data.clock.mask = clock->mask;
18164@@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
18165 We do this here because otherwise user space would do it on
18166 its own in a likely inferior way (no access to jiffies).
18167 If you don't like it pass NULL. */
18168- if (tcache && tcache->blob[0] == (j = __jiffies)) {
18169+ if (tcache && tcache->blob[0] == (j = jiffies)) {
18170 p = tcache->blob[1];
18171 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
18172 /* Load per CPU data from RDTSCP */
18173diff -urNp linux-2.6.32.43/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.32.43/arch/x86/kernel/x8664_ksyms_64.c
18174--- linux-2.6.32.43/arch/x86/kernel/x8664_ksyms_64.c 2011-03-27 14:31:47.000000000 -0400
18175+++ linux-2.6.32.43/arch/x86/kernel/x8664_ksyms_64.c 2011-04-17 15:56:46.000000000 -0400
18176@@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
18177
18178 EXPORT_SYMBOL(copy_user_generic);
18179 EXPORT_SYMBOL(__copy_user_nocache);
18180-EXPORT_SYMBOL(copy_from_user);
18181-EXPORT_SYMBOL(copy_to_user);
18182 EXPORT_SYMBOL(__copy_from_user_inatomic);
18183
18184 EXPORT_SYMBOL(copy_page);
18185diff -urNp linux-2.6.32.43/arch/x86/kernel/xsave.c linux-2.6.32.43/arch/x86/kernel/xsave.c
18186--- linux-2.6.32.43/arch/x86/kernel/xsave.c 2011-03-27 14:31:47.000000000 -0400
18187+++ linux-2.6.32.43/arch/x86/kernel/xsave.c 2011-04-17 15:56:46.000000000 -0400
18188@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
18189 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18190 return -1;
18191
18192- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18193+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18194 fx_sw_user->extended_size -
18195 FP_XSTATE_MAGIC2_SIZE));
18196 /*
18197@@ -196,7 +196,7 @@ fx_only:
18198 * the other extended state.
18199 */
18200 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18201- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18202+ return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
18203 }
18204
18205 /*
18206@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
18207 if (task_thread_info(tsk)->status & TS_XSAVE)
18208 err = restore_user_xstate(buf);
18209 else
18210- err = fxrstor_checking((__force struct i387_fxsave_struct *)
18211+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
18212 buf);
18213 if (unlikely(err)) {
18214 /*
18215diff -urNp linux-2.6.32.43/arch/x86/kvm/emulate.c linux-2.6.32.43/arch/x86/kvm/emulate.c
18216--- linux-2.6.32.43/arch/x86/kvm/emulate.c 2011-03-27 14:31:47.000000000 -0400
18217+++ linux-2.6.32.43/arch/x86/kvm/emulate.c 2011-04-17 15:56:46.000000000 -0400
18218@@ -81,8 +81,8 @@
18219 #define Src2CL (1<<29)
18220 #define Src2ImmByte (2<<29)
18221 #define Src2One (3<<29)
18222-#define Src2Imm16 (4<<29)
18223-#define Src2Mask (7<<29)
18224+#define Src2Imm16 (4U<<29)
18225+#define Src2Mask (7U<<29)
18226
18227 enum {
18228 Group1_80, Group1_81, Group1_82, Group1_83,
18229@@ -411,6 +411,7 @@ static u32 group2_table[] = {
18230
18231 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
18232 do { \
18233+ unsigned long _tmp; \
18234 __asm__ __volatile__ ( \
18235 _PRE_EFLAGS("0", "4", "2") \
18236 _op _suffix " %"_x"3,%1; " \
18237@@ -424,8 +425,6 @@ static u32 group2_table[] = {
18238 /* Raw emulation: instruction has two explicit operands. */
18239 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
18240 do { \
18241- unsigned long _tmp; \
18242- \
18243 switch ((_dst).bytes) { \
18244 case 2: \
18245 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
18246@@ -441,7 +440,6 @@ static u32 group2_table[] = {
18247
18248 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18249 do { \
18250- unsigned long _tmp; \
18251 switch ((_dst).bytes) { \
18252 case 1: \
18253 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
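
Note on the first emulate.c hunk above: Src2Imm16 and Src2Mask become unsigned constants because 4<<29 and 7<<29 set bit 31, and shifting a 1 into the sign bit of a signed int overflows INT_MAX, which is undefined behaviour in C. A small standalone illustration of the well-defined unsigned values (not part of the patch):

#include <stdio.h>

int main(void)
{
	/* the plain signed forms 4<<29 and 7<<29 would need bit 31, the sign
	 * bit of a 32-bit int; the unsigned literals below are well defined */
	unsigned int src2_imm16 = 4U << 29;
	unsigned int src2_mask  = 7U << 29;

	printf("Src2Imm16 = 0x%08x\n", src2_imm16);	/* 0x80000000 */
	printf("Src2Mask  = 0x%08x\n", src2_mask);	/* 0xe0000000 */
	return 0;
}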
18254diff -urNp linux-2.6.32.43/arch/x86/kvm/lapic.c linux-2.6.32.43/arch/x86/kvm/lapic.c
18255--- linux-2.6.32.43/arch/x86/kvm/lapic.c 2011-03-27 14:31:47.000000000 -0400
18256+++ linux-2.6.32.43/arch/x86/kvm/lapic.c 2011-04-17 15:56:46.000000000 -0400
18257@@ -52,7 +52,7 @@
18258 #define APIC_BUS_CYCLE_NS 1
18259
18260 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18261-#define apic_debug(fmt, arg...)
18262+#define apic_debug(fmt, arg...) do {} while (0)
18263
18264 #define APIC_LVT_NUM 6
18265 /* 14 is the version for Xeon and Pentium 8.4.8*/
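
Note on the lapic.c change above: swapping the empty apic_debug() expansion for do {} while (0) is the usual way to make a disabled debug macro behave like a single statement that still demands its trailing semicolon, so call sites compile the same way whether the printk variant is enabled or not. A tiny standalone illustration (not part of the patch):

#include <stdio.h>

/* disabled variant: expands to one statement, and the terminating ';' at the
 * call site is still required, just as with the printk-based variant */
#define apic_debug(fmt, ...) do { } while (0)

int main(void)
{
	int pending = 0;

	if (pending)
		apic_debug("irq pending\n");
	else
		printf("nothing pending\n");
	return 0;
}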
18266diff -urNp linux-2.6.32.43/arch/x86/kvm/paging_tmpl.h linux-2.6.32.43/arch/x86/kvm/paging_tmpl.h
18267--- linux-2.6.32.43/arch/x86/kvm/paging_tmpl.h 2011-03-27 14:31:47.000000000 -0400
18268+++ linux-2.6.32.43/arch/x86/kvm/paging_tmpl.h 2011-05-16 21:46:57.000000000 -0400
18269@@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_
18270 int level = PT_PAGE_TABLE_LEVEL;
18271 unsigned long mmu_seq;
18272
18273+ pax_track_stack();
18274+
18275 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
18276 kvm_mmu_audit(vcpu, "pre page fault");
18277
18278diff -urNp linux-2.6.32.43/arch/x86/kvm/svm.c linux-2.6.32.43/arch/x86/kvm/svm.c
18279--- linux-2.6.32.43/arch/x86/kvm/svm.c 2011-03-27 14:31:47.000000000 -0400
18280+++ linux-2.6.32.43/arch/x86/kvm/svm.c 2011-04-17 15:56:46.000000000 -0400
18281@@ -2483,9 +2483,12 @@ static int handle_exit(struct kvm_run *k
18282 static void reload_tss(struct kvm_vcpu *vcpu)
18283 {
18284 int cpu = raw_smp_processor_id();
18285-
18286 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
18287+
18288+ pax_open_kernel();
18289 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
18290+ pax_close_kernel();
18291+
18292 load_TR_desc();
18293 }
18294
18295@@ -2946,7 +2949,7 @@ static bool svm_gb_page_enable(void)
18296 return true;
18297 }
18298
18299-static struct kvm_x86_ops svm_x86_ops = {
18300+static const struct kvm_x86_ops svm_x86_ops = {
18301 .cpu_has_kvm_support = has_svm,
18302 .disabled_by_bios = is_disabled,
18303 .hardware_setup = svm_hardware_setup,
18304diff -urNp linux-2.6.32.43/arch/x86/kvm/vmx.c linux-2.6.32.43/arch/x86/kvm/vmx.c
18305--- linux-2.6.32.43/arch/x86/kvm/vmx.c 2011-03-27 14:31:47.000000000 -0400
18306+++ linux-2.6.32.43/arch/x86/kvm/vmx.c 2011-05-04 17:56:20.000000000 -0400
18307@@ -570,7 +570,11 @@ static void reload_tss(void)
18308
18309 kvm_get_gdt(&gdt);
18310 descs = (void *)gdt.base;
18311+
18312+ pax_open_kernel();
18313 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18314+ pax_close_kernel();
18315+
18316 load_TR_desc();
18317 }
18318
18319@@ -1409,8 +1413,11 @@ static __init int hardware_setup(void)
18320 if (!cpu_has_vmx_flexpriority())
18321 flexpriority_enabled = 0;
18322
18323- if (!cpu_has_vmx_tpr_shadow())
18324- kvm_x86_ops->update_cr8_intercept = NULL;
18325+ if (!cpu_has_vmx_tpr_shadow()) {
18326+ pax_open_kernel();
18327+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18328+ pax_close_kernel();
18329+ }
18330
18331 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18332 kvm_disable_largepages();
18333@@ -2361,7 +2368,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
18334 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
18335
18336 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
18337- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
18338+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
18339 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
18340 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
18341 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
18342@@ -3717,6 +3724,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
18343 "jmp .Lkvm_vmx_return \n\t"
18344 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18345 ".Lkvm_vmx_return: "
18346+
18347+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18348+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18349+ ".Lkvm_vmx_return2: "
18350+#endif
18351+
18352 /* Save guest registers, load host registers, keep flags */
18353 "xchg %0, (%%"R"sp) \n\t"
18354 "mov %%"R"ax, %c[rax](%0) \n\t"
18355@@ -3763,8 +3776,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
18356 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
18357 #endif
18358 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
18359+
18360+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18361+ ,[cs]"i"(__KERNEL_CS)
18362+#endif
18363+
18364 : "cc", "memory"
18365- , R"bx", R"di", R"si"
18366+ , R"ax", R"bx", R"di", R"si"
18367 #ifdef CONFIG_X86_64
18368 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
18369 #endif
18370@@ -3781,7 +3799,16 @@ static void vmx_vcpu_run(struct kvm_vcpu
18371 if (vmx->rmode.irq.pending)
18372 fixup_rmode_irq(vmx);
18373
18374- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18375+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18376+
18377+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18378+ loadsegment(fs, __KERNEL_PERCPU);
18379+#endif
18380+
18381+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18382+ __set_fs(current_thread_info()->addr_limit);
18383+#endif
18384+
18385 vmx->launched = 1;
18386
18387 vmx_complete_interrupts(vmx);
18388@@ -3956,7 +3983,7 @@ static bool vmx_gb_page_enable(void)
18389 return false;
18390 }
18391
18392-static struct kvm_x86_ops vmx_x86_ops = {
18393+static const struct kvm_x86_ops vmx_x86_ops = {
18394 .cpu_has_kvm_support = cpu_has_kvm_support,
18395 .disabled_by_bios = vmx_disabled_by_bios,
18396 .hardware_setup = hardware_setup,
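
Note on the vmx_vcpu_run() hunk above: the inline asm's clobber list gains R"ax", i.e. %eax/%rax, telling the compiler that nothing live may be cached in that register across the block. A contrived standalone illustration of declaring such a clobber (made-up example, not the kernel asm):

#include <stdio.h>

/* the asm body overwrites %eax without producing it as an output, so "eax"
 * is listed as a clobber and the compiler keeps live values elsewhere */
static unsigned int demo(unsigned int x)
{
	unsigned int y = x + 1;

	asm volatile("movl $0, %%eax" : : : "eax");
	return y;	/* still correct: y was not left in %eax */
}

int main(void)
{
	printf("%u\n", demo(41));	/* prints 42 */
	return 0;
}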
18397diff -urNp linux-2.6.32.43/arch/x86/kvm/x86.c linux-2.6.32.43/arch/x86/kvm/x86.c
18398--- linux-2.6.32.43/arch/x86/kvm/x86.c 2011-05-10 22:12:01.000000000 -0400
18399+++ linux-2.6.32.43/arch/x86/kvm/x86.c 2011-05-10 22:12:26.000000000 -0400
18400@@ -82,7 +82,7 @@ static void update_cr8_intercept(struct
18401 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
18402 struct kvm_cpuid_entry2 __user *entries);
18403
18404-struct kvm_x86_ops *kvm_x86_ops;
18405+const struct kvm_x86_ops *kvm_x86_ops;
18406 EXPORT_SYMBOL_GPL(kvm_x86_ops);
18407
18408 int ignore_msrs = 0;
18409@@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
18410 struct kvm_cpuid2 *cpuid,
18411 struct kvm_cpuid_entry2 __user *entries)
18412 {
18413- int r;
18414+ int r, i;
18415
18416 r = -E2BIG;
18417 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18418 goto out;
18419 r = -EFAULT;
18420- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18421- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18422+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18423 goto out;
18424+ for (i = 0; i < cpuid->nent; ++i) {
18425+ struct kvm_cpuid_entry2 cpuid_entry;
18426+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18427+ goto out;
18428+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
18429+ }
18430 vcpu->arch.cpuid_nent = cpuid->nent;
18431 kvm_apic_set_version(vcpu);
18432 return 0;
18433@@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
18434 struct kvm_cpuid2 *cpuid,
18435 struct kvm_cpuid_entry2 __user *entries)
18436 {
18437- int r;
18438+ int r, i;
18439
18440 vcpu_load(vcpu);
18441 r = -E2BIG;
18442 if (cpuid->nent < vcpu->arch.cpuid_nent)
18443 goto out;
18444 r = -EFAULT;
18445- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18446- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18447+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18448 goto out;
18449+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18450+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18451+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18452+ goto out;
18453+ }
18454 return 0;
18455
18456 out:
18457@@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
18458 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18459 struct kvm_interrupt *irq)
18460 {
18461- if (irq->irq < 0 || irq->irq >= 256)
18462+ if (irq->irq >= 256)
18463 return -EINVAL;
18464 if (irqchip_in_kernel(vcpu->kvm))
18465 return -ENXIO;
18466@@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cp
18467 .notifier_call = kvmclock_cpufreq_notifier
18468 };
18469
18470-int kvm_arch_init(void *opaque)
18471+int kvm_arch_init(const void *opaque)
18472 {
18473 int r, cpu;
18474- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18475+ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
18476
18477 if (kvm_x86_ops) {
18478 printk(KERN_ERR "kvm: already loaded the other module\n");
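
Note on the kvm_vcpu_ioctl_set_cpuid2()/get_cpuid2() hunks above: one bulk copy_from_user()/copy_to_user() of nent * sizeof(entry) bytes is replaced by an access_ok() check plus a per-entry __copy_from_user()/__copy_to_user() loop, after nent has already been bounded by KVM_MAX_CPUID_ENTRIES. A loose userspace sketch of the same shape, with memcpy standing in for the user-copy helpers and made-up names throughout:

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct entry { unsigned int function, index, eax, ebx, ecx, edx; };

#define MAX_ENTRIES 80

static struct entry table[MAX_ENTRIES];

static int set_entries(const struct entry *src, size_t nent)
{
	size_t i;

	if (nent > MAX_ENTRIES)
		return -E2BIG;			/* bound the count first */
	for (i = 0; i < nent; i++) {
		struct entry tmp;

		memcpy(&tmp, &src[i], sizeof(tmp));	/* one fixed-size record at a time */
		table[i] = tmp;
	}
	return 0;
}

int main(void)
{
	struct entry in[2] = { { .function = 0 }, { .function = 1 } };

	printf("%d\n", set_entries(in, 2));	/* prints 0 */
	return 0;
}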
18479diff -urNp linux-2.6.32.43/arch/x86/lib/atomic64_32.c linux-2.6.32.43/arch/x86/lib/atomic64_32.c
18480--- linux-2.6.32.43/arch/x86/lib/atomic64_32.c 2011-03-27 14:31:47.000000000 -0400
18481+++ linux-2.6.32.43/arch/x86/lib/atomic64_32.c 2011-05-04 17:56:28.000000000 -0400
18482@@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6
18483 }
18484 EXPORT_SYMBOL(atomic64_cmpxchg);
18485
18486+u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
18487+{
18488+ return cmpxchg8b(&ptr->counter, old_val, new_val);
18489+}
18490+EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
18491+
18492 /**
18493 * atomic64_xchg - xchg atomic64 variable
18494 * @ptr: pointer to type atomic64_t
18495@@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n
18496 EXPORT_SYMBOL(atomic64_xchg);
18497
18498 /**
18499+ * atomic64_xchg_unchecked - xchg atomic64 variable
18500+ * @ptr: pointer to type atomic64_unchecked_t
18501+ * @new_val: value to assign
18502+ *
18503+ * Atomically xchgs the value of @ptr to @new_val and returns
18504+ * the old value.
18505+ */
18506+u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18507+{
18508+ /*
18509+ * Try first with a (possibly incorrect) assumption about
18510+ * what we have there. We'll do two loops most likely,
18511+ * but we'll get an ownership MESI transaction straight away
18512+ * instead of a read transaction followed by a
18513+ * flush-for-ownership transaction:
18514+ */
18515+ u64 old_val, real_val = 0;
18516+
18517+ do {
18518+ old_val = real_val;
18519+
18520+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18521+
18522+ } while (real_val != old_val);
18523+
18524+ return old_val;
18525+}
18526+EXPORT_SYMBOL(atomic64_xchg_unchecked);
18527+
18528+/**
18529 * atomic64_set - set atomic64 variable
18530 * @ptr: pointer to type atomic64_t
18531 * @new_val: value to assign
18532@@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n
18533 EXPORT_SYMBOL(atomic64_set);
18534
18535 /**
18536-EXPORT_SYMBOL(atomic64_read);
18537+ * atomic64_unchecked_set - set atomic64 variable
18538+ * @ptr: pointer to type atomic64_unchecked_t
18539+ * @new_val: value to assign
18540+ *
18541+ * Atomically sets the value of @ptr to @new_val.
18542+ */
18543+void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18544+{
18545+ atomic64_xchg_unchecked(ptr, new_val);
18546+}
18547+EXPORT_SYMBOL(atomic64_set_unchecked);
18548+
18549+/**
18550 * atomic64_add_return - add and return
18551 * @delta: integer value to add
18552 * @ptr: pointer to type atomic64_t
18553@@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del
18554 }
18555 EXPORT_SYMBOL(atomic64_add_return);
18556
18557+/**
18558+ * atomic64_add_return_unchecked - add and return
18559+ * @delta: integer value to add
18560+ * @ptr: pointer to type atomic64_unchecked_t
18561+ *
18562+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
18563+ */
18564+noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18565+{
18566+ /*
18567+ * Try first with a (possibly incorrect) assumption about
18568+ * what we have there. We'll do two loops most likely,
18569+ * but we'll get an ownership MESI transaction straight away
18570+ * instead of a read transaction followed by a
18571+ * flush-for-ownership transaction:
18572+ */
18573+ u64 old_val, new_val, real_val = 0;
18574+
18575+ do {
18576+ old_val = real_val;
18577+ new_val = old_val + delta;
18578+
18579+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18580+
18581+ } while (real_val != old_val);
18582+
18583+ return new_val;
18584+}
18585+EXPORT_SYMBOL(atomic64_add_return_unchecked);
18586+
18587 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
18588 {
18589 return atomic64_add_return(-delta, ptr);
18590 }
18591 EXPORT_SYMBOL(atomic64_sub_return);
18592
18593+u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18594+{
18595+ return atomic64_add_return_unchecked(-delta, ptr);
18596+}
18597+EXPORT_SYMBOL(atomic64_sub_return_unchecked);
18598+
18599 u64 atomic64_inc_return(atomic64_t *ptr)
18600 {
18601 return atomic64_add_return(1, ptr);
18602 }
18603 EXPORT_SYMBOL(atomic64_inc_return);
18604
18605+u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
18606+{
18607+ return atomic64_add_return_unchecked(1, ptr);
18608+}
18609+EXPORT_SYMBOL(atomic64_inc_return_unchecked);
18610+
18611 u64 atomic64_dec_return(atomic64_t *ptr)
18612 {
18613 return atomic64_sub_return(1, ptr);
18614 }
18615 EXPORT_SYMBOL(atomic64_dec_return);
18616
18617+u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
18618+{
18619+ return atomic64_sub_return_unchecked(1, ptr);
18620+}
18621+EXPORT_SYMBOL(atomic64_dec_return_unchecked);
18622+
18623 /**
18624 * atomic64_add - add integer to atomic64 variable
18625 * @delta: integer value to add
18626@@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t
18627 EXPORT_SYMBOL(atomic64_add);
18628
18629 /**
18630+ * atomic64_add_unchecked - add integer to atomic64 variable
18631+ * @delta: integer value to add
18632+ * @ptr: pointer to type atomic64_unchecked_t
18633+ *
18634+ * Atomically adds @delta to @ptr.
18635+ */
18636+void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18637+{
18638+ atomic64_add_return_unchecked(delta, ptr);
18639+}
18640+EXPORT_SYMBOL(atomic64_add_unchecked);
18641+
18642+/**
18643 * atomic64_sub - subtract the atomic64 variable
18644 * @delta: integer value to subtract
18645 * @ptr: pointer to type atomic64_t
18646@@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t
18647 EXPORT_SYMBOL(atomic64_sub);
18648
18649 /**
18650+ * atomic64_sub_unchecked - subtract the atomic64 variable
18651+ * @delta: integer value to subtract
18652+ * @ptr: pointer to type atomic64_unchecked_t
18653+ *
18654+ * Atomically subtracts @delta from @ptr.
18655+ */
18656+void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18657+{
18658+ atomic64_add_unchecked(-delta, ptr);
18659+}
18660+EXPORT_SYMBOL(atomic64_sub_unchecked);
18661+
18662+/**
18663 * atomic64_sub_and_test - subtract value from variable and test result
18664 * @delta: integer value to subtract
18665 * @ptr: pointer to type atomic64_t
18666@@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
18667 EXPORT_SYMBOL(atomic64_inc);
18668
18669 /**
18670+ * atomic64_inc_unchecked - increment atomic64 variable
18671+ * @ptr: pointer to type atomic64_unchecked_t
18672+ *
18673+ * Atomically increments @ptr by 1.
18674+ */
18675+void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
18676+{
18677+ atomic64_add_unchecked(1, ptr);
18678+}
18679+EXPORT_SYMBOL(atomic64_inc_unchecked);
18680+
18681+/**
18682 * atomic64_dec - decrement atomic64 variable
18683 * @ptr: pointer to type atomic64_t
18684 *
18685@@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
18686 EXPORT_SYMBOL(atomic64_dec);
18687
18688 /**
18689+ * atomic64_dec_unchecked - decrement atomic64 variable
18690+ * @ptr: pointer to type atomic64_unchecked_t
18691+ *
18692+ * Atomically decrements @ptr by 1.
18693+ */
18694+void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
18695+{
18696+ atomic64_sub_unchecked(1, ptr);
18697+}
18698+EXPORT_SYMBOL(atomic64_dec_unchecked);
18699+
18700+/**
18701 * atomic64_dec_and_test - decrement and test
18702 * @ptr: pointer to type atomic64_t
18703 *
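
Note on the atomic64_*_unchecked helpers added above: they all reuse the existing cmpxchg retry loop, starting from a guessed old value, attempting the exchange, and looping with the value actually observed until the exchange sticks. A minimal standalone sketch of that loop with C11 atomics (illustration only, not the kernel implementation):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* start from a guess of 0, matching the "possibly incorrect assumption" in
 * the kernel comment, then retry with the value actually seen */
static uint64_t add_return(_Atomic uint64_t *ptr, uint64_t delta)
{
	uint64_t old = 0, next;

	do {
		next = old + delta;
		/* on failure, compare_exchange writes the current value into old */
	} while (!atomic_compare_exchange_weak(ptr, &old, next));

	return next;
}

int main(void)
{
	_Atomic uint64_t v = 5;

	printf("%llu\n", (unsigned long long)add_return(&v, 37));	/* prints 42 */
	return 0;
}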
18704diff -urNp linux-2.6.32.43/arch/x86/lib/checksum_32.S linux-2.6.32.43/arch/x86/lib/checksum_32.S
18705--- linux-2.6.32.43/arch/x86/lib/checksum_32.S 2011-03-27 14:31:47.000000000 -0400
18706+++ linux-2.6.32.43/arch/x86/lib/checksum_32.S 2011-04-17 15:56:46.000000000 -0400
18707@@ -28,7 +28,8 @@
18708 #include <linux/linkage.h>
18709 #include <asm/dwarf2.h>
18710 #include <asm/errno.h>
18711-
18712+#include <asm/segment.h>
18713+
18714 /*
18715 * computes a partial checksum, e.g. for TCP/UDP fragments
18716 */
18717@@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
18718
18719 #define ARGBASE 16
18720 #define FP 12
18721-
18722-ENTRY(csum_partial_copy_generic)
18723+
18724+ENTRY(csum_partial_copy_generic_to_user)
18725 CFI_STARTPROC
18726+
18727+#ifdef CONFIG_PAX_MEMORY_UDEREF
18728+ pushl %gs
18729+ CFI_ADJUST_CFA_OFFSET 4
18730+ popl %es
18731+ CFI_ADJUST_CFA_OFFSET -4
18732+ jmp csum_partial_copy_generic
18733+#endif
18734+
18735+ENTRY(csum_partial_copy_generic_from_user)
18736+
18737+#ifdef CONFIG_PAX_MEMORY_UDEREF
18738+ pushl %gs
18739+ CFI_ADJUST_CFA_OFFSET 4
18740+ popl %ds
18741+ CFI_ADJUST_CFA_OFFSET -4
18742+#endif
18743+
18744+ENTRY(csum_partial_copy_generic)
18745 subl $4,%esp
18746 CFI_ADJUST_CFA_OFFSET 4
18747 pushl %edi
18748@@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
18749 jmp 4f
18750 SRC(1: movw (%esi), %bx )
18751 addl $2, %esi
18752-DST( movw %bx, (%edi) )
18753+DST( movw %bx, %es:(%edi) )
18754 addl $2, %edi
18755 addw %bx, %ax
18756 adcl $0, %eax
18757@@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
18758 SRC(1: movl (%esi), %ebx )
18759 SRC( movl 4(%esi), %edx )
18760 adcl %ebx, %eax
18761-DST( movl %ebx, (%edi) )
18762+DST( movl %ebx, %es:(%edi) )
18763 adcl %edx, %eax
18764-DST( movl %edx, 4(%edi) )
18765+DST( movl %edx, %es:4(%edi) )
18766
18767 SRC( movl 8(%esi), %ebx )
18768 SRC( movl 12(%esi), %edx )
18769 adcl %ebx, %eax
18770-DST( movl %ebx, 8(%edi) )
18771+DST( movl %ebx, %es:8(%edi) )
18772 adcl %edx, %eax
18773-DST( movl %edx, 12(%edi) )
18774+DST( movl %edx, %es:12(%edi) )
18775
18776 SRC( movl 16(%esi), %ebx )
18777 SRC( movl 20(%esi), %edx )
18778 adcl %ebx, %eax
18779-DST( movl %ebx, 16(%edi) )
18780+DST( movl %ebx, %es:16(%edi) )
18781 adcl %edx, %eax
18782-DST( movl %edx, 20(%edi) )
18783+DST( movl %edx, %es:20(%edi) )
18784
18785 SRC( movl 24(%esi), %ebx )
18786 SRC( movl 28(%esi), %edx )
18787 adcl %ebx, %eax
18788-DST( movl %ebx, 24(%edi) )
18789+DST( movl %ebx, %es:24(%edi) )
18790 adcl %edx, %eax
18791-DST( movl %edx, 28(%edi) )
18792+DST( movl %edx, %es:28(%edi) )
18793
18794 lea 32(%esi), %esi
18795 lea 32(%edi), %edi
18796@@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
18797 shrl $2, %edx # This clears CF
18798 SRC(3: movl (%esi), %ebx )
18799 adcl %ebx, %eax
18800-DST( movl %ebx, (%edi) )
18801+DST( movl %ebx, %es:(%edi) )
18802 lea 4(%esi), %esi
18803 lea 4(%edi), %edi
18804 dec %edx
18805@@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
18806 jb 5f
18807 SRC( movw (%esi), %cx )
18808 leal 2(%esi), %esi
18809-DST( movw %cx, (%edi) )
18810+DST( movw %cx, %es:(%edi) )
18811 leal 2(%edi), %edi
18812 je 6f
18813 shll $16,%ecx
18814 SRC(5: movb (%esi), %cl )
18815-DST( movb %cl, (%edi) )
18816+DST( movb %cl, %es:(%edi) )
18817 6: addl %ecx, %eax
18818 adcl $0, %eax
18819 7:
18820@@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
18821
18822 6001:
18823 movl ARGBASE+20(%esp), %ebx # src_err_ptr
18824- movl $-EFAULT, (%ebx)
18825+ movl $-EFAULT, %ss:(%ebx)
18826
18827 # zero the complete destination - computing the rest
18828 # is too much work
18829@@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
18830
18831 6002:
18832 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18833- movl $-EFAULT,(%ebx)
18834+ movl $-EFAULT,%ss:(%ebx)
18835 jmp 5000b
18836
18837 .previous
18838
18839+ pushl %ss
18840+ CFI_ADJUST_CFA_OFFSET 4
18841+ popl %ds
18842+ CFI_ADJUST_CFA_OFFSET -4
18843+ pushl %ss
18844+ CFI_ADJUST_CFA_OFFSET 4
18845+ popl %es
18846+ CFI_ADJUST_CFA_OFFSET -4
18847 popl %ebx
18848 CFI_ADJUST_CFA_OFFSET -4
18849 CFI_RESTORE ebx
18850@@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
18851 CFI_ADJUST_CFA_OFFSET -4
18852 ret
18853 CFI_ENDPROC
18854-ENDPROC(csum_partial_copy_generic)
18855+ENDPROC(csum_partial_copy_generic_to_user)
18856
18857 #else
18858
18859 /* Version for PentiumII/PPro */
18860
18861 #define ROUND1(x) \
18862+ nop; nop; nop; \
18863 SRC(movl x(%esi), %ebx ) ; \
18864 addl %ebx, %eax ; \
18865- DST(movl %ebx, x(%edi) ) ;
18866+ DST(movl %ebx, %es:x(%edi)) ;
18867
18868 #define ROUND(x) \
18869+ nop; nop; nop; \
18870 SRC(movl x(%esi), %ebx ) ; \
18871 adcl %ebx, %eax ; \
18872- DST(movl %ebx, x(%edi) ) ;
18873+ DST(movl %ebx, %es:x(%edi)) ;
18874
18875 #define ARGBASE 12
18876-
18877-ENTRY(csum_partial_copy_generic)
18878+
18879+ENTRY(csum_partial_copy_generic_to_user)
18880 CFI_STARTPROC
18881+
18882+#ifdef CONFIG_PAX_MEMORY_UDEREF
18883+ pushl %gs
18884+ CFI_ADJUST_CFA_OFFSET 4
18885+ popl %es
18886+ CFI_ADJUST_CFA_OFFSET -4
18887+ jmp csum_partial_copy_generic
18888+#endif
18889+
18890+ENTRY(csum_partial_copy_generic_from_user)
18891+
18892+#ifdef CONFIG_PAX_MEMORY_UDEREF
18893+ pushl %gs
18894+ CFI_ADJUST_CFA_OFFSET 4
18895+ popl %ds
18896+ CFI_ADJUST_CFA_OFFSET -4
18897+#endif
18898+
18899+ENTRY(csum_partial_copy_generic)
18900 pushl %ebx
18901 CFI_ADJUST_CFA_OFFSET 4
18902 CFI_REL_OFFSET ebx, 0
18903@@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
18904 subl %ebx, %edi
18905 lea -1(%esi),%edx
18906 andl $-32,%edx
18907- lea 3f(%ebx,%ebx), %ebx
18908+ lea 3f(%ebx,%ebx,2), %ebx
18909 testl %esi, %esi
18910 jmp *%ebx
18911 1: addl $64,%esi
18912@@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
18913 jb 5f
18914 SRC( movw (%esi), %dx )
18915 leal 2(%esi), %esi
18916-DST( movw %dx, (%edi) )
18917+DST( movw %dx, %es:(%edi) )
18918 leal 2(%edi), %edi
18919 je 6f
18920 shll $16,%edx
18921 5:
18922 SRC( movb (%esi), %dl )
18923-DST( movb %dl, (%edi) )
18924+DST( movb %dl, %es:(%edi) )
18925 6: addl %edx, %eax
18926 adcl $0, %eax
18927 7:
18928 .section .fixup, "ax"
18929 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
18930- movl $-EFAULT, (%ebx)
18931+ movl $-EFAULT, %ss:(%ebx)
18932 # zero the complete destination (computing the rest is too much work)
18933 movl ARGBASE+8(%esp),%edi # dst
18934 movl ARGBASE+12(%esp),%ecx # len
18935@@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
18936 rep; stosb
18937 jmp 7b
18938 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
18939- movl $-EFAULT, (%ebx)
18940+ movl $-EFAULT, %ss:(%ebx)
18941 jmp 7b
18942 .previous
18943
18944+#ifdef CONFIG_PAX_MEMORY_UDEREF
18945+ pushl %ss
18946+ CFI_ADJUST_CFA_OFFSET 4
18947+ popl %ds
18948+ CFI_ADJUST_CFA_OFFSET -4
18949+ pushl %ss
18950+ CFI_ADJUST_CFA_OFFSET 4
18951+ popl %es
18952+ CFI_ADJUST_CFA_OFFSET -4
18953+#endif
18954+
18955 popl %esi
18956 CFI_ADJUST_CFA_OFFSET -4
18957 CFI_RESTORE esi
18958@@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
18959 CFI_RESTORE ebx
18960 ret
18961 CFI_ENDPROC
18962-ENDPROC(csum_partial_copy_generic)
18963+ENDPROC(csum_partial_copy_generic_to_user)
18964
18965 #undef ROUND
18966 #undef ROUND1
18967diff -urNp linux-2.6.32.43/arch/x86/lib/clear_page_64.S linux-2.6.32.43/arch/x86/lib/clear_page_64.S
18968--- linux-2.6.32.43/arch/x86/lib/clear_page_64.S 2011-03-27 14:31:47.000000000 -0400
18969+++ linux-2.6.32.43/arch/x86/lib/clear_page_64.S 2011-04-17 15:56:46.000000000 -0400
18970@@ -43,7 +43,7 @@ ENDPROC(clear_page)
18971
18972 #include <asm/cpufeature.h>
18973
18974- .section .altinstr_replacement,"ax"
18975+ .section .altinstr_replacement,"a"
18976 1: .byte 0xeb /* jmp <disp8> */
18977 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
18978 2:
18979diff -urNp linux-2.6.32.43/arch/x86/lib/copy_page_64.S linux-2.6.32.43/arch/x86/lib/copy_page_64.S
18980--- linux-2.6.32.43/arch/x86/lib/copy_page_64.S 2011-03-27 14:31:47.000000000 -0400
18981+++ linux-2.6.32.43/arch/x86/lib/copy_page_64.S 2011-04-17 15:56:46.000000000 -0400
18982@@ -104,7 +104,7 @@ ENDPROC(copy_page)
18983
18984 #include <asm/cpufeature.h>
18985
18986- .section .altinstr_replacement,"ax"
18987+ .section .altinstr_replacement,"a"
18988 1: .byte 0xeb /* jmp <disp8> */
18989 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
18990 2:
18991diff -urNp linux-2.6.32.43/arch/x86/lib/copy_user_64.S linux-2.6.32.43/arch/x86/lib/copy_user_64.S
18992--- linux-2.6.32.43/arch/x86/lib/copy_user_64.S 2011-06-25 12:55:34.000000000 -0400
18993+++ linux-2.6.32.43/arch/x86/lib/copy_user_64.S 2011-06-25 12:56:37.000000000 -0400
18994@@ -15,13 +15,14 @@
18995 #include <asm/asm-offsets.h>
18996 #include <asm/thread_info.h>
18997 #include <asm/cpufeature.h>
18998+#include <asm/pgtable.h>
18999
19000 .macro ALTERNATIVE_JUMP feature,orig,alt
19001 0:
19002 .byte 0xe9 /* 32bit jump */
19003 .long \orig-1f /* by default jump to orig */
19004 1:
19005- .section .altinstr_replacement,"ax"
19006+ .section .altinstr_replacement,"a"
19007 2: .byte 0xe9 /* near jump with 32bit immediate */
19008 .long \alt-1b /* offset */ /* or alternatively to alt */
19009 .previous
19010@@ -64,49 +65,19 @@
19011 #endif
19012 .endm
19013
19014-/* Standard copy_to_user with segment limit checking */
19015-ENTRY(copy_to_user)
19016- CFI_STARTPROC
19017- GET_THREAD_INFO(%rax)
19018- movq %rdi,%rcx
19019- addq %rdx,%rcx
19020- jc bad_to_user
19021- cmpq TI_addr_limit(%rax),%rcx
19022- ja bad_to_user
19023- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19024- CFI_ENDPROC
19025-ENDPROC(copy_to_user)
19026-
19027-/* Standard copy_from_user with segment limit checking */
19028-ENTRY(copy_from_user)
19029- CFI_STARTPROC
19030- GET_THREAD_INFO(%rax)
19031- movq %rsi,%rcx
19032- addq %rdx,%rcx
19033- jc bad_from_user
19034- cmpq TI_addr_limit(%rax),%rcx
19035- ja bad_from_user
19036- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19037- CFI_ENDPROC
19038-ENDPROC(copy_from_user)
19039-
19040 ENTRY(copy_user_generic)
19041 CFI_STARTPROC
19042 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19043 CFI_ENDPROC
19044 ENDPROC(copy_user_generic)
19045
19046-ENTRY(__copy_from_user_inatomic)
19047- CFI_STARTPROC
19048- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19049- CFI_ENDPROC
19050-ENDPROC(__copy_from_user_inatomic)
19051-
19052 .section .fixup,"ax"
19053 /* must zero dest */
19054 ENTRY(bad_from_user)
19055 bad_from_user:
19056 CFI_STARTPROC
19057+ testl %edx,%edx
19058+ js bad_to_user
19059 movl %edx,%ecx
19060 xorl %eax,%eax
19061 rep
19062diff -urNp linux-2.6.32.43/arch/x86/lib/copy_user_nocache_64.S linux-2.6.32.43/arch/x86/lib/copy_user_nocache_64.S
19063--- linux-2.6.32.43/arch/x86/lib/copy_user_nocache_64.S 2011-03-27 14:31:47.000000000 -0400
19064+++ linux-2.6.32.43/arch/x86/lib/copy_user_nocache_64.S 2011-04-17 15:56:46.000000000 -0400
19065@@ -14,6 +14,7 @@
19066 #include <asm/current.h>
19067 #include <asm/asm-offsets.h>
19068 #include <asm/thread_info.h>
19069+#include <asm/pgtable.h>
19070
19071 .macro ALIGN_DESTINATION
19072 #ifdef FIX_ALIGNMENT
19073@@ -50,6 +51,15 @@
19074 */
19075 ENTRY(__copy_user_nocache)
19076 CFI_STARTPROC
19077+
19078+#ifdef CONFIG_PAX_MEMORY_UDEREF
19079+ mov $PAX_USER_SHADOW_BASE,%rcx
19080+ cmp %rcx,%rsi
19081+ jae 1f
19082+ add %rcx,%rsi
19083+1:
19084+#endif
19085+
19086 cmpl $8,%edx
19087 jb 20f /* less then 8 bytes, go to byte copy loop */
19088 ALIGN_DESTINATION
19089diff -urNp linux-2.6.32.43/arch/x86/lib/csum-wrappers_64.c linux-2.6.32.43/arch/x86/lib/csum-wrappers_64.c
19090--- linux-2.6.32.43/arch/x86/lib/csum-wrappers_64.c 2011-03-27 14:31:47.000000000 -0400
19091+++ linux-2.6.32.43/arch/x86/lib/csum-wrappers_64.c 2011-05-04 17:56:20.000000000 -0400
19092@@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
19093 len -= 2;
19094 }
19095 }
19096+
19097+#ifdef CONFIG_PAX_MEMORY_UDEREF
19098+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19099+ src += PAX_USER_SHADOW_BASE;
19100+#endif
19101+
19102 isum = csum_partial_copy_generic((__force const void *)src,
19103 dst, len, isum, errp, NULL);
19104 if (unlikely(*errp))
19105@@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
19106 }
19107
19108 *errp = 0;
19109+
19110+#ifdef CONFIG_PAX_MEMORY_UDEREF
19111+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19112+ dst += PAX_USER_SHADOW_BASE;
19113+#endif
19114+
19115 return csum_partial_copy_generic(src, (void __force *)dst,
19116 len, isum, NULL, errp);
19117 }
19118diff -urNp linux-2.6.32.43/arch/x86/lib/getuser.S linux-2.6.32.43/arch/x86/lib/getuser.S
19119--- linux-2.6.32.43/arch/x86/lib/getuser.S 2011-03-27 14:31:47.000000000 -0400
19120+++ linux-2.6.32.43/arch/x86/lib/getuser.S 2011-04-17 15:56:46.000000000 -0400
19121@@ -33,14 +33,35 @@
19122 #include <asm/asm-offsets.h>
19123 #include <asm/thread_info.h>
19124 #include <asm/asm.h>
19125+#include <asm/segment.h>
19126+#include <asm/pgtable.h>
19127+
19128+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19129+#define __copyuser_seg gs;
19130+#else
19131+#define __copyuser_seg
19132+#endif
19133
19134 .text
19135 ENTRY(__get_user_1)
19136 CFI_STARTPROC
19137+
19138+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19139 GET_THREAD_INFO(%_ASM_DX)
19140 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19141 jae bad_get_user
19142-1: movzb (%_ASM_AX),%edx
19143+
19144+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19145+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19146+ cmp %_ASM_DX,%_ASM_AX
19147+ jae 1234f
19148+ add %_ASM_DX,%_ASM_AX
19149+1234:
19150+#endif
19151+
19152+#endif
19153+
19154+1: __copyuser_seg movzb (%_ASM_AX),%edx
19155 xor %eax,%eax
19156 ret
19157 CFI_ENDPROC
19158@@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
19159 ENTRY(__get_user_2)
19160 CFI_STARTPROC
19161 add $1,%_ASM_AX
19162+
19163+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19164 jc bad_get_user
19165 GET_THREAD_INFO(%_ASM_DX)
19166 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19167 jae bad_get_user
19168-2: movzwl -1(%_ASM_AX),%edx
19169+
19170+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19171+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19172+ cmp %_ASM_DX,%_ASM_AX
19173+ jae 1234f
19174+ add %_ASM_DX,%_ASM_AX
19175+1234:
19176+#endif
19177+
19178+#endif
19179+
19180+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19181 xor %eax,%eax
19182 ret
19183 CFI_ENDPROC
19184@@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
19185 ENTRY(__get_user_4)
19186 CFI_STARTPROC
19187 add $3,%_ASM_AX
19188+
19189+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19190 jc bad_get_user
19191 GET_THREAD_INFO(%_ASM_DX)
19192 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19193 jae bad_get_user
19194-3: mov -3(%_ASM_AX),%edx
19195+
19196+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19197+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19198+ cmp %_ASM_DX,%_ASM_AX
19199+ jae 1234f
19200+ add %_ASM_DX,%_ASM_AX
19201+1234:
19202+#endif
19203+
19204+#endif
19205+
19206+3: __copyuser_seg mov -3(%_ASM_AX),%edx
19207 xor %eax,%eax
19208 ret
19209 CFI_ENDPROC
19210@@ -80,6 +127,15 @@ ENTRY(__get_user_8)
19211 GET_THREAD_INFO(%_ASM_DX)
19212 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19213 jae bad_get_user
19214+
19215+#ifdef CONFIG_PAX_MEMORY_UDEREF
19216+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19217+ cmp %_ASM_DX,%_ASM_AX
19218+ jae 1234f
19219+ add %_ASM_DX,%_ASM_AX
19220+1234:
19221+#endif
19222+
19223 4: movq -7(%_ASM_AX),%_ASM_DX
19224 xor %eax,%eax
19225 ret
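
Note on the UDEREF blocks added to __get_user_* above (and to __copy_user_nocache and the csum wrappers earlier): they all apply the same rewrite, shifting a user pointer that lies below PAX_USER_SHADOW_BASE up by that base before it is dereferenced. In C the arithmetic looks like the sketch below; the base constant here is made up for illustration, the real one comes from asm/pgtable.h:

#include <stdint.h>
#include <stdio.h>

/* SHADOW_BASE is an illustration value, not the real PAX_USER_SHADOW_BASE */
#define SHADOW_BASE 0x100000000000ULL

static uint64_t to_shadow(uint64_t addr)
{
	return addr < SHADOW_BASE ? addr + SHADOW_BASE : addr;
}

int main(void)
{
	printf("%#llx\n", (unsigned long long)to_shadow(0x7fff0000ULL));	/* shifted up */
	printf("%#llx\n", (unsigned long long)to_shadow(SHADOW_BASE + 0x10));	/* unchanged */
	return 0;
}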
19226diff -urNp linux-2.6.32.43/arch/x86/lib/memcpy_64.S linux-2.6.32.43/arch/x86/lib/memcpy_64.S
19227--- linux-2.6.32.43/arch/x86/lib/memcpy_64.S 2011-03-27 14:31:47.000000000 -0400
19228+++ linux-2.6.32.43/arch/x86/lib/memcpy_64.S 2011-04-17 15:56:46.000000000 -0400
19229@@ -128,7 +128,7 @@ ENDPROC(__memcpy)
19230 * It is also a lot simpler. Use this when possible:
19231 */
19232
19233- .section .altinstr_replacement, "ax"
19234+ .section .altinstr_replacement, "a"
19235 1: .byte 0xeb /* jmp <disp8> */
19236 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
19237 2:
19238diff -urNp linux-2.6.32.43/arch/x86/lib/memset_64.S linux-2.6.32.43/arch/x86/lib/memset_64.S
19239--- linux-2.6.32.43/arch/x86/lib/memset_64.S 2011-03-27 14:31:47.000000000 -0400
19240+++ linux-2.6.32.43/arch/x86/lib/memset_64.S 2011-04-17 15:56:46.000000000 -0400
19241@@ -118,7 +118,7 @@ ENDPROC(__memset)
19242
19243 #include <asm/cpufeature.h>
19244
19245- .section .altinstr_replacement,"ax"
19246+ .section .altinstr_replacement,"a"
19247 1: .byte 0xeb /* jmp <disp8> */
19248 .byte (memset_c - memset) - (2f - 1b) /* offset */
19249 2:
19250diff -urNp linux-2.6.32.43/arch/x86/lib/mmx_32.c linux-2.6.32.43/arch/x86/lib/mmx_32.c
19251--- linux-2.6.32.43/arch/x86/lib/mmx_32.c 2011-03-27 14:31:47.000000000 -0400
19252+++ linux-2.6.32.43/arch/x86/lib/mmx_32.c 2011-04-17 15:56:46.000000000 -0400
19253@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
19254 {
19255 void *p;
19256 int i;
19257+ unsigned long cr0;
19258
19259 if (unlikely(in_interrupt()))
19260 return __memcpy(to, from, len);
19261@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
19262 kernel_fpu_begin();
19263
19264 __asm__ __volatile__ (
19265- "1: prefetch (%0)\n" /* This set is 28 bytes */
19266- " prefetch 64(%0)\n"
19267- " prefetch 128(%0)\n"
19268- " prefetch 192(%0)\n"
19269- " prefetch 256(%0)\n"
19270+ "1: prefetch (%1)\n" /* This set is 28 bytes */
19271+ " prefetch 64(%1)\n"
19272+ " prefetch 128(%1)\n"
19273+ " prefetch 192(%1)\n"
19274+ " prefetch 256(%1)\n"
19275 "2: \n"
19276 ".section .fixup, \"ax\"\n"
19277- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19278+ "3: \n"
19279+
19280+#ifdef CONFIG_PAX_KERNEXEC
19281+ " movl %%cr0, %0\n"
19282+ " movl %0, %%eax\n"
19283+ " andl $0xFFFEFFFF, %%eax\n"
19284+ " movl %%eax, %%cr0\n"
19285+#endif
19286+
19287+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19288+
19289+#ifdef CONFIG_PAX_KERNEXEC
19290+ " movl %0, %%cr0\n"
19291+#endif
19292+
19293 " jmp 2b\n"
19294 ".previous\n"
19295 _ASM_EXTABLE(1b, 3b)
19296- : : "r" (from));
19297+ : "=&r" (cr0) : "r" (from) : "ax");
19298
19299 for ( ; i > 5; i--) {
19300 __asm__ __volatile__ (
19301- "1: prefetch 320(%0)\n"
19302- "2: movq (%0), %%mm0\n"
19303- " movq 8(%0), %%mm1\n"
19304- " movq 16(%0), %%mm2\n"
19305- " movq 24(%0), %%mm3\n"
19306- " movq %%mm0, (%1)\n"
19307- " movq %%mm1, 8(%1)\n"
19308- " movq %%mm2, 16(%1)\n"
19309- " movq %%mm3, 24(%1)\n"
19310- " movq 32(%0), %%mm0\n"
19311- " movq 40(%0), %%mm1\n"
19312- " movq 48(%0), %%mm2\n"
19313- " movq 56(%0), %%mm3\n"
19314- " movq %%mm0, 32(%1)\n"
19315- " movq %%mm1, 40(%1)\n"
19316- " movq %%mm2, 48(%1)\n"
19317- " movq %%mm3, 56(%1)\n"
19318+ "1: prefetch 320(%1)\n"
19319+ "2: movq (%1), %%mm0\n"
19320+ " movq 8(%1), %%mm1\n"
19321+ " movq 16(%1), %%mm2\n"
19322+ " movq 24(%1), %%mm3\n"
19323+ " movq %%mm0, (%2)\n"
19324+ " movq %%mm1, 8(%2)\n"
19325+ " movq %%mm2, 16(%2)\n"
19326+ " movq %%mm3, 24(%2)\n"
19327+ " movq 32(%1), %%mm0\n"
19328+ " movq 40(%1), %%mm1\n"
19329+ " movq 48(%1), %%mm2\n"
19330+ " movq 56(%1), %%mm3\n"
19331+ " movq %%mm0, 32(%2)\n"
19332+ " movq %%mm1, 40(%2)\n"
19333+ " movq %%mm2, 48(%2)\n"
19334+ " movq %%mm3, 56(%2)\n"
19335 ".section .fixup, \"ax\"\n"
19336- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19337+ "3:\n"
19338+
19339+#ifdef CONFIG_PAX_KERNEXEC
19340+ " movl %%cr0, %0\n"
19341+ " movl %0, %%eax\n"
19342+ " andl $0xFFFEFFFF, %%eax\n"
19343+ " movl %%eax, %%cr0\n"
19344+#endif
19345+
19346+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19347+
19348+#ifdef CONFIG_PAX_KERNEXEC
19349+ " movl %0, %%cr0\n"
19350+#endif
19351+
19352 " jmp 2b\n"
19353 ".previous\n"
19354 _ASM_EXTABLE(1b, 3b)
19355- : : "r" (from), "r" (to) : "memory");
19356+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19357
19358 from += 64;
19359 to += 64;
19360@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
19361 static void fast_copy_page(void *to, void *from)
19362 {
19363 int i;
19364+ unsigned long cr0;
19365
19366 kernel_fpu_begin();
19367
19368@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
19369 * but that is for later. -AV
19370 */
19371 __asm__ __volatile__(
19372- "1: prefetch (%0)\n"
19373- " prefetch 64(%0)\n"
19374- " prefetch 128(%0)\n"
19375- " prefetch 192(%0)\n"
19376- " prefetch 256(%0)\n"
19377+ "1: prefetch (%1)\n"
19378+ " prefetch 64(%1)\n"
19379+ " prefetch 128(%1)\n"
19380+ " prefetch 192(%1)\n"
19381+ " prefetch 256(%1)\n"
19382 "2: \n"
19383 ".section .fixup, \"ax\"\n"
19384- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19385+ "3: \n"
19386+
19387+#ifdef CONFIG_PAX_KERNEXEC
19388+ " movl %%cr0, %0\n"
19389+ " movl %0, %%eax\n"
19390+ " andl $0xFFFEFFFF, %%eax\n"
19391+ " movl %%eax, %%cr0\n"
19392+#endif
19393+
19394+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19395+
19396+#ifdef CONFIG_PAX_KERNEXEC
19397+ " movl %0, %%cr0\n"
19398+#endif
19399+
19400 " jmp 2b\n"
19401 ".previous\n"
19402- _ASM_EXTABLE(1b, 3b) : : "r" (from));
19403+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19404
19405 for (i = 0; i < (4096-320)/64; i++) {
19406 __asm__ __volatile__ (
19407- "1: prefetch 320(%0)\n"
19408- "2: movq (%0), %%mm0\n"
19409- " movntq %%mm0, (%1)\n"
19410- " movq 8(%0), %%mm1\n"
19411- " movntq %%mm1, 8(%1)\n"
19412- " movq 16(%0), %%mm2\n"
19413- " movntq %%mm2, 16(%1)\n"
19414- " movq 24(%0), %%mm3\n"
19415- " movntq %%mm3, 24(%1)\n"
19416- " movq 32(%0), %%mm4\n"
19417- " movntq %%mm4, 32(%1)\n"
19418- " movq 40(%0), %%mm5\n"
19419- " movntq %%mm5, 40(%1)\n"
19420- " movq 48(%0), %%mm6\n"
19421- " movntq %%mm6, 48(%1)\n"
19422- " movq 56(%0), %%mm7\n"
19423- " movntq %%mm7, 56(%1)\n"
19424+ "1: prefetch 320(%1)\n"
19425+ "2: movq (%1), %%mm0\n"
19426+ " movntq %%mm0, (%2)\n"
19427+ " movq 8(%1), %%mm1\n"
19428+ " movntq %%mm1, 8(%2)\n"
19429+ " movq 16(%1), %%mm2\n"
19430+ " movntq %%mm2, 16(%2)\n"
19431+ " movq 24(%1), %%mm3\n"
19432+ " movntq %%mm3, 24(%2)\n"
19433+ " movq 32(%1), %%mm4\n"
19434+ " movntq %%mm4, 32(%2)\n"
19435+ " movq 40(%1), %%mm5\n"
19436+ " movntq %%mm5, 40(%2)\n"
19437+ " movq 48(%1), %%mm6\n"
19438+ " movntq %%mm6, 48(%2)\n"
19439+ " movq 56(%1), %%mm7\n"
19440+ " movntq %%mm7, 56(%2)\n"
19441 ".section .fixup, \"ax\"\n"
19442- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19443+ "3:\n"
19444+
19445+#ifdef CONFIG_PAX_KERNEXEC
19446+ " movl %%cr0, %0\n"
19447+ " movl %0, %%eax\n"
19448+ " andl $0xFFFEFFFF, %%eax\n"
19449+ " movl %%eax, %%cr0\n"
19450+#endif
19451+
19452+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19453+
19454+#ifdef CONFIG_PAX_KERNEXEC
19455+ " movl %0, %%cr0\n"
19456+#endif
19457+
19458 " jmp 2b\n"
19459 ".previous\n"
19460- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
19461+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19462
19463 from += 64;
19464 to += 64;
19465@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
19466 static void fast_copy_page(void *to, void *from)
19467 {
19468 int i;
19469+ unsigned long cr0;
19470
19471 kernel_fpu_begin();
19472
19473 __asm__ __volatile__ (
19474- "1: prefetch (%0)\n"
19475- " prefetch 64(%0)\n"
19476- " prefetch 128(%0)\n"
19477- " prefetch 192(%0)\n"
19478- " prefetch 256(%0)\n"
19479+ "1: prefetch (%1)\n"
19480+ " prefetch 64(%1)\n"
19481+ " prefetch 128(%1)\n"
19482+ " prefetch 192(%1)\n"
19483+ " prefetch 256(%1)\n"
19484 "2: \n"
19485 ".section .fixup, \"ax\"\n"
19486- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19487+ "3: \n"
19488+
19489+#ifdef CONFIG_PAX_KERNEXEC
19490+ " movl %%cr0, %0\n"
19491+ " movl %0, %%eax\n"
19492+ " andl $0xFFFEFFFF, %%eax\n"
19493+ " movl %%eax, %%cr0\n"
19494+#endif
19495+
19496+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19497+
19498+#ifdef CONFIG_PAX_KERNEXEC
19499+ " movl %0, %%cr0\n"
19500+#endif
19501+
19502 " jmp 2b\n"
19503 ".previous\n"
19504- _ASM_EXTABLE(1b, 3b) : : "r" (from));
19505+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19506
19507 for (i = 0; i < 4096/64; i++) {
19508 __asm__ __volatile__ (
19509- "1: prefetch 320(%0)\n"
19510- "2: movq (%0), %%mm0\n"
19511- " movq 8(%0), %%mm1\n"
19512- " movq 16(%0), %%mm2\n"
19513- " movq 24(%0), %%mm3\n"
19514- " movq %%mm0, (%1)\n"
19515- " movq %%mm1, 8(%1)\n"
19516- " movq %%mm2, 16(%1)\n"
19517- " movq %%mm3, 24(%1)\n"
19518- " movq 32(%0), %%mm0\n"
19519- " movq 40(%0), %%mm1\n"
19520- " movq 48(%0), %%mm2\n"
19521- " movq 56(%0), %%mm3\n"
19522- " movq %%mm0, 32(%1)\n"
19523- " movq %%mm1, 40(%1)\n"
19524- " movq %%mm2, 48(%1)\n"
19525- " movq %%mm3, 56(%1)\n"
19526+ "1: prefetch 320(%1)\n"
19527+ "2: movq (%1), %%mm0\n"
19528+ " movq 8(%1), %%mm1\n"
19529+ " movq 16(%1), %%mm2\n"
19530+ " movq 24(%1), %%mm3\n"
19531+ " movq %%mm0, (%2)\n"
19532+ " movq %%mm1, 8(%2)\n"
19533+ " movq %%mm2, 16(%2)\n"
19534+ " movq %%mm3, 24(%2)\n"
19535+ " movq 32(%1), %%mm0\n"
19536+ " movq 40(%1), %%mm1\n"
19537+ " movq 48(%1), %%mm2\n"
19538+ " movq 56(%1), %%mm3\n"
19539+ " movq %%mm0, 32(%2)\n"
19540+ " movq %%mm1, 40(%2)\n"
19541+ " movq %%mm2, 48(%2)\n"
19542+ " movq %%mm3, 56(%2)\n"
19543 ".section .fixup, \"ax\"\n"
19544- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19545+ "3:\n"
19546+
19547+#ifdef CONFIG_PAX_KERNEXEC
19548+ " movl %%cr0, %0\n"
19549+ " movl %0, %%eax\n"
19550+ " andl $0xFFFEFFFF, %%eax\n"
19551+ " movl %%eax, %%cr0\n"
19552+#endif
19553+
19554+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19555+
19556+#ifdef CONFIG_PAX_KERNEXEC
19557+ " movl %0, %%cr0\n"
19558+#endif
19559+
19560 " jmp 2b\n"
19561 ".previous\n"
19562 _ASM_EXTABLE(1b, 3b)
19563- : : "r" (from), "r" (to) : "memory");
19564+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19565
19566 from += 64;
19567 to += 64;
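
The mmx_32.c hunks above guard the self-modifying .fixup paths: the fixup writes a 16-bit jmp (the movw $0x1AEB / $0x05EB stores) directly over kernel text, so under KERNEXEC, where that text is write-protected, the write is bracketed by saving %cr0, clearing its WP bit (bit 16, hence the 0xFFFEFFFF mask), and restoring the saved value afterwards. Below is a userspace model of the open/write/close pattern; an ordinary variable stands in for the control register, since CR0 cannot be touched from user code, and the names are illustrative only.

#include <stdint.h>
#include <stdio.h>

#define X86_CR0_WP (1UL << 16)   /* write-protect bit; andl $0xFFFEFFFF clears it */

/* A plain variable standing in for %cr0. */
static unsigned long fake_cr0 = 0x80050033UL;   /* arbitrary CR0-like value */

/* Mirrors the CONFIG_PAX_KERNEXEC prologue: save CR0, clear WP. */
static unsigned long kernexec_open(void)
{
    unsigned long saved = fake_cr0;
    fake_cr0 = saved & ~X86_CR0_WP;
    return saved;
}

/* Mirrors the epilogue: restore the saved CR0. */
static void kernexec_close(unsigned long saved)
{
    fake_cr0 = saved;
}

int main(void)
{
    uint16_t insn = 0x180f;      /* pretend opcode at the patched label */

    unsigned long saved = kernexec_open();
    insn = 0x1AEB;               /* what the fixup stores: EB 1A, a short jmp */
    kernexec_close(saved);

    printf("cr0 restored to %#lx, patched insn %#x\n", fake_cr0, insn);
    return 0;
}

Restoring the saved value rather than unconditionally setting WP keeps whatever other CR0 bits were in effect, which is why the patch adds a cr0 output operand to each asm block instead of hard-coding one.
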
19568diff -urNp linux-2.6.32.43/arch/x86/lib/putuser.S linux-2.6.32.43/arch/x86/lib/putuser.S
19569--- linux-2.6.32.43/arch/x86/lib/putuser.S 2011-03-27 14:31:47.000000000 -0400
19570+++ linux-2.6.32.43/arch/x86/lib/putuser.S 2011-04-17 15:56:46.000000000 -0400
19571@@ -15,7 +15,8 @@
19572 #include <asm/thread_info.h>
19573 #include <asm/errno.h>
19574 #include <asm/asm.h>
19575-
19576+#include <asm/segment.h>
19577+#include <asm/pgtable.h>
19578
19579 /*
19580 * __put_user_X
19581@@ -29,52 +30,119 @@
19582 * as they get called from within inline assembly.
19583 */
19584
19585-#define ENTER CFI_STARTPROC ; \
19586- GET_THREAD_INFO(%_ASM_BX)
19587+#define ENTER CFI_STARTPROC
19588 #define EXIT ret ; \
19589 CFI_ENDPROC
19590
19591+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19592+#define _DEST %_ASM_CX,%_ASM_BX
19593+#else
19594+#define _DEST %_ASM_CX
19595+#endif
19596+
19597+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19598+#define __copyuser_seg gs;
19599+#else
19600+#define __copyuser_seg
19601+#endif
19602+
19603 .text
19604 ENTRY(__put_user_1)
19605 ENTER
19606+
19607+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19608+ GET_THREAD_INFO(%_ASM_BX)
19609 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
19610 jae bad_put_user
19611-1: movb %al,(%_ASM_CX)
19612+
19613+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19614+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19615+ cmp %_ASM_BX,%_ASM_CX
19616+ jb 1234f
19617+ xor %ebx,%ebx
19618+1234:
19619+#endif
19620+
19621+#endif
19622+
19623+1: __copyuser_seg movb %al,(_DEST)
19624 xor %eax,%eax
19625 EXIT
19626 ENDPROC(__put_user_1)
19627
19628 ENTRY(__put_user_2)
19629 ENTER
19630+
19631+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19632+ GET_THREAD_INFO(%_ASM_BX)
19633 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19634 sub $1,%_ASM_BX
19635 cmp %_ASM_BX,%_ASM_CX
19636 jae bad_put_user
19637-2: movw %ax,(%_ASM_CX)
19638+
19639+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19640+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19641+ cmp %_ASM_BX,%_ASM_CX
19642+ jb 1234f
19643+ xor %ebx,%ebx
19644+1234:
19645+#endif
19646+
19647+#endif
19648+
19649+2: __copyuser_seg movw %ax,(_DEST)
19650 xor %eax,%eax
19651 EXIT
19652 ENDPROC(__put_user_2)
19653
19654 ENTRY(__put_user_4)
19655 ENTER
19656+
19657+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19658+ GET_THREAD_INFO(%_ASM_BX)
19659 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19660 sub $3,%_ASM_BX
19661 cmp %_ASM_BX,%_ASM_CX
19662 jae bad_put_user
19663-3: movl %eax,(%_ASM_CX)
19664+
19665+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19666+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19667+ cmp %_ASM_BX,%_ASM_CX
19668+ jb 1234f
19669+ xor %ebx,%ebx
19670+1234:
19671+#endif
19672+
19673+#endif
19674+
19675+3: __copyuser_seg movl %eax,(_DEST)
19676 xor %eax,%eax
19677 EXIT
19678 ENDPROC(__put_user_4)
19679
19680 ENTRY(__put_user_8)
19681 ENTER
19682+
19683+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19684+ GET_THREAD_INFO(%_ASM_BX)
19685 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19686 sub $7,%_ASM_BX
19687 cmp %_ASM_BX,%_ASM_CX
19688 jae bad_put_user
19689-4: mov %_ASM_AX,(%_ASM_CX)
19690+
19691+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19692+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19693+ cmp %_ASM_BX,%_ASM_CX
19694+ jb 1234f
19695+ xor %ebx,%ebx
19696+1234:
19697+#endif
19698+
19699+#endif
19700+
19701+4: __copyuser_seg mov %_ASM_AX,(_DEST)
19702 #ifdef CONFIG_X86_32
19703-5: movl %edx,4(%_ASM_CX)
19704+5: __copyuser_seg movl %edx,4(_DEST)
19705 #endif
19706 xor %eax,%eax
19707 EXIT
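
The putuser.S changes use the same shadow-base idea as the get_user hunks earlier, but expressed through a two-register destination: %_ASM_BX is loaded with PAX_USER_SHADOW_BASE and then zeroed when the pointer is already at or above the base, so the store through (_DEST), which expands to (%_ASM_CX,%_ASM_BX) on x86_64 UDEREF, lands either in the shadow alias or at the original address. A small sketch of that base selection, again with a stand-in SHADOW_BASE value:

#include <stdint.h>
#include <stdio.h>

#define SHADOW_BASE 0x0000100000000000ULL   /* stand-in, as before */

/* Returns what ends up in %_ASM_BX: the shadow offset for a low pointer,
 * zero for one already above the base, so that ptr + base is the address
 * the movb/movw/movl/mov actually writes to. */
static uint64_t put_user_base(uint64_t uptr)
{
    uint64_t base = SHADOW_BASE;   /* mov $PAX_USER_SHADOW_BASE,%_ASM_BX */
    if (uptr >= base)              /* cmp %_ASM_BX,%_ASM_CX ; jb 1234f   */
        base = 0;                  /* xor %ebx,%ebx                      */
    return base;
}

int main(void)
{
    uint64_t low  = 0x400000ULL;
    uint64_t high = SHADOW_BASE + 0x1000ULL;

    printf("low  pointer stores at %#llx\n",
           (unsigned long long)(low + put_user_base(low)));
    printf("high pointer stores at %#llx\n",
           (unsigned long long)(high + put_user_base(high)));
    return 0;
}

On 32-bit UDEREF the range check and base selection are compiled out entirely and the store simply picks up the gs override via __copyuser_seg, the idea being that the segment loaded into %gs confines the access to userland in hardware.
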
19708diff -urNp linux-2.6.32.43/arch/x86/lib/usercopy_32.c linux-2.6.32.43/arch/x86/lib/usercopy_32.c
19709--- linux-2.6.32.43/arch/x86/lib/usercopy_32.c 2011-03-27 14:31:47.000000000 -0400
19710+++ linux-2.6.32.43/arch/x86/lib/usercopy_32.c 2011-04-23 21:12:28.000000000 -0400
19711@@ -43,7 +43,7 @@ do { \
19712 __asm__ __volatile__( \
19713 " testl %1,%1\n" \
19714 " jz 2f\n" \
19715- "0: lodsb\n" \
19716+ "0: "__copyuser_seg"lodsb\n" \
19717 " stosb\n" \
19718 " testb %%al,%%al\n" \
19719 " jz 1f\n" \
19720@@ -128,10 +128,12 @@ do { \
19721 int __d0; \
19722 might_fault(); \
19723 __asm__ __volatile__( \
19724+ __COPYUSER_SET_ES \
19725 "0: rep; stosl\n" \
19726 " movl %2,%0\n" \
19727 "1: rep; stosb\n" \
19728 "2:\n" \
19729+ __COPYUSER_RESTORE_ES \
19730 ".section .fixup,\"ax\"\n" \
19731 "3: lea 0(%2,%0,4),%0\n" \
19732 " jmp 2b\n" \
19733@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
19734 might_fault();
19735
19736 __asm__ __volatile__(
19737+ __COPYUSER_SET_ES
19738 " testl %0, %0\n"
19739 " jz 3f\n"
19740 " andl %0,%%ecx\n"
19741@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
19742 " subl %%ecx,%0\n"
19743 " addl %0,%%eax\n"
19744 "1:\n"
19745+ __COPYUSER_RESTORE_ES
19746 ".section .fixup,\"ax\"\n"
19747 "2: xorl %%eax,%%eax\n"
19748 " jmp 1b\n"
19749@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
19750
19751 #ifdef CONFIG_X86_INTEL_USERCOPY
19752 static unsigned long
19753-__copy_user_intel(void __user *to, const void *from, unsigned long size)
19754+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
19755 {
19756 int d0, d1;
19757 __asm__ __volatile__(
19758@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
19759 " .align 2,0x90\n"
19760 "3: movl 0(%4), %%eax\n"
19761 "4: movl 4(%4), %%edx\n"
19762- "5: movl %%eax, 0(%3)\n"
19763- "6: movl %%edx, 4(%3)\n"
19764+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
19765+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
19766 "7: movl 8(%4), %%eax\n"
19767 "8: movl 12(%4),%%edx\n"
19768- "9: movl %%eax, 8(%3)\n"
19769- "10: movl %%edx, 12(%3)\n"
19770+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
19771+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
19772 "11: movl 16(%4), %%eax\n"
19773 "12: movl 20(%4), %%edx\n"
19774- "13: movl %%eax, 16(%3)\n"
19775- "14: movl %%edx, 20(%3)\n"
19776+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
19777+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
19778 "15: movl 24(%4), %%eax\n"
19779 "16: movl 28(%4), %%edx\n"
19780- "17: movl %%eax, 24(%3)\n"
19781- "18: movl %%edx, 28(%3)\n"
19782+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
19783+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
19784 "19: movl 32(%4), %%eax\n"
19785 "20: movl 36(%4), %%edx\n"
19786- "21: movl %%eax, 32(%3)\n"
19787- "22: movl %%edx, 36(%3)\n"
19788+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
19789+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
19790 "23: movl 40(%4), %%eax\n"
19791 "24: movl 44(%4), %%edx\n"
19792- "25: movl %%eax, 40(%3)\n"
19793- "26: movl %%edx, 44(%3)\n"
19794+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
19795+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
19796 "27: movl 48(%4), %%eax\n"
19797 "28: movl 52(%4), %%edx\n"
19798- "29: movl %%eax, 48(%3)\n"
19799- "30: movl %%edx, 52(%3)\n"
19800+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
19801+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
19802 "31: movl 56(%4), %%eax\n"
19803 "32: movl 60(%4), %%edx\n"
19804- "33: movl %%eax, 56(%3)\n"
19805- "34: movl %%edx, 60(%3)\n"
19806+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
19807+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
19808 " addl $-64, %0\n"
19809 " addl $64, %4\n"
19810 " addl $64, %3\n"
19811@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
19812 " shrl $2, %0\n"
19813 " andl $3, %%eax\n"
19814 " cld\n"
19815+ __COPYUSER_SET_ES
19816 "99: rep; movsl\n"
19817 "36: movl %%eax, %0\n"
19818 "37: rep; movsb\n"
19819 "100:\n"
19820+ __COPYUSER_RESTORE_ES
19821+ ".section .fixup,\"ax\"\n"
19822+ "101: lea 0(%%eax,%0,4),%0\n"
19823+ " jmp 100b\n"
19824+ ".previous\n"
19825+ ".section __ex_table,\"a\"\n"
19826+ " .align 4\n"
19827+ " .long 1b,100b\n"
19828+ " .long 2b,100b\n"
19829+ " .long 3b,100b\n"
19830+ " .long 4b,100b\n"
19831+ " .long 5b,100b\n"
19832+ " .long 6b,100b\n"
19833+ " .long 7b,100b\n"
19834+ " .long 8b,100b\n"
19835+ " .long 9b,100b\n"
19836+ " .long 10b,100b\n"
19837+ " .long 11b,100b\n"
19838+ " .long 12b,100b\n"
19839+ " .long 13b,100b\n"
19840+ " .long 14b,100b\n"
19841+ " .long 15b,100b\n"
19842+ " .long 16b,100b\n"
19843+ " .long 17b,100b\n"
19844+ " .long 18b,100b\n"
19845+ " .long 19b,100b\n"
19846+ " .long 20b,100b\n"
19847+ " .long 21b,100b\n"
19848+ " .long 22b,100b\n"
19849+ " .long 23b,100b\n"
19850+ " .long 24b,100b\n"
19851+ " .long 25b,100b\n"
19852+ " .long 26b,100b\n"
19853+ " .long 27b,100b\n"
19854+ " .long 28b,100b\n"
19855+ " .long 29b,100b\n"
19856+ " .long 30b,100b\n"
19857+ " .long 31b,100b\n"
19858+ " .long 32b,100b\n"
19859+ " .long 33b,100b\n"
19860+ " .long 34b,100b\n"
19861+ " .long 35b,100b\n"
19862+ " .long 36b,100b\n"
19863+ " .long 37b,100b\n"
19864+ " .long 99b,101b\n"
19865+ ".previous"
19866+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
19867+ : "1"(to), "2"(from), "0"(size)
19868+ : "eax", "edx", "memory");
19869+ return size;
19870+}
19871+
19872+static unsigned long
19873+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
19874+{
19875+ int d0, d1;
19876+ __asm__ __volatile__(
19877+ " .align 2,0x90\n"
19878+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
19879+ " cmpl $67, %0\n"
19880+ " jbe 3f\n"
19881+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
19882+ " .align 2,0x90\n"
19883+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
19884+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
19885+ "5: movl %%eax, 0(%3)\n"
19886+ "6: movl %%edx, 4(%3)\n"
19887+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
19888+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
19889+ "9: movl %%eax, 8(%3)\n"
19890+ "10: movl %%edx, 12(%3)\n"
19891+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
19892+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
19893+ "13: movl %%eax, 16(%3)\n"
19894+ "14: movl %%edx, 20(%3)\n"
19895+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
19896+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
19897+ "17: movl %%eax, 24(%3)\n"
19898+ "18: movl %%edx, 28(%3)\n"
19899+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
19900+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
19901+ "21: movl %%eax, 32(%3)\n"
19902+ "22: movl %%edx, 36(%3)\n"
19903+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
19904+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
19905+ "25: movl %%eax, 40(%3)\n"
19906+ "26: movl %%edx, 44(%3)\n"
19907+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
19908+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
19909+ "29: movl %%eax, 48(%3)\n"
19910+ "30: movl %%edx, 52(%3)\n"
19911+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
19912+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
19913+ "33: movl %%eax, 56(%3)\n"
19914+ "34: movl %%edx, 60(%3)\n"
19915+ " addl $-64, %0\n"
19916+ " addl $64, %4\n"
19917+ " addl $64, %3\n"
19918+ " cmpl $63, %0\n"
19919+ " ja 1b\n"
19920+ "35: movl %0, %%eax\n"
19921+ " shrl $2, %0\n"
19922+ " andl $3, %%eax\n"
19923+ " cld\n"
19924+ "99: rep; "__copyuser_seg" movsl\n"
19925+ "36: movl %%eax, %0\n"
19926+ "37: rep; "__copyuser_seg" movsb\n"
19927+ "100:\n"
19928 ".section .fixup,\"ax\"\n"
19929 "101: lea 0(%%eax,%0,4),%0\n"
19930 " jmp 100b\n"
19931@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
19932 int d0, d1;
19933 __asm__ __volatile__(
19934 " .align 2,0x90\n"
19935- "0: movl 32(%4), %%eax\n"
19936+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
19937 " cmpl $67, %0\n"
19938 " jbe 2f\n"
19939- "1: movl 64(%4), %%eax\n"
19940+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
19941 " .align 2,0x90\n"
19942- "2: movl 0(%4), %%eax\n"
19943- "21: movl 4(%4), %%edx\n"
19944+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
19945+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
19946 " movl %%eax, 0(%3)\n"
19947 " movl %%edx, 4(%3)\n"
19948- "3: movl 8(%4), %%eax\n"
19949- "31: movl 12(%4),%%edx\n"
19950+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
19951+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
19952 " movl %%eax, 8(%3)\n"
19953 " movl %%edx, 12(%3)\n"
19954- "4: movl 16(%4), %%eax\n"
19955- "41: movl 20(%4), %%edx\n"
19956+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
19957+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
19958 " movl %%eax, 16(%3)\n"
19959 " movl %%edx, 20(%3)\n"
19960- "10: movl 24(%4), %%eax\n"
19961- "51: movl 28(%4), %%edx\n"
19962+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
19963+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
19964 " movl %%eax, 24(%3)\n"
19965 " movl %%edx, 28(%3)\n"
19966- "11: movl 32(%4), %%eax\n"
19967- "61: movl 36(%4), %%edx\n"
19968+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
19969+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
19970 " movl %%eax, 32(%3)\n"
19971 " movl %%edx, 36(%3)\n"
19972- "12: movl 40(%4), %%eax\n"
19973- "71: movl 44(%4), %%edx\n"
19974+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
19975+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
19976 " movl %%eax, 40(%3)\n"
19977 " movl %%edx, 44(%3)\n"
19978- "13: movl 48(%4), %%eax\n"
19979- "81: movl 52(%4), %%edx\n"
19980+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
19981+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
19982 " movl %%eax, 48(%3)\n"
19983 " movl %%edx, 52(%3)\n"
19984- "14: movl 56(%4), %%eax\n"
19985- "91: movl 60(%4), %%edx\n"
19986+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
19987+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
19988 " movl %%eax, 56(%3)\n"
19989 " movl %%edx, 60(%3)\n"
19990 " addl $-64, %0\n"
19991@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
19992 " shrl $2, %0\n"
19993 " andl $3, %%eax\n"
19994 " cld\n"
19995- "6: rep; movsl\n"
19996+ "6: rep; "__copyuser_seg" movsl\n"
19997 " movl %%eax,%0\n"
19998- "7: rep; movsb\n"
19999+ "7: rep; "__copyuser_seg" movsb\n"
20000 "8:\n"
20001 ".section .fixup,\"ax\"\n"
20002 "9: lea 0(%%eax,%0,4),%0\n"
20003@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
20004
20005 __asm__ __volatile__(
20006 " .align 2,0x90\n"
20007- "0: movl 32(%4), %%eax\n"
20008+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20009 " cmpl $67, %0\n"
20010 " jbe 2f\n"
20011- "1: movl 64(%4), %%eax\n"
20012+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20013 " .align 2,0x90\n"
20014- "2: movl 0(%4), %%eax\n"
20015- "21: movl 4(%4), %%edx\n"
20016+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20017+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20018 " movnti %%eax, 0(%3)\n"
20019 " movnti %%edx, 4(%3)\n"
20020- "3: movl 8(%4), %%eax\n"
20021- "31: movl 12(%4),%%edx\n"
20022+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20023+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20024 " movnti %%eax, 8(%3)\n"
20025 " movnti %%edx, 12(%3)\n"
20026- "4: movl 16(%4), %%eax\n"
20027- "41: movl 20(%4), %%edx\n"
20028+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20029+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20030 " movnti %%eax, 16(%3)\n"
20031 " movnti %%edx, 20(%3)\n"
20032- "10: movl 24(%4), %%eax\n"
20033- "51: movl 28(%4), %%edx\n"
20034+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20035+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20036 " movnti %%eax, 24(%3)\n"
20037 " movnti %%edx, 28(%3)\n"
20038- "11: movl 32(%4), %%eax\n"
20039- "61: movl 36(%4), %%edx\n"
20040+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20041+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20042 " movnti %%eax, 32(%3)\n"
20043 " movnti %%edx, 36(%3)\n"
20044- "12: movl 40(%4), %%eax\n"
20045- "71: movl 44(%4), %%edx\n"
20046+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20047+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20048 " movnti %%eax, 40(%3)\n"
20049 " movnti %%edx, 44(%3)\n"
20050- "13: movl 48(%4), %%eax\n"
20051- "81: movl 52(%4), %%edx\n"
20052+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20053+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20054 " movnti %%eax, 48(%3)\n"
20055 " movnti %%edx, 52(%3)\n"
20056- "14: movl 56(%4), %%eax\n"
20057- "91: movl 60(%4), %%edx\n"
20058+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20059+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20060 " movnti %%eax, 56(%3)\n"
20061 " movnti %%edx, 60(%3)\n"
20062 " addl $-64, %0\n"
20063@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
20064 " shrl $2, %0\n"
20065 " andl $3, %%eax\n"
20066 " cld\n"
20067- "6: rep; movsl\n"
20068+ "6: rep; "__copyuser_seg" movsl\n"
20069 " movl %%eax,%0\n"
20070- "7: rep; movsb\n"
20071+ "7: rep; "__copyuser_seg" movsb\n"
20072 "8:\n"
20073 ".section .fixup,\"ax\"\n"
20074 "9: lea 0(%%eax,%0,4),%0\n"
20075@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
20076
20077 __asm__ __volatile__(
20078 " .align 2,0x90\n"
20079- "0: movl 32(%4), %%eax\n"
20080+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20081 " cmpl $67, %0\n"
20082 " jbe 2f\n"
20083- "1: movl 64(%4), %%eax\n"
20084+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20085 " .align 2,0x90\n"
20086- "2: movl 0(%4), %%eax\n"
20087- "21: movl 4(%4), %%edx\n"
20088+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20089+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20090 " movnti %%eax, 0(%3)\n"
20091 " movnti %%edx, 4(%3)\n"
20092- "3: movl 8(%4), %%eax\n"
20093- "31: movl 12(%4),%%edx\n"
20094+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20095+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20096 " movnti %%eax, 8(%3)\n"
20097 " movnti %%edx, 12(%3)\n"
20098- "4: movl 16(%4), %%eax\n"
20099- "41: movl 20(%4), %%edx\n"
20100+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20101+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20102 " movnti %%eax, 16(%3)\n"
20103 " movnti %%edx, 20(%3)\n"
20104- "10: movl 24(%4), %%eax\n"
20105- "51: movl 28(%4), %%edx\n"
20106+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20107+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20108 " movnti %%eax, 24(%3)\n"
20109 " movnti %%edx, 28(%3)\n"
20110- "11: movl 32(%4), %%eax\n"
20111- "61: movl 36(%4), %%edx\n"
20112+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20113+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20114 " movnti %%eax, 32(%3)\n"
20115 " movnti %%edx, 36(%3)\n"
20116- "12: movl 40(%4), %%eax\n"
20117- "71: movl 44(%4), %%edx\n"
20118+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20119+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20120 " movnti %%eax, 40(%3)\n"
20121 " movnti %%edx, 44(%3)\n"
20122- "13: movl 48(%4), %%eax\n"
20123- "81: movl 52(%4), %%edx\n"
20124+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20125+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20126 " movnti %%eax, 48(%3)\n"
20127 " movnti %%edx, 52(%3)\n"
20128- "14: movl 56(%4), %%eax\n"
20129- "91: movl 60(%4), %%edx\n"
20130+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20131+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20132 " movnti %%eax, 56(%3)\n"
20133 " movnti %%edx, 60(%3)\n"
20134 " addl $-64, %0\n"
20135@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
20136 " shrl $2, %0\n"
20137 " andl $3, %%eax\n"
20138 " cld\n"
20139- "6: rep; movsl\n"
20140+ "6: rep; "__copyuser_seg" movsl\n"
20141 " movl %%eax,%0\n"
20142- "7: rep; movsb\n"
20143+ "7: rep; "__copyuser_seg" movsb\n"
20144 "8:\n"
20145 ".section .fixup,\"ax\"\n"
20146 "9: lea 0(%%eax,%0,4),%0\n"
20147@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
20148 */
20149 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
20150 unsigned long size);
20151-unsigned long __copy_user_intel(void __user *to, const void *from,
20152+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
20153+ unsigned long size);
20154+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
20155 unsigned long size);
20156 unsigned long __copy_user_zeroing_intel_nocache(void *to,
20157 const void __user *from, unsigned long size);
20158 #endif /* CONFIG_X86_INTEL_USERCOPY */
20159
20160 /* Generic arbitrary sized copy. */
20161-#define __copy_user(to, from, size) \
20162+#define __copy_user(to, from, size, prefix, set, restore) \
20163 do { \
20164 int __d0, __d1, __d2; \
20165 __asm__ __volatile__( \
20166+ set \
20167 " cmp $7,%0\n" \
20168 " jbe 1f\n" \
20169 " movl %1,%0\n" \
20170 " negl %0\n" \
20171 " andl $7,%0\n" \
20172 " subl %0,%3\n" \
20173- "4: rep; movsb\n" \
20174+ "4: rep; "prefix"movsb\n" \
20175 " movl %3,%0\n" \
20176 " shrl $2,%0\n" \
20177 " andl $3,%3\n" \
20178 " .align 2,0x90\n" \
20179- "0: rep; movsl\n" \
20180+ "0: rep; "prefix"movsl\n" \
20181 " movl %3,%0\n" \
20182- "1: rep; movsb\n" \
20183+ "1: rep; "prefix"movsb\n" \
20184 "2:\n" \
20185+ restore \
20186 ".section .fixup,\"ax\"\n" \
20187 "5: addl %3,%0\n" \
20188 " jmp 2b\n" \
20189@@ -682,14 +799,14 @@ do { \
20190 " negl %0\n" \
20191 " andl $7,%0\n" \
20192 " subl %0,%3\n" \
20193- "4: rep; movsb\n" \
20194+ "4: rep; "__copyuser_seg"movsb\n" \
20195 " movl %3,%0\n" \
20196 " shrl $2,%0\n" \
20197 " andl $3,%3\n" \
20198 " .align 2,0x90\n" \
20199- "0: rep; movsl\n" \
20200+ "0: rep; "__copyuser_seg"movsl\n" \
20201 " movl %3,%0\n" \
20202- "1: rep; movsb\n" \
20203+ "1: rep; "__copyuser_seg"movsb\n" \
20204 "2:\n" \
20205 ".section .fixup,\"ax\"\n" \
20206 "5: addl %3,%0\n" \
20207@@ -775,9 +892,9 @@ survive:
20208 }
20209 #endif
20210 if (movsl_is_ok(to, from, n))
20211- __copy_user(to, from, n);
20212+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
20213 else
20214- n = __copy_user_intel(to, from, n);
20215+ n = __generic_copy_to_user_intel(to, from, n);
20216 return n;
20217 }
20218 EXPORT_SYMBOL(__copy_to_user_ll);
20219@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
20220 unsigned long n)
20221 {
20222 if (movsl_is_ok(to, from, n))
20223- __copy_user(to, from, n);
20224+ __copy_user(to, from, n, __copyuser_seg, "", "");
20225 else
20226- n = __copy_user_intel((void __user *)to,
20227- (const void *)from, n);
20228+ n = __generic_copy_from_user_intel(to, from, n);
20229 return n;
20230 }
20231 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
20232@@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocach
20233 if (n > 64 && cpu_has_xmm2)
20234 n = __copy_user_intel_nocache(to, from, n);
20235 else
20236- __copy_user(to, from, n);
20237+ __copy_user(to, from, n, __copyuser_seg, "", "");
20238 #else
20239- __copy_user(to, from, n);
20240+ __copy_user(to, from, n, __copyuser_seg, "", "");
20241 #endif
20242 return n;
20243 }
20244 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
20245
20246-/**
20247- * copy_to_user: - Copy a block of data into user space.
20248- * @to: Destination address, in user space.
20249- * @from: Source address, in kernel space.
20250- * @n: Number of bytes to copy.
20251- *
20252- * Context: User context only. This function may sleep.
20253- *
20254- * Copy data from kernel space to user space.
20255- *
20256- * Returns number of bytes that could not be copied.
20257- * On success, this will be zero.
20258- */
20259-unsigned long
20260-copy_to_user(void __user *to, const void *from, unsigned long n)
20261+#ifdef CONFIG_PAX_MEMORY_UDEREF
20262+void __set_fs(mm_segment_t x)
20263 {
20264- if (access_ok(VERIFY_WRITE, to, n))
20265- n = __copy_to_user(to, from, n);
20266- return n;
20267+ switch (x.seg) {
20268+ case 0:
20269+ loadsegment(gs, 0);
20270+ break;
20271+ case TASK_SIZE_MAX:
20272+ loadsegment(gs, __USER_DS);
20273+ break;
20274+ case -1UL:
20275+ loadsegment(gs, __KERNEL_DS);
20276+ break;
20277+ default:
20278+ BUG();
20279+ }
20280+ return;
20281 }
20282-EXPORT_SYMBOL(copy_to_user);
20283+EXPORT_SYMBOL(__set_fs);
20284
20285-/**
20286- * copy_from_user: - Copy a block of data from user space.
20287- * @to: Destination address, in kernel space.
20288- * @from: Source address, in user space.
20289- * @n: Number of bytes to copy.
20290- *
20291- * Context: User context only. This function may sleep.
20292- *
20293- * Copy data from user space to kernel space.
20294- *
20295- * Returns number of bytes that could not be copied.
20296- * On success, this will be zero.
20297- *
20298- * If some data could not be copied, this function will pad the copied
20299- * data to the requested size using zero bytes.
20300- */
20301-unsigned long
20302-copy_from_user(void *to, const void __user *from, unsigned long n)
20303+void set_fs(mm_segment_t x)
20304 {
20305- if (access_ok(VERIFY_READ, from, n))
20306- n = __copy_from_user(to, from, n);
20307- else
20308- memset(to, 0, n);
20309- return n;
20310+ current_thread_info()->addr_limit = x;
20311+ __set_fs(x);
20312 }
20313-EXPORT_SYMBOL(copy_from_user);
20314+EXPORT_SYMBOL(set_fs);
20315+#endif
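
The bulk of the usercopy_32.c changes splice "__copyuser_seg" into the userland side of each inline-asm template. That works through ordinary C string-literal concatenation: the macro, defined in a uaccess header hunk elsewhere in the patch (presumably "gs;" under 32-bit UDEREF and an empty string otherwise), is pasted between the adjacent literals, so the emitted instruction gains a %gs segment-override prefix only in that configuration. A tiny compile-time demonstration of the pasting, using that assumed definition:

#include <stdio.h>

/* Assumed definition for the demo; the real one lives in a header hunk that
 * is not part of this excerpt. */
#ifdef MODEL_UDEREF_32
# define __copyuser_seg "gs;"
#else
# define __copyuser_seg ""
#endif

int main(void)
{
    /* Adjacent string literals concatenate at translation time, exactly as
     * in templates like "0: "__copyuser_seg"lodsb\n" above. */
    puts("0: " __copyuser_seg "lodsb");
    puts("6: rep; " __copyuser_seg "movsl");
    return 0;
}

Built with -DMODEL_UDEREF_32 the printed templates carry the gs; prefix; built without it they are byte-for-byte the original instructions, which is how the patch leaves non-UDEREF code generation unchanged.
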
20316diff -urNp linux-2.6.32.43/arch/x86/lib/usercopy_64.c linux-2.6.32.43/arch/x86/lib/usercopy_64.c
20317--- linux-2.6.32.43/arch/x86/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
20318+++ linux-2.6.32.43/arch/x86/lib/usercopy_64.c 2011-05-04 17:56:20.000000000 -0400
20319@@ -42,6 +42,12 @@ long
20320 __strncpy_from_user(char *dst, const char __user *src, long count)
20321 {
20322 long res;
20323+
20324+#ifdef CONFIG_PAX_MEMORY_UDEREF
20325+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
20326+ src += PAX_USER_SHADOW_BASE;
20327+#endif
20328+
20329 __do_strncpy_from_user(dst, src, count, res);
20330 return res;
20331 }
20332@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
20333 {
20334 long __d0;
20335 might_fault();
20336+
20337+#ifdef CONFIG_PAX_MEMORY_UDEREF
20338+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
20339+ addr += PAX_USER_SHADOW_BASE;
20340+#endif
20341+
20342 /* no memory constraint because it doesn't change any memory gcc knows
20343 about */
20344 asm volatile(
20345@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
20346
20347 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
20348 {
20349- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20350+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20351+
20352+#ifdef CONFIG_PAX_MEMORY_UDEREF
20353+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
20354+ to += PAX_USER_SHADOW_BASE;
20355+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
20356+ from += PAX_USER_SHADOW_BASE;
20357+#endif
20358+
20359 return copy_user_generic((__force void *)to, (__force void *)from, len);
20360- }
20361- return len;
20362+ }
20363+ return len;
20364 }
20365 EXPORT_SYMBOL(copy_in_user);
20366
20367diff -urNp linux-2.6.32.43/arch/x86/Makefile linux-2.6.32.43/arch/x86/Makefile
20368--- linux-2.6.32.43/arch/x86/Makefile 2011-03-27 14:31:47.000000000 -0400
20369+++ linux-2.6.32.43/arch/x86/Makefile 2011-07-19 18:16:02.000000000 -0400
20370@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
20371 else
20372 BITS := 64
20373 UTS_MACHINE := x86_64
20374+ biarch := $(call cc-option,-m64)
20375 CHECKFLAGS += -D__x86_64__ -m64
20376
20377 KBUILD_AFLAGS += -m64
20378@@ -189,3 +190,12 @@ define archhelp
20379 echo ' FDARGS="..." arguments for the booted kernel'
20380 echo ' FDINITRD=file initrd for the booted kernel'
20381 endef
20382+
20383+define OLD_LD
20384+
20385+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
20386+*** Please upgrade your binutils to 2.18 or newer
20387+endef
20388+
20389+archprepare:
20390+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
20391diff -urNp linux-2.6.32.43/arch/x86/mm/extable.c linux-2.6.32.43/arch/x86/mm/extable.c
20392--- linux-2.6.32.43/arch/x86/mm/extable.c 2011-03-27 14:31:47.000000000 -0400
20393+++ linux-2.6.32.43/arch/x86/mm/extable.c 2011-04-17 15:56:46.000000000 -0400
20394@@ -1,14 +1,71 @@
20395 #include <linux/module.h>
20396 #include <linux/spinlock.h>
20397+#include <linux/sort.h>
20398 #include <asm/uaccess.h>
20399+#include <asm/pgtable.h>
20400
20401+/*
20402+ * The exception table needs to be sorted so that the binary
20403+ * search that we use to find entries in it works properly.
20404+ * This is used both for the kernel exception table and for
20405+ * the exception tables of modules that get loaded.
20406+ */
20407+static int cmp_ex(const void *a, const void *b)
20408+{
20409+ const struct exception_table_entry *x = a, *y = b;
20410+
20411+ /* avoid overflow */
20412+ if (x->insn > y->insn)
20413+ return 1;
20414+ if (x->insn < y->insn)
20415+ return -1;
20416+ return 0;
20417+}
20418+
20419+static void swap_ex(void *a, void *b, int size)
20420+{
20421+ struct exception_table_entry t, *x = a, *y = b;
20422+
20423+ t = *x;
20424+
20425+ pax_open_kernel();
20426+ *x = *y;
20427+ *y = t;
20428+ pax_close_kernel();
20429+}
20430+
20431+void sort_extable(struct exception_table_entry *start,
20432+ struct exception_table_entry *finish)
20433+{
20434+ sort(start, finish - start, sizeof(struct exception_table_entry),
20435+ cmp_ex, swap_ex);
20436+}
20437+
20438+#ifdef CONFIG_MODULES
20439+/*
20440+ * If the exception table is sorted, any referring to the module init
20441+ * will be at the beginning or the end.
20442+ */
20443+void trim_init_extable(struct module *m)
20444+{
20445+ /*trim the beginning*/
20446+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
20447+ m->extable++;
20448+ m->num_exentries--;
20449+ }
20450+ /*trim the end*/
20451+ while (m->num_exentries &&
20452+ within_module_init(m->extable[m->num_exentries-1].insn, m))
20453+ m->num_exentries--;
20454+}
20455+#endif /* CONFIG_MODULES */
20456
20457 int fixup_exception(struct pt_regs *regs)
20458 {
20459 const struct exception_table_entry *fixup;
20460
20461 #ifdef CONFIG_PNPBIOS
20462- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
20463+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
20464 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
20465 extern u32 pnp_bios_is_utter_crap;
20466 pnp_bios_is_utter_crap = 1;
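
The extable.c addition reimplements sort_extable() on top of the generic kernel sort(), passing both a comparison callback and a custom swap callback; the custom swap exists so the element exchange can be wrapped in pax_open_kernel()/pax_close_kernel() and still work when KERNEXEC keeps the exception table in read-only memory. A self-contained userspace model of that callback structure is sketched below; sort_model() is a naive stand-in for the kernel's sort(), and the open/close steps are reduced to comments.

#include <stddef.h>
#include <stdio.h>

/* Modeled exception table entry: fault address and fixup address. */
struct ex_entry { unsigned long insn, fixup; };

/* Compare with explicit <,> rather than subtraction, as the patch does, so a
 * large address difference cannot overflow the int return value. */
static int cmp_ex(const void *a, const void *b)
{
    const struct ex_entry *x = a, *y = b;
    if (x->insn > y->insn) return 1;
    if (x->insn < y->insn) return -1;
    return 0;
}

/* Caller-supplied swap: in the patch this is where pax_open_kernel() and
 * pax_close_kernel() bracket the writes. */
static void swap_ex(void *a, void *b, int size)
{
    struct ex_entry t, *x = a, *y = b;
    (void)size;
    /* pax_open_kernel(); */
    t = *x; *x = *y; *y = t;
    /* pax_close_kernel(); */
}

/* Naive exchange sort driven entirely by the two callbacks, standing in for
 * the kernel's sort(). */
static void sort_model(void *base, size_t n, size_t size,
                       int (*cmp)(const void *, const void *),
                       void (*swap)(void *, void *, int))
{
    char *p = base;
    for (size_t i = 0; i + 1 < n; i++)
        for (size_t j = i + 1; j < n; j++)
            if (cmp(p + i * size, p + j * size) > 0)
                swap(p + i * size, p + j * size, (int)size);
}

int main(void)
{
    struct ex_entry tbl[] = { { 30, 3 }, { 10, 1 }, { 20, 2 } };
    size_t n = sizeof(tbl) / sizeof(tbl[0]);

    sort_model(tbl, n, sizeof(tbl[0]), cmp_ex, swap_ex);
    for (size_t i = 0; i < n; i++)
        printf("insn %lu -> fixup %lu\n", tbl[i].insn, tbl[i].fixup);
    return 0;
}

trim_init_extable() then relies on the table being sorted: entries pointing into a module's init section can only sit at the two ends, so they are dropped by advancing the table pointer and shrinking the entry count.
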
20467diff -urNp linux-2.6.32.43/arch/x86/mm/fault.c linux-2.6.32.43/arch/x86/mm/fault.c
20468--- linux-2.6.32.43/arch/x86/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
20469+++ linux-2.6.32.43/arch/x86/mm/fault.c 2011-06-06 17:35:16.000000000 -0400
20470@@ -11,10 +11,19 @@
20471 #include <linux/kprobes.h> /* __kprobes, ... */
20472 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
20473 #include <linux/perf_event.h> /* perf_sw_event */
20474+#include <linux/unistd.h>
20475+#include <linux/compiler.h>
20476
20477 #include <asm/traps.h> /* dotraplinkage, ... */
20478 #include <asm/pgalloc.h> /* pgd_*(), ... */
20479 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
20480+#include <asm/vsyscall.h>
20481+#include <asm/tlbflush.h>
20482+
20483+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20484+#include <asm/stacktrace.h>
20485+#include "../kernel/dumpstack.h"
20486+#endif
20487
20488 /*
20489 * Page fault error code bits:
20490@@ -51,7 +60,7 @@ static inline int notify_page_fault(stru
20491 int ret = 0;
20492
20493 /* kprobe_running() needs smp_processor_id() */
20494- if (kprobes_built_in() && !user_mode_vm(regs)) {
20495+ if (kprobes_built_in() && !user_mode(regs)) {
20496 preempt_disable();
20497 if (kprobe_running() && kprobe_fault_handler(regs, 14))
20498 ret = 1;
20499@@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *re
20500 return !instr_lo || (instr_lo>>1) == 1;
20501 case 0x00:
20502 /* Prefetch instruction is 0x0F0D or 0x0F18 */
20503- if (probe_kernel_address(instr, opcode))
20504+ if (user_mode(regs)) {
20505+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20506+ return 0;
20507+ } else if (probe_kernel_address(instr, opcode))
20508 return 0;
20509
20510 *prefetch = (instr_lo == 0xF) &&
20511@@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsign
20512 while (instr < max_instr) {
20513 unsigned char opcode;
20514
20515- if (probe_kernel_address(instr, opcode))
20516+ if (user_mode(regs)) {
20517+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20518+ break;
20519+ } else if (probe_kernel_address(instr, opcode))
20520 break;
20521
20522 instr++;
20523@@ -172,6 +187,30 @@ force_sig_info_fault(int si_signo, int s
20524 force_sig_info(si_signo, &info, tsk);
20525 }
20526
20527+#ifdef CONFIG_PAX_EMUTRAMP
20528+static int pax_handle_fetch_fault(struct pt_regs *regs);
20529+#endif
20530+
20531+#ifdef CONFIG_PAX_PAGEEXEC
20532+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
20533+{
20534+ pgd_t *pgd;
20535+ pud_t *pud;
20536+ pmd_t *pmd;
20537+
20538+ pgd = pgd_offset(mm, address);
20539+ if (!pgd_present(*pgd))
20540+ return NULL;
20541+ pud = pud_offset(pgd, address);
20542+ if (!pud_present(*pud))
20543+ return NULL;
20544+ pmd = pmd_offset(pud, address);
20545+ if (!pmd_present(*pmd))
20546+ return NULL;
20547+ return pmd;
20548+}
20549+#endif
20550+
20551 DEFINE_SPINLOCK(pgd_lock);
20552 LIST_HEAD(pgd_list);
20553
20554@@ -224,11 +263,24 @@ void vmalloc_sync_all(void)
20555 address += PMD_SIZE) {
20556
20557 unsigned long flags;
20558+
20559+#ifdef CONFIG_PAX_PER_CPU_PGD
20560+ unsigned long cpu;
20561+#else
20562 struct page *page;
20563+#endif
20564
20565 spin_lock_irqsave(&pgd_lock, flags);
20566+
20567+#ifdef CONFIG_PAX_PER_CPU_PGD
20568+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20569+ pgd_t *pgd = get_cpu_pgd(cpu);
20570+#else
20571 list_for_each_entry(page, &pgd_list, lru) {
20572- if (!vmalloc_sync_one(page_address(page), address))
20573+ pgd_t *pgd = page_address(page);
20574+#endif
20575+
20576+ if (!vmalloc_sync_one(pgd, address))
20577 break;
20578 }
20579 spin_unlock_irqrestore(&pgd_lock, flags);
20580@@ -258,6 +310,11 @@ static noinline int vmalloc_fault(unsign
20581 * an interrupt in the middle of a task switch..
20582 */
20583 pgd_paddr = read_cr3();
20584+
20585+#ifdef CONFIG_PAX_PER_CPU_PGD
20586+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
20587+#endif
20588+
20589 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
20590 if (!pmd_k)
20591 return -1;
20592@@ -332,15 +389,27 @@ void vmalloc_sync_all(void)
20593
20594 const pgd_t *pgd_ref = pgd_offset_k(address);
20595 unsigned long flags;
20596+
20597+#ifdef CONFIG_PAX_PER_CPU_PGD
20598+ unsigned long cpu;
20599+#else
20600 struct page *page;
20601+#endif
20602
20603 if (pgd_none(*pgd_ref))
20604 continue;
20605
20606 spin_lock_irqsave(&pgd_lock, flags);
20607+
20608+#ifdef CONFIG_PAX_PER_CPU_PGD
20609+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20610+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
20611+#else
20612 list_for_each_entry(page, &pgd_list, lru) {
20613 pgd_t *pgd;
20614 pgd = (pgd_t *)page_address(page) + pgd_index(address);
20615+#endif
20616+
20617 if (pgd_none(*pgd))
20618 set_pgd(pgd, *pgd_ref);
20619 else
20620@@ -373,7 +442,14 @@ static noinline int vmalloc_fault(unsign
20621 * happen within a race in page table update. In the later
20622 * case just flush:
20623 */
20624+
20625+#ifdef CONFIG_PAX_PER_CPU_PGD
20626+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
20627+ pgd = pgd_offset_cpu(smp_processor_id(), address);
20628+#else
20629 pgd = pgd_offset(current->active_mm, address);
20630+#endif
20631+
20632 pgd_ref = pgd_offset_k(address);
20633 if (pgd_none(*pgd_ref))
20634 return -1;
20635@@ -535,7 +611,7 @@ static int is_errata93(struct pt_regs *r
20636 static int is_errata100(struct pt_regs *regs, unsigned long address)
20637 {
20638 #ifdef CONFIG_X86_64
20639- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
20640+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
20641 return 1;
20642 #endif
20643 return 0;
20644@@ -562,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *r
20645 }
20646
20647 static const char nx_warning[] = KERN_CRIT
20648-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
20649+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
20650
20651 static void
20652 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
20653@@ -571,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, un
20654 if (!oops_may_print())
20655 return;
20656
20657- if (error_code & PF_INSTR) {
20658+ if (nx_enabled && (error_code & PF_INSTR)) {
20659 unsigned int level;
20660
20661 pte_t *pte = lookup_address(address, &level);
20662
20663 if (pte && pte_present(*pte) && !pte_exec(*pte))
20664- printk(nx_warning, current_uid());
20665+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
20666 }
20667
20668+#ifdef CONFIG_PAX_KERNEXEC
20669+ if (init_mm.start_code <= address && address < init_mm.end_code) {
20670+ if (current->signal->curr_ip)
20671+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20672+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
20673+ else
20674+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20675+ current->comm, task_pid_nr(current), current_uid(), current_euid());
20676+ }
20677+#endif
20678+
20679 printk(KERN_ALERT "BUG: unable to handle kernel ");
20680 if (address < PAGE_SIZE)
20681 printk(KERN_CONT "NULL pointer dereference");
20682@@ -704,6 +791,68 @@ __bad_area_nosemaphore(struct pt_regs *r
20683 unsigned long address, int si_code)
20684 {
20685 struct task_struct *tsk = current;
20686+ struct mm_struct *mm = tsk->mm;
20687+
20688+#ifdef CONFIG_X86_64
20689+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
20690+ if (regs->ip == (unsigned long)vgettimeofday) {
20691+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
20692+ return;
20693+ } else if (regs->ip == (unsigned long)vtime) {
20694+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
20695+ return;
20696+ } else if (regs->ip == (unsigned long)vgetcpu) {
20697+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
20698+ return;
20699+ }
20700+ }
20701+#endif
20702+
20703+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20704+ if (mm && (error_code & PF_USER)) {
20705+ unsigned long ip = regs->ip;
20706+
20707+ if (v8086_mode(regs))
20708+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
20709+
20710+ /*
20711+ * It's possible to have interrupts off here:
20712+ */
20713+ local_irq_enable();
20714+
20715+#ifdef CONFIG_PAX_PAGEEXEC
20716+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
20717+ ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
20718+
20719+#ifdef CONFIG_PAX_EMUTRAMP
20720+ switch (pax_handle_fetch_fault(regs)) {
20721+ case 2:
20722+ return;
20723+ }
20724+#endif
20725+
20726+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20727+ do_group_exit(SIGKILL);
20728+ }
20729+#endif
20730+
20731+#ifdef CONFIG_PAX_SEGMEXEC
20732+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
20733+
20734+#ifdef CONFIG_PAX_EMUTRAMP
20735+ switch (pax_handle_fetch_fault(regs)) {
20736+ case 2:
20737+ return;
20738+ }
20739+#endif
20740+
20741+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20742+ do_group_exit(SIGKILL);
20743+ }
20744+#endif
20745+
20746+ }
20747+#endif
20748
20749 /* User mode accesses just cause a SIGSEGV */
20750 if (error_code & PF_USER) {
20751@@ -857,6 +1006,99 @@ static int spurious_fault_check(unsigned
20752 return 1;
20753 }
20754
20755+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20756+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
20757+{
20758+ pte_t *pte;
20759+ pmd_t *pmd;
20760+ spinlock_t *ptl;
20761+ unsigned char pte_mask;
20762+
20763+ if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
20764+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
20765+ return 0;
20766+
20767+ /* PaX: it's our fault, let's handle it if we can */
20768+
20769+ /* PaX: take a look at read faults before acquiring any locks */
20770+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
20771+ /* instruction fetch attempt from a protected page in user mode */
20772+ up_read(&mm->mmap_sem);
20773+
20774+#ifdef CONFIG_PAX_EMUTRAMP
20775+ switch (pax_handle_fetch_fault(regs)) {
20776+ case 2:
20777+ return 1;
20778+ }
20779+#endif
20780+
20781+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
20782+ do_group_exit(SIGKILL);
20783+ }
20784+
20785+ pmd = pax_get_pmd(mm, address);
20786+ if (unlikely(!pmd))
20787+ return 0;
20788+
20789+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
20790+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
20791+ pte_unmap_unlock(pte, ptl);
20792+ return 0;
20793+ }
20794+
20795+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
20796+ /* write attempt to a protected page in user mode */
20797+ pte_unmap_unlock(pte, ptl);
20798+ return 0;
20799+ }
20800+
20801+#ifdef CONFIG_SMP
20802+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
20803+#else
20804+ if (likely(address > get_limit(regs->cs)))
20805+#endif
20806+ {
20807+ set_pte(pte, pte_mkread(*pte));
20808+ __flush_tlb_one(address);
20809+ pte_unmap_unlock(pte, ptl);
20810+ up_read(&mm->mmap_sem);
20811+ return 1;
20812+ }
20813+
20814+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
20815+
20816+ /*
20817+ * PaX: fill DTLB with user rights and retry
20818+ */
20819+ __asm__ __volatile__ (
20820+ "orb %2,(%1)\n"
20821+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
20822+/*
20823+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
20824+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
20825+ * page fault when examined during a TLB load attempt. this is true not only
20826+ * for PTEs holding a non-present entry but also present entries that will
20827+ * raise a page fault (such as those set up by PaX, or the copy-on-write
20828+ * mechanism). in effect it means that we do *not* need to flush the TLBs
20829+ * for our target pages since their PTEs are simply not in the TLBs at all.
20830+
20831+ * the best thing in omitting it is that we gain around 15-20% speed in the
20832+ * fast path of the page fault handler and can get rid of tracing since we
20833+ * can no longer flush unintended entries.
20834+ */
20835+ "invlpg (%0)\n"
20836+#endif
20837+ __copyuser_seg"testb $0,(%0)\n"
20838+ "xorb %3,(%1)\n"
20839+ :
20840+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
20841+ : "memory", "cc");
20842+ pte_unmap_unlock(pte, ptl);
20843+ up_read(&mm->mmap_sem);
20844+ return 1;
20845+}
20846+#endif
20847+
20848 /*
20849 * Handle a spurious fault caused by a stale TLB entry.
20850 *
20851@@ -923,6 +1165,9 @@ int show_unhandled_signals = 1;
20852 static inline int
20853 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
20854 {
20855+ if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
20856+ return 1;
20857+
20858 if (write) {
20859 /* write, present and write, not present: */
20860 if (unlikely(!(vma->vm_flags & VM_WRITE)))
20861@@ -956,17 +1201,31 @@ do_page_fault(struct pt_regs *regs, unsi
20862 {
20863 struct vm_area_struct *vma;
20864 struct task_struct *tsk;
20865- unsigned long address;
20866 struct mm_struct *mm;
20867 int write;
20868 int fault;
20869
20870+ /* Get the faulting address: */
20871+ unsigned long address = read_cr2();
20872+
20873+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20874+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
20875+ if (!search_exception_tables(regs->ip)) {
20876+ bad_area_nosemaphore(regs, error_code, address);
20877+ return;
20878+ }
20879+ if (address < PAX_USER_SHADOW_BASE) {
20880+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
20881+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
20882+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
20883+ } else
20884+ address -= PAX_USER_SHADOW_BASE;
20885+ }
20886+#endif
20887+
20888 tsk = current;
20889 mm = tsk->mm;
20890
20891- /* Get the faulting address: */
20892- address = read_cr2();
20893-
20894 /*
20895 * Detect and handle instructions that would cause a page fault for
20896 * both a tracked kernel page and a userspace page.
20897@@ -1026,7 +1285,7 @@ do_page_fault(struct pt_regs *regs, unsi
20898 * User-mode registers count as a user access even for any
20899 * potential system fault or CPU buglet:
20900 */
20901- if (user_mode_vm(regs)) {
20902+ if (user_mode(regs)) {
20903 local_irq_enable();
20904 error_code |= PF_USER;
20905 } else {
20906@@ -1080,6 +1339,11 @@ do_page_fault(struct pt_regs *regs, unsi
20907 might_sleep();
20908 }
20909
20910+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20911+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
20912+ return;
20913+#endif
20914+
20915 vma = find_vma(mm, address);
20916 if (unlikely(!vma)) {
20917 bad_area(regs, error_code, address);
20918@@ -1091,18 +1355,24 @@ do_page_fault(struct pt_regs *regs, unsi
20919 bad_area(regs, error_code, address);
20920 return;
20921 }
20922- if (error_code & PF_USER) {
20923- /*
20924- * Accessing the stack below %sp is always a bug.
20925- * The large cushion allows instructions like enter
20926- * and pusha to work. ("enter $65535, $31" pushes
20927- * 32 pointers and then decrements %sp by 65535.)
20928- */
20929- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
20930- bad_area(regs, error_code, address);
20931- return;
20932- }
20933+ /*
20934+ * Accessing the stack below %sp is always a bug.
20935+ * The large cushion allows instructions like enter
20936+ * and pusha to work. ("enter $65535, $31" pushes
20937+ * 32 pointers and then decrements %sp by 65535.)
20938+ */
20939+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
20940+ bad_area(regs, error_code, address);
20941+ return;
20942+ }
20943+
20944+#ifdef CONFIG_PAX_SEGMEXEC
20945+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
20946+ bad_area(regs, error_code, address);
20947+ return;
20948 }
20949+#endif
20950+
20951 if (unlikely(expand_stack(vma, address))) {
20952 bad_area(regs, error_code, address);
20953 return;
20954@@ -1146,3 +1416,199 @@ good_area:
20955
20956 up_read(&mm->mmap_sem);
20957 }
20958+
20959+#ifdef CONFIG_PAX_EMUTRAMP
20960+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
20961+{
20962+ int err;
20963+
20964+ do { /* PaX: gcc trampoline emulation #1 */
20965+ unsigned char mov1, mov2;
20966+ unsigned short jmp;
20967+ unsigned int addr1, addr2;
20968+
20969+#ifdef CONFIG_X86_64
20970+ if ((regs->ip + 11) >> 32)
20971+ break;
20972+#endif
20973+
20974+ err = get_user(mov1, (unsigned char __user *)regs->ip);
20975+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
20976+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
20977+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
20978+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
20979+
20980+ if (err)
20981+ break;
20982+
20983+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
20984+ regs->cx = addr1;
20985+ regs->ax = addr2;
20986+ regs->ip = addr2;
20987+ return 2;
20988+ }
20989+ } while (0);
20990+
20991+ do { /* PaX: gcc trampoline emulation #2 */
20992+ unsigned char mov, jmp;
20993+ unsigned int addr1, addr2;
20994+
20995+#ifdef CONFIG_X86_64
20996+ if ((regs->ip + 9) >> 32)
20997+ break;
20998+#endif
20999+
21000+ err = get_user(mov, (unsigned char __user *)regs->ip);
21001+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21002+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21003+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21004+
21005+ if (err)
21006+ break;
21007+
21008+ if (mov == 0xB9 && jmp == 0xE9) {
21009+ regs->cx = addr1;
21010+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21011+ return 2;
21012+ }
21013+ } while (0);
21014+
21015+ return 1; /* PaX in action */
21016+}
21017+
21018+#ifdef CONFIG_X86_64
21019+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21020+{
21021+ int err;
21022+
21023+ do { /* PaX: gcc trampoline emulation #1 */
21024+ unsigned short mov1, mov2, jmp1;
21025+ unsigned char jmp2;
21026+ unsigned int addr1;
21027+ unsigned long addr2;
21028+
21029+ err = get_user(mov1, (unsigned short __user *)regs->ip);
21030+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
21031+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
21032+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
21033+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
21034+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
21035+
21036+ if (err)
21037+ break;
21038+
21039+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21040+ regs->r11 = addr1;
21041+ regs->r10 = addr2;
21042+ regs->ip = addr1;
21043+ return 2;
21044+ }
21045+ } while (0);
21046+
21047+ do { /* PaX: gcc trampoline emulation #2 */
21048+ unsigned short mov1, mov2, jmp1;
21049+ unsigned char jmp2;
21050+ unsigned long addr1, addr2;
21051+
21052+ err = get_user(mov1, (unsigned short __user *)regs->ip);
21053+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21054+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21055+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21056+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
21057+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
21058+
21059+ if (err)
21060+ break;
21061+
21062+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21063+ regs->r11 = addr1;
21064+ regs->r10 = addr2;
21065+ regs->ip = addr1;
21066+ return 2;
21067+ }
21068+ } while (0);
21069+
21070+ return 1; /* PaX in action */
21071+}
21072+#endif
21073+
21074+/*
21075+ * PaX: decide what to do with offenders (regs->ip = fault address)
21076+ *
21077+ * returns 1 when task should be killed
21078+ * 2 when gcc trampoline was detected
21079+ */
21080+static int pax_handle_fetch_fault(struct pt_regs *regs)
21081+{
21082+ if (v8086_mode(regs))
21083+ return 1;
21084+
21085+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
21086+ return 1;
21087+
21088+#ifdef CONFIG_X86_32
21089+ return pax_handle_fetch_fault_32(regs);
21090+#else
21091+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
21092+ return pax_handle_fetch_fault_32(regs);
21093+ else
21094+ return pax_handle_fetch_fault_64(regs);
21095+#endif
21096+}
21097+#endif
21098+
21099+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21100+void pax_report_insns(void *pc, void *sp)
21101+{
21102+ long i;
21103+
21104+ printk(KERN_ERR "PAX: bytes at PC: ");
21105+ for (i = 0; i < 20; i++) {
21106+ unsigned char c;
21107+ if (get_user(c, (__force unsigned char __user *)pc+i))
21108+ printk(KERN_CONT "?? ");
21109+ else
21110+ printk(KERN_CONT "%02x ", c);
21111+ }
21112+ printk("\n");
21113+
21114+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
21115+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
21116+ unsigned long c;
21117+ if (get_user(c, (__force unsigned long __user *)sp+i))
21118+#ifdef CONFIG_X86_32
21119+ printk(KERN_CONT "???????? ");
21120+#else
21121+ printk(KERN_CONT "???????????????? ");
21122+#endif
21123+ else
21124+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
21125+ }
21126+ printk("\n");
21127+}
21128+#endif
21129+
21130+/**
21131+ * probe_kernel_write(): safely attempt to write to a location
21132+ * @dst: address to write to
21133+ * @src: pointer to the data that shall be written
21134+ * @size: size of the data chunk
21135+ *
21136+ * Safely write to address @dst from the buffer at @src. If a kernel fault
21137+ * happens, handle that and return -EFAULT.
21138+ */
21139+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
21140+{
21141+ long ret;
21142+ mm_segment_t old_fs = get_fs();
21143+
21144+ set_fs(KERNEL_DS);
21145+ pagefault_disable();
21146+ pax_open_kernel();
21147+ ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
21148+ pax_close_kernel();
21149+ pagefault_enable();
21150+ set_fs(old_fs);
21151+
21152+ return ret ? -EFAULT : 0;
21153+}
21154diff -urNp linux-2.6.32.43/arch/x86/mm/gup.c linux-2.6.32.43/arch/x86/mm/gup.c
21155--- linux-2.6.32.43/arch/x86/mm/gup.c 2011-03-27 14:31:47.000000000 -0400
21156+++ linux-2.6.32.43/arch/x86/mm/gup.c 2011-04-17 15:56:46.000000000 -0400
21157@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
21158 addr = start;
21159 len = (unsigned long) nr_pages << PAGE_SHIFT;
21160 end = start + len;
21161- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21162+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21163 (void __user *)start, len)))
21164 return 0;
21165
21166diff -urNp linux-2.6.32.43/arch/x86/mm/highmem_32.c linux-2.6.32.43/arch/x86/mm/highmem_32.c
21167--- linux-2.6.32.43/arch/x86/mm/highmem_32.c 2011-03-27 14:31:47.000000000 -0400
21168+++ linux-2.6.32.43/arch/x86/mm/highmem_32.c 2011-04-17 15:56:46.000000000 -0400
21169@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
21170 idx = type + KM_TYPE_NR*smp_processor_id();
21171 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21172 BUG_ON(!pte_none(*(kmap_pte-idx)));
21173+
21174+ pax_open_kernel();
21175 set_pte(kmap_pte-idx, mk_pte(page, prot));
21176+ pax_close_kernel();
21177
21178 return (void *)vaddr;
21179 }
21180diff -urNp linux-2.6.32.43/arch/x86/mm/hugetlbpage.c linux-2.6.32.43/arch/x86/mm/hugetlbpage.c
21181--- linux-2.6.32.43/arch/x86/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
21182+++ linux-2.6.32.43/arch/x86/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
21183@@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmappe
21184 struct hstate *h = hstate_file(file);
21185 struct mm_struct *mm = current->mm;
21186 struct vm_area_struct *vma;
21187- unsigned long start_addr;
21188+ unsigned long start_addr, pax_task_size = TASK_SIZE;
21189+
21190+#ifdef CONFIG_PAX_SEGMEXEC
21191+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21192+ pax_task_size = SEGMEXEC_TASK_SIZE;
21193+#endif
21194+
21195+ pax_task_size -= PAGE_SIZE;
21196
21197 if (len > mm->cached_hole_size) {
21198- start_addr = mm->free_area_cache;
21199+ start_addr = mm->free_area_cache;
21200 } else {
21201- start_addr = TASK_UNMAPPED_BASE;
21202- mm->cached_hole_size = 0;
21203+ start_addr = mm->mmap_base;
21204+ mm->cached_hole_size = 0;
21205 }
21206
21207 full_search:
21208@@ -281,26 +288,27 @@ full_search:
21209
21210 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
21211 /* At this point: (!vma || addr < vma->vm_end). */
21212- if (TASK_SIZE - len < addr) {
21213+ if (pax_task_size - len < addr) {
21214 /*
21215 * Start a new search - just in case we missed
21216 * some holes.
21217 */
21218- if (start_addr != TASK_UNMAPPED_BASE) {
21219- start_addr = TASK_UNMAPPED_BASE;
21220+ if (start_addr != mm->mmap_base) {
21221+ start_addr = mm->mmap_base;
21222 mm->cached_hole_size = 0;
21223 goto full_search;
21224 }
21225 return -ENOMEM;
21226 }
21227- if (!vma || addr + len <= vma->vm_start) {
21228- mm->free_area_cache = addr + len;
21229- return addr;
21230- }
21231+ if (check_heap_stack_gap(vma, addr, len))
21232+ break;
21233 if (addr + mm->cached_hole_size < vma->vm_start)
21234 mm->cached_hole_size = vma->vm_start - addr;
21235 addr = ALIGN(vma->vm_end, huge_page_size(h));
21236 }
21237+
21238+ mm->free_area_cache = addr + len;
21239+ return addr;
21240 }
21241
21242 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
21243@@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmappe
21244 {
21245 struct hstate *h = hstate_file(file);
21246 struct mm_struct *mm = current->mm;
21247- struct vm_area_struct *vma, *prev_vma;
21248- unsigned long base = mm->mmap_base, addr = addr0;
21249+ struct vm_area_struct *vma;
21250+ unsigned long base = mm->mmap_base, addr;
21251 unsigned long largest_hole = mm->cached_hole_size;
21252- int first_time = 1;
21253
21254 /* don't allow allocations above current base */
21255 if (mm->free_area_cache > base)
21256@@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmappe
21257 largest_hole = 0;
21258 mm->free_area_cache = base;
21259 }
21260-try_again:
21261+
21262 /* make sure it can fit in the remaining address space */
21263 if (mm->free_area_cache < len)
21264 goto fail;
21265
21266 /* either no address requested or cant fit in requested address hole */
21267- addr = (mm->free_area_cache - len) & huge_page_mask(h);
21268+ addr = (mm->free_area_cache - len);
21269 do {
21270+ addr &= huge_page_mask(h);
21271+ vma = find_vma(mm, addr);
21272 /*
21273 * Lookup failure means no vma is above this address,
21274 * i.e. return with success:
21275- */
21276- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
21277- return addr;
21278-
21279- /*
21280 * new region fits between prev_vma->vm_end and
21281 * vma->vm_start, use it:
21282 */
21283- if (addr + len <= vma->vm_start &&
21284- (!prev_vma || (addr >= prev_vma->vm_end))) {
21285+ if (check_heap_stack_gap(vma, addr, len)) {
21286 /* remember the address as a hint for next time */
21287- mm->cached_hole_size = largest_hole;
21288- return (mm->free_area_cache = addr);
21289- } else {
21290- /* pull free_area_cache down to the first hole */
21291- if (mm->free_area_cache == vma->vm_end) {
21292- mm->free_area_cache = vma->vm_start;
21293- mm->cached_hole_size = largest_hole;
21294- }
21295+ mm->cached_hole_size = largest_hole;
21296+ return (mm->free_area_cache = addr);
21297+ }
21298+ /* pull free_area_cache down to the first hole */
21299+ if (mm->free_area_cache == vma->vm_end) {
21300+ mm->free_area_cache = vma->vm_start;
21301+ mm->cached_hole_size = largest_hole;
21302 }
21303
21304 /* remember the largest hole we saw so far */
21305 if (addr + largest_hole < vma->vm_start)
21306- largest_hole = vma->vm_start - addr;
21307+ largest_hole = vma->vm_start - addr;
21308
21309 /* try just below the current vma->vm_start */
21310- addr = (vma->vm_start - len) & huge_page_mask(h);
21311- } while (len <= vma->vm_start);
21312+ addr = skip_heap_stack_gap(vma, len);
21313+ } while (!IS_ERR_VALUE(addr));
21314
21315 fail:
21316 /*
21317- * if hint left us with no space for the requested
21318- * mapping then try again:
21319- */
21320- if (first_time) {
21321- mm->free_area_cache = base;
21322- largest_hole = 0;
21323- first_time = 0;
21324- goto try_again;
21325- }
21326- /*
21327 * A failed mmap() very likely causes application failure,
21328 * so fall back to the bottom-up function here. This scenario
21329 * can happen with large stack limits and large mmap()
21330 * allocations.
21331 */
21332- mm->free_area_cache = TASK_UNMAPPED_BASE;
21333+
21334+#ifdef CONFIG_PAX_SEGMEXEC
21335+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21336+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
21337+ else
21338+#endif
21339+
21340+ mm->mmap_base = TASK_UNMAPPED_BASE;
21341+
21342+#ifdef CONFIG_PAX_RANDMMAP
21343+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21344+ mm->mmap_base += mm->delta_mmap;
21345+#endif
21346+
21347+ mm->free_area_cache = mm->mmap_base;
21348 mm->cached_hole_size = ~0UL;
21349 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
21350 len, pgoff, flags);
21351@@ -387,6 +393,7 @@ fail:
21352 /*
21353 * Restore the topdown base:
21354 */
21355+ mm->mmap_base = base;
21356 mm->free_area_cache = base;
21357 mm->cached_hole_size = ~0UL;
21358
21359@@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
21360 struct hstate *h = hstate_file(file);
21361 struct mm_struct *mm = current->mm;
21362 struct vm_area_struct *vma;
21363+ unsigned long pax_task_size = TASK_SIZE;
21364
21365 if (len & ~huge_page_mask(h))
21366 return -EINVAL;
21367- if (len > TASK_SIZE)
21368+
21369+#ifdef CONFIG_PAX_SEGMEXEC
21370+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21371+ pax_task_size = SEGMEXEC_TASK_SIZE;
21372+#endif
21373+
21374+ pax_task_size -= PAGE_SIZE;
21375+
21376+ if (len > pax_task_size)
21377 return -ENOMEM;
21378
21379 if (flags & MAP_FIXED) {
21380@@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
21381 if (addr) {
21382 addr = ALIGN(addr, huge_page_size(h));
21383 vma = find_vma(mm, addr);
21384- if (TASK_SIZE - len >= addr &&
21385- (!vma || addr + len <= vma->vm_start))
21386+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
21387 return addr;
21388 }
21389 if (mm->get_unmapped_area == arch_get_unmapped_area)
21390diff -urNp linux-2.6.32.43/arch/x86/mm/init_32.c linux-2.6.32.43/arch/x86/mm/init_32.c
21391--- linux-2.6.32.43/arch/x86/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
21392+++ linux-2.6.32.43/arch/x86/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
21393@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
21394 }
21395
21396 /*
21397- * Creates a middle page table and puts a pointer to it in the
21398- * given global directory entry. This only returns the gd entry
21399- * in non-PAE compilation mode, since the middle layer is folded.
21400- */
21401-static pmd_t * __init one_md_table_init(pgd_t *pgd)
21402-{
21403- pud_t *pud;
21404- pmd_t *pmd_table;
21405-
21406-#ifdef CONFIG_X86_PAE
21407- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
21408- if (after_bootmem)
21409- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
21410- else
21411- pmd_table = (pmd_t *)alloc_low_page();
21412- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
21413- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
21414- pud = pud_offset(pgd, 0);
21415- BUG_ON(pmd_table != pmd_offset(pud, 0));
21416-
21417- return pmd_table;
21418- }
21419-#endif
21420- pud = pud_offset(pgd, 0);
21421- pmd_table = pmd_offset(pud, 0);
21422-
21423- return pmd_table;
21424-}
21425-
21426-/*
21427 * Create a page table and place a pointer to it in a middle page
21428 * directory entry:
21429 */
21430@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
21431 page_table = (pte_t *)alloc_low_page();
21432
21433 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
21434+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21435+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
21436+#else
21437 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
21438+#endif
21439 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
21440 }
21441
21442 return pte_offset_kernel(pmd, 0);
21443 }
21444
21445+static pmd_t * __init one_md_table_init(pgd_t *pgd)
21446+{
21447+ pud_t *pud;
21448+ pmd_t *pmd_table;
21449+
21450+ pud = pud_offset(pgd, 0);
21451+ pmd_table = pmd_offset(pud, 0);
21452+
21453+ return pmd_table;
21454+}
21455+
21456 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
21457 {
21458 int pgd_idx = pgd_index(vaddr);
21459@@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
21460 int pgd_idx, pmd_idx;
21461 unsigned long vaddr;
21462 pgd_t *pgd;
21463+ pud_t *pud;
21464 pmd_t *pmd;
21465 pte_t *pte = NULL;
21466
21467@@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
21468 pgd = pgd_base + pgd_idx;
21469
21470 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
21471- pmd = one_md_table_init(pgd);
21472- pmd = pmd + pmd_index(vaddr);
21473+ pud = pud_offset(pgd, vaddr);
21474+ pmd = pmd_offset(pud, vaddr);
21475+
21476+#ifdef CONFIG_X86_PAE
21477+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21478+#endif
21479+
21480 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
21481 pmd++, pmd_idx++) {
21482 pte = page_table_kmap_check(one_page_table_init(pmd),
21483@@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
21484 }
21485 }
21486
21487-static inline int is_kernel_text(unsigned long addr)
21488+static inline int is_kernel_text(unsigned long start, unsigned long end)
21489 {
21490- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
21491- return 1;
21492- return 0;
21493+ if ((start > ktla_ktva((unsigned long)_etext) ||
21494+ end <= ktla_ktva((unsigned long)_stext)) &&
21495+ (start > ktla_ktva((unsigned long)_einittext) ||
21496+ end <= ktla_ktva((unsigned long)_sinittext)) &&
21497+
21498+#ifdef CONFIG_ACPI_SLEEP
21499+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
21500+#endif
21501+
21502+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
21503+ return 0;
21504+ return 1;
21505 }
21506
21507 /*
21508@@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
21509 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
21510 unsigned long start_pfn, end_pfn;
21511 pgd_t *pgd_base = swapper_pg_dir;
21512- int pgd_idx, pmd_idx, pte_ofs;
21513+ unsigned int pgd_idx, pmd_idx, pte_ofs;
21514 unsigned long pfn;
21515 pgd_t *pgd;
21516+ pud_t *pud;
21517 pmd_t *pmd;
21518 pte_t *pte;
21519 unsigned pages_2m, pages_4k;
21520@@ -278,8 +279,13 @@ repeat:
21521 pfn = start_pfn;
21522 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21523 pgd = pgd_base + pgd_idx;
21524- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
21525- pmd = one_md_table_init(pgd);
21526+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
21527+ pud = pud_offset(pgd, 0);
21528+ pmd = pmd_offset(pud, 0);
21529+
21530+#ifdef CONFIG_X86_PAE
21531+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21532+#endif
21533
21534 if (pfn >= end_pfn)
21535 continue;
21536@@ -291,14 +297,13 @@ repeat:
21537 #endif
21538 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
21539 pmd++, pmd_idx++) {
21540- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
21541+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
21542
21543 /*
21544 * Map with big pages if possible, otherwise
21545 * create normal page tables:
21546 */
21547 if (use_pse) {
21548- unsigned int addr2;
21549 pgprot_t prot = PAGE_KERNEL_LARGE;
21550 /*
21551 * first pass will use the same initial
21552@@ -308,11 +313,7 @@ repeat:
21553 __pgprot(PTE_IDENT_ATTR |
21554 _PAGE_PSE);
21555
21556- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
21557- PAGE_OFFSET + PAGE_SIZE-1;
21558-
21559- if (is_kernel_text(addr) ||
21560- is_kernel_text(addr2))
21561+ if (is_kernel_text(address, address + PMD_SIZE))
21562 prot = PAGE_KERNEL_LARGE_EXEC;
21563
21564 pages_2m++;
21565@@ -329,7 +330,7 @@ repeat:
21566 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21567 pte += pte_ofs;
21568 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
21569- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
21570+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
21571 pgprot_t prot = PAGE_KERNEL;
21572 /*
21573 * first pass will use the same initial
21574@@ -337,7 +338,7 @@ repeat:
21575 */
21576 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
21577
21578- if (is_kernel_text(addr))
21579+ if (is_kernel_text(address, address + PAGE_SIZE))
21580 prot = PAGE_KERNEL_EXEC;
21581
21582 pages_4k++;
21583@@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
21584
21585 pud = pud_offset(pgd, va);
21586 pmd = pmd_offset(pud, va);
21587- if (!pmd_present(*pmd))
21588+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
21589 break;
21590
21591 pte = pte_offset_kernel(pmd, va);
21592@@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
21593
21594 static void __init pagetable_init(void)
21595 {
21596- pgd_t *pgd_base = swapper_pg_dir;
21597-
21598- permanent_kmaps_init(pgd_base);
21599+ permanent_kmaps_init(swapper_pg_dir);
21600 }
21601
21602 #ifdef CONFIG_ACPI_SLEEP
21603@@ -551,12 +550,12 @@ static void __init pagetable_init(void)
21604 * ACPI suspend needs this for resume, because things like the intel-agp
21605 * driver might have split up a kernel 4MB mapping.
21606 */
21607-char swsusp_pg_dir[PAGE_SIZE]
21608+pgd_t swsusp_pg_dir[PTRS_PER_PGD]
21609 __attribute__ ((aligned(PAGE_SIZE)));
21610
21611 static inline void save_pg_dir(void)
21612 {
21613- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
21614+ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
21615 }
21616 #else /* !CONFIG_ACPI_SLEEP */
21617 static inline void save_pg_dir(void)
21618@@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
21619 flush_tlb_all();
21620 }
21621
21622-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21623+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21624 EXPORT_SYMBOL_GPL(__supported_pte_mask);
21625
21626 /* user-defined highmem size */
21627@@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void
21628 * Initialize the boot-time allocator (with low memory only):
21629 */
21630 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
21631- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21632+ bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21633 PAGE_SIZE);
21634 if (bootmap == -1L)
21635 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
21636@@ -864,6 +863,12 @@ void __init mem_init(void)
21637
21638 pci_iommu_alloc();
21639
21640+#ifdef CONFIG_PAX_PER_CPU_PGD
21641+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21642+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21643+ KERNEL_PGD_PTRS);
21644+#endif
21645+
21646 #ifdef CONFIG_FLATMEM
21647 BUG_ON(!mem_map);
21648 #endif
21649@@ -881,7 +886,7 @@ void __init mem_init(void)
21650 set_highmem_pages_init();
21651
21652 codesize = (unsigned long) &_etext - (unsigned long) &_text;
21653- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
21654+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
21655 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
21656
21657 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
21658@@ -923,10 +928,10 @@ void __init mem_init(void)
21659 ((unsigned long)&__init_end -
21660 (unsigned long)&__init_begin) >> 10,
21661
21662- (unsigned long)&_etext, (unsigned long)&_edata,
21663- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
21664+ (unsigned long)&_sdata, (unsigned long)&_edata,
21665+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
21666
21667- (unsigned long)&_text, (unsigned long)&_etext,
21668+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
21669 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
21670
21671 /*
21672@@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
21673 if (!kernel_set_to_readonly)
21674 return;
21675
21676+ start = ktla_ktva(start);
21677 pr_debug("Set kernel text: %lx - %lx for read write\n",
21678 start, start+size);
21679
21680@@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
21681 if (!kernel_set_to_readonly)
21682 return;
21683
21684+ start = ktla_ktva(start);
21685 pr_debug("Set kernel text: %lx - %lx for read only\n",
21686 start, start+size);
21687
21688@@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
21689 unsigned long start = PFN_ALIGN(_text);
21690 unsigned long size = PFN_ALIGN(_etext) - start;
21691
21692+ start = ktla_ktva(start);
21693 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
21694 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
21695 size >> 10);
21696diff -urNp linux-2.6.32.43/arch/x86/mm/init_64.c linux-2.6.32.43/arch/x86/mm/init_64.c
21697--- linux-2.6.32.43/arch/x86/mm/init_64.c 2011-04-17 17:00:52.000000000 -0400
21698+++ linux-2.6.32.43/arch/x86/mm/init_64.c 2011-04-17 17:03:05.000000000 -0400
21699@@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
21700 pmd = fill_pmd(pud, vaddr);
21701 pte = fill_pte(pmd, vaddr);
21702
21703+ pax_open_kernel();
21704 set_pte(pte, new_pte);
21705+ pax_close_kernel();
21706
21707 /*
21708 * It's enough to flush this one mapping.
21709@@ -223,14 +225,12 @@ static void __init __init_extra_mapping(
21710 pgd = pgd_offset_k((unsigned long)__va(phys));
21711 if (pgd_none(*pgd)) {
21712 pud = (pud_t *) spp_getpage();
21713- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
21714- _PAGE_USER));
21715+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
21716 }
21717 pud = pud_offset(pgd, (unsigned long)__va(phys));
21718 if (pud_none(*pud)) {
21719 pmd = (pmd_t *) spp_getpage();
21720- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
21721- _PAGE_USER));
21722+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
21723 }
21724 pmd = pmd_offset(pud, phys);
21725 BUG_ON(!pmd_none(*pmd));
21726@@ -675,6 +675,12 @@ void __init mem_init(void)
21727
21728 pci_iommu_alloc();
21729
21730+#ifdef CONFIG_PAX_PER_CPU_PGD
21731+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21732+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21733+ KERNEL_PGD_PTRS);
21734+#endif
21735+
21736 /* clear_bss() already clear the empty_zero_page */
21737
21738 reservedpages = 0;
21739@@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
21740 static struct vm_area_struct gate_vma = {
21741 .vm_start = VSYSCALL_START,
21742 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
21743- .vm_page_prot = PAGE_READONLY_EXEC,
21744- .vm_flags = VM_READ | VM_EXEC
21745+ .vm_page_prot = PAGE_READONLY,
21746+ .vm_flags = VM_READ
21747 };
21748
21749 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
21750@@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long a
21751
21752 const char *arch_vma_name(struct vm_area_struct *vma)
21753 {
21754- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21755+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21756 return "[vdso]";
21757 if (vma == &gate_vma)
21758 return "[vsyscall]";
21759diff -urNp linux-2.6.32.43/arch/x86/mm/init.c linux-2.6.32.43/arch/x86/mm/init.c
21760--- linux-2.6.32.43/arch/x86/mm/init.c 2011-04-17 17:00:52.000000000 -0400
21761+++ linux-2.6.32.43/arch/x86/mm/init.c 2011-06-07 19:06:09.000000000 -0400
21762@@ -69,11 +69,7 @@ static void __init find_early_table_spac
21763 * cause a hotspot and fill up ZONE_DMA. The page tables
21764 * need roughly 0.5KB per GB.
21765 */
21766-#ifdef CONFIG_X86_32
21767- start = 0x7000;
21768-#else
21769- start = 0x8000;
21770-#endif
21771+ start = 0x100000;
21772 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
21773 tables, PAGE_SIZE);
21774 if (e820_table_start == -1UL)
21775@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
21776 #endif
21777
21778 set_nx();
21779- if (nx_enabled)
21780+ if (nx_enabled && cpu_has_nx)
21781 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
21782
21783 /* Enable PSE if available */
21784@@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_m
21785 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
21786 * mmio resources as well as potential bios/acpi data regions.
21787 */
21788+
21789 int devmem_is_allowed(unsigned long pagenr)
21790 {
21791+#ifdef CONFIG_GRKERNSEC_KMEM
21792+ /* allow BDA */
21793+ if (!pagenr)
21794+ return 1;
21795+ /* allow EBDA */
21796+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
21797+ return 1;
21798+ /* allow ISA/video mem */
21799+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21800+ return 1;
21801+ /* throw out everything else below 1MB */
21802+ if (pagenr <= 256)
21803+ return 0;
21804+#else
21805 if (pagenr <= 256)
21806 return 1;
21807+#endif
21808+
21809 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
21810 return 0;
21811 if (!page_is_ram(pagenr))
21812@@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigne
21813
21814 void free_initmem(void)
21815 {
21816+
21817+#ifdef CONFIG_PAX_KERNEXEC
21818+#ifdef CONFIG_X86_32
21819+ /* PaX: limit KERNEL_CS to actual size */
21820+ unsigned long addr, limit;
21821+ struct desc_struct d;
21822+ int cpu;
21823+
21824+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
21825+ limit = (limit - 1UL) >> PAGE_SHIFT;
21826+
21827+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
21828+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
21829+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
21830+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
21831+ }
21832+
21833+ /* PaX: make KERNEL_CS read-only */
21834+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
21835+ if (!paravirt_enabled())
21836+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
21837+/*
21838+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
21839+ pgd = pgd_offset_k(addr);
21840+ pud = pud_offset(pgd, addr);
21841+ pmd = pmd_offset(pud, addr);
21842+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21843+ }
21844+*/
21845+#ifdef CONFIG_X86_PAE
21846+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
21847+/*
21848+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
21849+ pgd = pgd_offset_k(addr);
21850+ pud = pud_offset(pgd, addr);
21851+ pmd = pmd_offset(pud, addr);
21852+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21853+ }
21854+*/
21855+#endif
21856+
21857+#ifdef CONFIG_MODULES
21858+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
21859+#endif
21860+
21861+#else
21862+ pgd_t *pgd;
21863+ pud_t *pud;
21864+ pmd_t *pmd;
21865+ unsigned long addr, end;
21866+
21867+ /* PaX: make kernel code/rodata read-only, rest non-executable */
21868+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
21869+ pgd = pgd_offset_k(addr);
21870+ pud = pud_offset(pgd, addr);
21871+ pmd = pmd_offset(pud, addr);
21872+ if (!pmd_present(*pmd))
21873+ continue;
21874+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
21875+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21876+ else
21877+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
21878+ }
21879+
21880+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
21881+ end = addr + KERNEL_IMAGE_SIZE;
21882+ for (; addr < end; addr += PMD_SIZE) {
21883+ pgd = pgd_offset_k(addr);
21884+ pud = pud_offset(pgd, addr);
21885+ pmd = pmd_offset(pud, addr);
21886+ if (!pmd_present(*pmd))
21887+ continue;
21888+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
21889+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
21890+ }
21891+#endif
21892+
21893+ flush_tlb_all();
21894+#endif
21895+
21896 free_init_pages("unused kernel memory",
21897 (unsigned long)(&__init_begin),
21898 (unsigned long)(&__init_end));
21899diff -urNp linux-2.6.32.43/arch/x86/mm/iomap_32.c linux-2.6.32.43/arch/x86/mm/iomap_32.c
21900--- linux-2.6.32.43/arch/x86/mm/iomap_32.c 2011-03-27 14:31:47.000000000 -0400
21901+++ linux-2.6.32.43/arch/x86/mm/iomap_32.c 2011-04-17 15:56:46.000000000 -0400
21902@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
21903 debug_kmap_atomic(type);
21904 idx = type + KM_TYPE_NR * smp_processor_id();
21905 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21906+
21907+ pax_open_kernel();
21908 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
21909+ pax_close_kernel();
21910+
21911 arch_flush_lazy_mmu_mode();
21912
21913 return (void *)vaddr;
21914diff -urNp linux-2.6.32.43/arch/x86/mm/ioremap.c linux-2.6.32.43/arch/x86/mm/ioremap.c
21915--- linux-2.6.32.43/arch/x86/mm/ioremap.c 2011-03-27 14:31:47.000000000 -0400
21916+++ linux-2.6.32.43/arch/x86/mm/ioremap.c 2011-04-17 15:56:46.000000000 -0400
21917@@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
21918 * Second special case: Some BIOSen report the PC BIOS
21919 * area (640->1Mb) as ram even though it is not.
21920 */
21921- if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
21922- pagenr < (BIOS_END >> PAGE_SHIFT))
21923+ if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
21924+ pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
21925 return 0;
21926
21927 for (i = 0; i < e820.nr_map; i++) {
21928@@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(re
21929 /*
21930 * Don't allow anybody to remap normal RAM that we're using..
21931 */
21932- for (pfn = phys_addr >> PAGE_SHIFT;
21933- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
21934- pfn++) {
21935-
21936+ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
21937 int is_ram = page_is_ram(pfn);
21938
21939- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
21940+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
21941 return NULL;
21942 WARN_ON_ONCE(is_ram);
21943 }
21944@@ -407,7 +404,7 @@ static int __init early_ioremap_debug_se
21945 early_param("early_ioremap_debug", early_ioremap_debug_setup);
21946
21947 static __initdata int after_paging_init;
21948-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
21949+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
21950
21951 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
21952 {
21953@@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
21954 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
21955
21956 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
21957- memset(bm_pte, 0, sizeof(bm_pte));
21958- pmd_populate_kernel(&init_mm, pmd, bm_pte);
21959+ pmd_populate_user(&init_mm, pmd, bm_pte);
21960
21961 /*
21962 * The boot-ioremap range spans multiple pmds, for which
21963diff -urNp linux-2.6.32.43/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.32.43/arch/x86/mm/kmemcheck/kmemcheck.c
21964--- linux-2.6.32.43/arch/x86/mm/kmemcheck/kmemcheck.c 2011-03-27 14:31:47.000000000 -0400
21965+++ linux-2.6.32.43/arch/x86/mm/kmemcheck/kmemcheck.c 2011-04-17 15:56:46.000000000 -0400
21966@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
21967 * memory (e.g. tracked pages)? For now, we need this to avoid
21968 * invoking kmemcheck for PnP BIOS calls.
21969 */
21970- if (regs->flags & X86_VM_MASK)
21971+ if (v8086_mode(regs))
21972 return false;
21973- if (regs->cs != __KERNEL_CS)
21974+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
21975 return false;
21976
21977 pte = kmemcheck_pte_lookup(address);
21978diff -urNp linux-2.6.32.43/arch/x86/mm/mmap.c linux-2.6.32.43/arch/x86/mm/mmap.c
21979--- linux-2.6.32.43/arch/x86/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
21980+++ linux-2.6.32.43/arch/x86/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
21981@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
21982 * Leave an at least ~128 MB hole with possible stack randomization.
21983 */
21984 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
21985-#define MAX_GAP (TASK_SIZE/6*5)
21986+#define MAX_GAP (pax_task_size/6*5)
21987
21988 /*
21989 * True on X86_32 or when emulating IA32 on X86_64
21990@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
21991 return rnd << PAGE_SHIFT;
21992 }
21993
21994-static unsigned long mmap_base(void)
21995+static unsigned long mmap_base(struct mm_struct *mm)
21996 {
21997 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
21998+ unsigned long pax_task_size = TASK_SIZE;
21999+
22000+#ifdef CONFIG_PAX_SEGMEXEC
22001+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22002+ pax_task_size = SEGMEXEC_TASK_SIZE;
22003+#endif
22004
22005 if (gap < MIN_GAP)
22006 gap = MIN_GAP;
22007 else if (gap > MAX_GAP)
22008 gap = MAX_GAP;
22009
22010- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
22011+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
22012 }
22013
22014 /*
22015 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
22016 * does, but not when emulating X86_32
22017 */
22018-static unsigned long mmap_legacy_base(void)
22019+static unsigned long mmap_legacy_base(struct mm_struct *mm)
22020 {
22021- if (mmap_is_ia32())
22022+ if (mmap_is_ia32()) {
22023+
22024+#ifdef CONFIG_PAX_SEGMEXEC
22025+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22026+ return SEGMEXEC_TASK_UNMAPPED_BASE;
22027+ else
22028+#endif
22029+
22030 return TASK_UNMAPPED_BASE;
22031- else
22032+ } else
22033 return TASK_UNMAPPED_BASE + mmap_rnd();
22034 }
22035
22036@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
22037 void arch_pick_mmap_layout(struct mm_struct *mm)
22038 {
22039 if (mmap_is_legacy()) {
22040- mm->mmap_base = mmap_legacy_base();
22041+ mm->mmap_base = mmap_legacy_base(mm);
22042+
22043+#ifdef CONFIG_PAX_RANDMMAP
22044+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22045+ mm->mmap_base += mm->delta_mmap;
22046+#endif
22047+
22048 mm->get_unmapped_area = arch_get_unmapped_area;
22049 mm->unmap_area = arch_unmap_area;
22050 } else {
22051- mm->mmap_base = mmap_base();
22052+ mm->mmap_base = mmap_base(mm);
22053+
22054+#ifdef CONFIG_PAX_RANDMMAP
22055+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22056+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
22057+#endif
22058+
22059 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
22060 mm->unmap_area = arch_unmap_area_topdown;
22061 }
22062diff -urNp linux-2.6.32.43/arch/x86/mm/mmio-mod.c linux-2.6.32.43/arch/x86/mm/mmio-mod.c
22063--- linux-2.6.32.43/arch/x86/mm/mmio-mod.c 2011-03-27 14:31:47.000000000 -0400
22064+++ linux-2.6.32.43/arch/x86/mm/mmio-mod.c 2011-07-06 19:53:33.000000000 -0400
22065@@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, s
22066 break;
22067 default:
22068 {
22069- unsigned char *ip = (unsigned char *)instptr;
22070+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
22071 my_trace->opcode = MMIO_UNKNOWN_OP;
22072 my_trace->width = 0;
22073 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
22074@@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p,
22075 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
22076 void __iomem *addr)
22077 {
22078- static atomic_t next_id;
22079+ static atomic_unchecked_t next_id;
22080 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
22081 /* These are page-unaligned. */
22082 struct mmiotrace_map map = {
22083@@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_
22084 .private = trace
22085 },
22086 .phys = offset,
22087- .id = atomic_inc_return(&next_id)
22088+ .id = atomic_inc_return_unchecked(&next_id)
22089 };
22090 map.map_id = trace->id;
22091
22092diff -urNp linux-2.6.32.43/arch/x86/mm/numa_32.c linux-2.6.32.43/arch/x86/mm/numa_32.c
22093--- linux-2.6.32.43/arch/x86/mm/numa_32.c 2011-03-27 14:31:47.000000000 -0400
22094+++ linux-2.6.32.43/arch/x86/mm/numa_32.c 2011-04-17 15:56:46.000000000 -0400
22095@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
22096 }
22097 #endif
22098
22099-extern unsigned long find_max_low_pfn(void);
22100 extern unsigned long highend_pfn, highstart_pfn;
22101
22102 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
22103diff -urNp linux-2.6.32.43/arch/x86/mm/pageattr.c linux-2.6.32.43/arch/x86/mm/pageattr.c
22104--- linux-2.6.32.43/arch/x86/mm/pageattr.c 2011-03-27 14:31:47.000000000 -0400
22105+++ linux-2.6.32.43/arch/x86/mm/pageattr.c 2011-04-17 15:56:46.000000000 -0400
22106@@ -261,16 +261,17 @@ static inline pgprot_t static_protection
22107 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
22108 */
22109 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
22110- pgprot_val(forbidden) |= _PAGE_NX;
22111+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22112
22113 /*
22114 * The kernel text needs to be executable for obvious reasons
22115 * Does not cover __inittext since that is gone later on. On
22116 * 64bit we do not enforce !NX on the low mapping
22117 */
22118- if (within(address, (unsigned long)_text, (unsigned long)_etext))
22119- pgprot_val(forbidden) |= _PAGE_NX;
22120+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
22121+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22122
22123+#ifdef CONFIG_DEBUG_RODATA
22124 /*
22125 * The .rodata section needs to be read-only. Using the pfn
22126 * catches all aliases.
22127@@ -278,6 +279,14 @@ static inline pgprot_t static_protection
22128 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
22129 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
22130 pgprot_val(forbidden) |= _PAGE_RW;
22131+#endif
22132+
22133+#ifdef CONFIG_PAX_KERNEXEC
22134+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
22135+ pgprot_val(forbidden) |= _PAGE_RW;
22136+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22137+ }
22138+#endif
22139
22140 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
22141
22142@@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
22143 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
22144 {
22145 /* change init_mm */
22146+ pax_open_kernel();
22147 set_pte_atomic(kpte, pte);
22148+
22149 #ifdef CONFIG_X86_32
22150 if (!SHARED_KERNEL_PMD) {
22151+
22152+#ifdef CONFIG_PAX_PER_CPU_PGD
22153+ unsigned long cpu;
22154+#else
22155 struct page *page;
22156+#endif
22157
22158+#ifdef CONFIG_PAX_PER_CPU_PGD
22159+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22160+ pgd_t *pgd = get_cpu_pgd(cpu);
22161+#else
22162 list_for_each_entry(page, &pgd_list, lru) {
22163- pgd_t *pgd;
22164+ pgd_t *pgd = (pgd_t *)page_address(page);
22165+#endif
22166+
22167 pud_t *pud;
22168 pmd_t *pmd;
22169
22170- pgd = (pgd_t *)page_address(page) + pgd_index(address);
22171+ pgd += pgd_index(address);
22172 pud = pud_offset(pgd, address);
22173 pmd = pmd_offset(pud, address);
22174 set_pte_atomic((pte_t *)pmd, pte);
22175 }
22176 }
22177 #endif
22178+ pax_close_kernel();
22179 }
22180
22181 static int
22182diff -urNp linux-2.6.32.43/arch/x86/mm/pageattr-test.c linux-2.6.32.43/arch/x86/mm/pageattr-test.c
22183--- linux-2.6.32.43/arch/x86/mm/pageattr-test.c 2011-03-27 14:31:47.000000000 -0400
22184+++ linux-2.6.32.43/arch/x86/mm/pageattr-test.c 2011-04-17 15:56:46.000000000 -0400
22185@@ -36,7 +36,7 @@ enum {
22186
22187 static int pte_testbit(pte_t pte)
22188 {
22189- return pte_flags(pte) & _PAGE_UNUSED1;
22190+ return pte_flags(pte) & _PAGE_CPA_TEST;
22191 }
22192
22193 struct split_state {
22194diff -urNp linux-2.6.32.43/arch/x86/mm/pat.c linux-2.6.32.43/arch/x86/mm/pat.c
22195--- linux-2.6.32.43/arch/x86/mm/pat.c 2011-03-27 14:31:47.000000000 -0400
22196+++ linux-2.6.32.43/arch/x86/mm/pat.c 2011-04-17 15:56:46.000000000 -0400
22197@@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct
22198
22199 conflict:
22200 printk(KERN_INFO "%s:%d conflicting memory types "
22201- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
22202+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
22203 new->end, cattr_name(new->type), cattr_name(entry->type));
22204 return -EBUSY;
22205 }
22206@@ -559,7 +559,7 @@ unlock_ret:
22207
22208 if (err) {
22209 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
22210- current->comm, current->pid, start, end);
22211+ current->comm, task_pid_nr(current), start, end);
22212 }
22213
22214 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
22215@@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig
22216 while (cursor < to) {
22217 if (!devmem_is_allowed(pfn)) {
22218 printk(KERN_INFO
22219- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
22220- current->comm, from, to);
22221+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
22222+ current->comm, from, to, cursor);
22223 return 0;
22224 }
22225 cursor += PAGE_SIZE;
22226@@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un
22227 printk(KERN_INFO
22228 "%s:%d ioremap_change_attr failed %s "
22229 "for %Lx-%Lx\n",
22230- current->comm, current->pid,
22231+ current->comm, task_pid_nr(current),
22232 cattr_name(flags),
22233 base, (unsigned long long)(base + size));
22234 return -EINVAL;
22235@@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr,
22236 free_memtype(paddr, paddr + size);
22237 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
22238 " for %Lx-%Lx, got %s\n",
22239- current->comm, current->pid,
22240+ current->comm, task_pid_nr(current),
22241 cattr_name(want_flags),
22242 (unsigned long long)paddr,
22243 (unsigned long long)(paddr + size),
22244diff -urNp linux-2.6.32.43/arch/x86/mm/pf_in.c linux-2.6.32.43/arch/x86/mm/pf_in.c
22245--- linux-2.6.32.43/arch/x86/mm/pf_in.c 2011-03-27 14:31:47.000000000 -0400
22246+++ linux-2.6.32.43/arch/x86/mm/pf_in.c 2011-07-06 19:53:33.000000000 -0400
22247@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
22248 int i;
22249 enum reason_type rv = OTHERS;
22250
22251- p = (unsigned char *)ins_addr;
22252+ p = (unsigned char *)ktla_ktva(ins_addr);
22253 p += skip_prefix(p, &prf);
22254 p += get_opcode(p, &opcode);
22255
22256@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
22257 struct prefix_bits prf;
22258 int i;
22259
22260- p = (unsigned char *)ins_addr;
22261+ p = (unsigned char *)ktla_ktva(ins_addr);
22262 p += skip_prefix(p, &prf);
22263 p += get_opcode(p, &opcode);
22264
22265@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
22266 struct prefix_bits prf;
22267 int i;
22268
22269- p = (unsigned char *)ins_addr;
22270+ p = (unsigned char *)ktla_ktva(ins_addr);
22271 p += skip_prefix(p, &prf);
22272 p += get_opcode(p, &opcode);
22273
22274@@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned l
22275 int i;
22276 unsigned long rv;
22277
22278- p = (unsigned char *)ins_addr;
22279+ p = (unsigned char *)ktla_ktva(ins_addr);
22280 p += skip_prefix(p, &prf);
22281 p += get_opcode(p, &opcode);
22282 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
22283@@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned l
22284 int i;
22285 unsigned long rv;
22286
22287- p = (unsigned char *)ins_addr;
22288+ p = (unsigned char *)ktla_ktva(ins_addr);
22289 p += skip_prefix(p, &prf);
22290 p += get_opcode(p, &opcode);
22291 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
22292diff -urNp linux-2.6.32.43/arch/x86/mm/pgtable_32.c linux-2.6.32.43/arch/x86/mm/pgtable_32.c
22293--- linux-2.6.32.43/arch/x86/mm/pgtable_32.c 2011-03-27 14:31:47.000000000 -0400
22294+++ linux-2.6.32.43/arch/x86/mm/pgtable_32.c 2011-04-17 15:56:46.000000000 -0400
22295@@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
22296 return;
22297 }
22298 pte = pte_offset_kernel(pmd, vaddr);
22299+
22300+ pax_open_kernel();
22301 if (pte_val(pteval))
22302 set_pte_at(&init_mm, vaddr, pte, pteval);
22303 else
22304 pte_clear(&init_mm, vaddr, pte);
22305+ pax_close_kernel();
22306
22307 /*
22308 * It's enough to flush this one mapping.
22309diff -urNp linux-2.6.32.43/arch/x86/mm/pgtable.c linux-2.6.32.43/arch/x86/mm/pgtable.c
22310--- linux-2.6.32.43/arch/x86/mm/pgtable.c 2011-03-27 14:31:47.000000000 -0400
22311+++ linux-2.6.32.43/arch/x86/mm/pgtable.c 2011-05-11 18:25:15.000000000 -0400
22312@@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *p
22313 list_del(&page->lru);
22314 }
22315
22316-#define UNSHARED_PTRS_PER_PGD \
22317- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22318+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22319+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
22320
22321+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22322+{
22323+ while (count--)
22324+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
22325+}
22326+#endif
22327+
22328+#ifdef CONFIG_PAX_PER_CPU_PGD
22329+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22330+{
22331+ while (count--)
22332+
22333+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22334+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
22335+#else
22336+ *dst++ = *src++;
22337+#endif
22338+
22339+}
22340+#endif
22341+
22342+#ifdef CONFIG_X86_64
22343+#define pxd_t pud_t
22344+#define pyd_t pgd_t
22345+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
22346+#define pxd_free(mm, pud) pud_free((mm), (pud))
22347+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
22348+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
22349+#define PYD_SIZE PGDIR_SIZE
22350+#else
22351+#define pxd_t pmd_t
22352+#define pyd_t pud_t
22353+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
22354+#define pxd_free(mm, pud) pmd_free((mm), (pud))
22355+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
22356+#define pyd_offset(mm ,address) pud_offset((mm), (address))
22357+#define PYD_SIZE PUD_SIZE
22358+#endif
22359+
22360+#ifdef CONFIG_PAX_PER_CPU_PGD
22361+static inline void pgd_ctor(pgd_t *pgd) {}
22362+static inline void pgd_dtor(pgd_t *pgd) {}
22363+#else
22364 static void pgd_ctor(pgd_t *pgd)
22365 {
22366 /* If the pgd points to a shared pagetable level (either the
22367@@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
22368 pgd_list_del(pgd);
22369 spin_unlock_irqrestore(&pgd_lock, flags);
22370 }
22371+#endif
22372
22373 /*
22374 * List of all pgd's needed for non-PAE so it can invalidate entries
22375@@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
22376 * -- wli
22377 */
22378
22379-#ifdef CONFIG_X86_PAE
22380+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22381 /*
22382 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
22383 * updating the top-level pagetable entries to guarantee the
22384@@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
22385 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
22386 * and initialize the kernel pmds here.
22387 */
22388-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
22389+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22390
22391 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
22392 {
22393@@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm,
22394 */
22395 flush_tlb_mm(mm);
22396 }
22397+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
22398+#define PREALLOCATED_PXDS USER_PGD_PTRS
22399 #else /* !CONFIG_X86_PAE */
22400
22401 /* No need to prepopulate any pagetable entries in non-PAE modes. */
22402-#define PREALLOCATED_PMDS 0
22403+#define PREALLOCATED_PXDS 0
22404
22405 #endif /* CONFIG_X86_PAE */
22406
22407-static void free_pmds(pmd_t *pmds[])
22408+static void free_pxds(pxd_t *pxds[])
22409 {
22410 int i;
22411
22412- for(i = 0; i < PREALLOCATED_PMDS; i++)
22413- if (pmds[i])
22414- free_page((unsigned long)pmds[i]);
22415+ for(i = 0; i < PREALLOCATED_PXDS; i++)
22416+ if (pxds[i])
22417+ free_page((unsigned long)pxds[i]);
22418 }
22419
22420-static int preallocate_pmds(pmd_t *pmds[])
22421+static int preallocate_pxds(pxd_t *pxds[])
22422 {
22423 int i;
22424 bool failed = false;
22425
22426- for(i = 0; i < PREALLOCATED_PMDS; i++) {
22427- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
22428- if (pmd == NULL)
22429+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
22430+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
22431+ if (pxd == NULL)
22432 failed = true;
22433- pmds[i] = pmd;
22434+ pxds[i] = pxd;
22435 }
22436
22437 if (failed) {
22438- free_pmds(pmds);
22439+ free_pxds(pxds);
22440 return -ENOMEM;
22441 }
22442
22443@@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[
22444 * preallocate which never got a corresponding vma will need to be
22445 * freed manually.
22446 */
22447-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
22448+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
22449 {
22450 int i;
22451
22452- for(i = 0; i < PREALLOCATED_PMDS; i++) {
22453+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
22454 pgd_t pgd = pgdp[i];
22455
22456 if (pgd_val(pgd) != 0) {
22457- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
22458+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
22459
22460- pgdp[i] = native_make_pgd(0);
22461+ set_pgd(pgdp + i, native_make_pgd(0));
22462
22463- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
22464- pmd_free(mm, pmd);
22465+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
22466+ pxd_free(mm, pxd);
22467 }
22468 }
22469 }
22470
22471-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
22472+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
22473 {
22474- pud_t *pud;
22475+ pyd_t *pyd;
22476 unsigned long addr;
22477 int i;
22478
22479- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
22480+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
22481 return;
22482
22483- pud = pud_offset(pgd, 0);
22484+#ifdef CONFIG_X86_64
22485+ pyd = pyd_offset(mm, 0L);
22486+#else
22487+ pyd = pyd_offset(pgd, 0L);
22488+#endif
22489
22490- for (addr = i = 0; i < PREALLOCATED_PMDS;
22491- i++, pud++, addr += PUD_SIZE) {
22492- pmd_t *pmd = pmds[i];
22493+ for (addr = i = 0; i < PREALLOCATED_PXDS;
22494+ i++, pyd++, addr += PYD_SIZE) {
22495+ pxd_t *pxd = pxds[i];
22496
22497 if (i >= KERNEL_PGD_BOUNDARY)
22498- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22499- sizeof(pmd_t) * PTRS_PER_PMD);
22500+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22501+ sizeof(pxd_t) * PTRS_PER_PMD);
22502
22503- pud_populate(mm, pud, pmd);
22504+ pyd_populate(mm, pyd, pxd);
22505 }
22506 }
22507
22508 pgd_t *pgd_alloc(struct mm_struct *mm)
22509 {
22510 pgd_t *pgd;
22511- pmd_t *pmds[PREALLOCATED_PMDS];
22512+ pxd_t *pxds[PREALLOCATED_PXDS];
22513+
22514 unsigned long flags;
22515
22516 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
22517@@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22518
22519 mm->pgd = pgd;
22520
22521- if (preallocate_pmds(pmds) != 0)
22522+ if (preallocate_pxds(pxds) != 0)
22523 goto out_free_pgd;
22524
22525 if (paravirt_pgd_alloc(mm) != 0)
22526- goto out_free_pmds;
22527+ goto out_free_pxds;
22528
22529 /*
22530 * Make sure that pre-populating the pmds is atomic with
22531@@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22532 spin_lock_irqsave(&pgd_lock, flags);
22533
22534 pgd_ctor(pgd);
22535- pgd_prepopulate_pmd(mm, pgd, pmds);
22536+ pgd_prepopulate_pxd(mm, pgd, pxds);
22537
22538 spin_unlock_irqrestore(&pgd_lock, flags);
22539
22540 return pgd;
22541
22542-out_free_pmds:
22543- free_pmds(pmds);
22544+out_free_pxds:
22545+ free_pxds(pxds);
22546 out_free_pgd:
22547 free_page((unsigned long)pgd);
22548 out:
22549@@ -287,7 +338,7 @@ out:
22550
22551 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
22552 {
22553- pgd_mop_up_pmds(mm, pgd);
22554+ pgd_mop_up_pxds(mm, pgd);
22555 pgd_dtor(pgd);
22556 paravirt_pgd_free(mm, pgd);
22557 free_page((unsigned long)pgd);
22558diff -urNp linux-2.6.32.43/arch/x86/mm/setup_nx.c linux-2.6.32.43/arch/x86/mm/setup_nx.c
22559--- linux-2.6.32.43/arch/x86/mm/setup_nx.c 2011-03-27 14:31:47.000000000 -0400
22560+++ linux-2.6.32.43/arch/x86/mm/setup_nx.c 2011-04-17 15:56:46.000000000 -0400
22561@@ -4,11 +4,10 @@
22562
22563 #include <asm/pgtable.h>
22564
22565+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22566 int nx_enabled;
22567
22568-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22569-static int disable_nx __cpuinitdata;
22570-
22571+#ifndef CONFIG_PAX_PAGEEXEC
22572 /*
22573 * noexec = on|off
22574 *
22575@@ -22,32 +21,26 @@ static int __init noexec_setup(char *str
22576 if (!str)
22577 return -EINVAL;
22578 if (!strncmp(str, "on", 2)) {
22579- __supported_pte_mask |= _PAGE_NX;
22580- disable_nx = 0;
22581+ nx_enabled = 1;
22582 } else if (!strncmp(str, "off", 3)) {
22583- disable_nx = 1;
22584- __supported_pte_mask &= ~_PAGE_NX;
22585+ nx_enabled = 0;
22586 }
22587 return 0;
22588 }
22589 early_param("noexec", noexec_setup);
22590 #endif
22591+#endif
22592
22593 #ifdef CONFIG_X86_PAE
22594 void __init set_nx(void)
22595 {
22596- unsigned int v[4], l, h;
22597+ if (!nx_enabled && cpu_has_nx) {
22598+ unsigned l, h;
22599
22600- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
22601- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
22602-
22603- if ((v[3] & (1 << 20)) && !disable_nx) {
22604- rdmsr(MSR_EFER, l, h);
22605- l |= EFER_NX;
22606- wrmsr(MSR_EFER, l, h);
22607- nx_enabled = 1;
22608- __supported_pte_mask |= _PAGE_NX;
22609- }
22610+ __supported_pte_mask &= ~_PAGE_NX;
22611+ rdmsr(MSR_EFER, l, h);
22612+ l &= ~EFER_NX;
22613+ wrmsr(MSR_EFER, l, h);
22614 }
22615 }
22616 #else
22617@@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
22618 unsigned long efer;
22619
22620 rdmsrl(MSR_EFER, efer);
22621- if (!(efer & EFER_NX) || disable_nx)
22622+ if (!(efer & EFER_NX) || !nx_enabled)
22623 __supported_pte_mask &= ~_PAGE_NX;
22624 }
22625 #endif
22626diff -urNp linux-2.6.32.43/arch/x86/mm/tlb.c linux-2.6.32.43/arch/x86/mm/tlb.c
22627--- linux-2.6.32.43/arch/x86/mm/tlb.c 2011-03-27 14:31:47.000000000 -0400
22628+++ linux-2.6.32.43/arch/x86/mm/tlb.c 2011-04-23 12:56:10.000000000 -0400
22629@@ -61,7 +61,11 @@ void leave_mm(int cpu)
22630 BUG();
22631 cpumask_clear_cpu(cpu,
22632 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
22633+
22634+#ifndef CONFIG_PAX_PER_CPU_PGD
22635 load_cr3(swapper_pg_dir);
22636+#endif
22637+
22638 }
22639 EXPORT_SYMBOL_GPL(leave_mm);
22640
22641diff -urNp linux-2.6.32.43/arch/x86/oprofile/backtrace.c linux-2.6.32.43/arch/x86/oprofile/backtrace.c
22642--- linux-2.6.32.43/arch/x86/oprofile/backtrace.c 2011-03-27 14:31:47.000000000 -0400
22643+++ linux-2.6.32.43/arch/x86/oprofile/backtrace.c 2011-04-17 15:56:46.000000000 -0400
22644@@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
22645 struct frame_head bufhead[2];
22646
22647 /* Also check accessibility of one struct frame_head beyond */
22648- if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
22649+ if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
22650 return NULL;
22651 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
22652 return NULL;
22653@@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
22654 {
22655 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
22656
22657- if (!user_mode_vm(regs)) {
22658+ if (!user_mode(regs)) {
22659 unsigned long stack = kernel_stack_pointer(regs);
22660 if (depth)
22661 dump_trace(NULL, regs, (unsigned long *)stack, 0,
22662diff -urNp linux-2.6.32.43/arch/x86/oprofile/op_model_p4.c linux-2.6.32.43/arch/x86/oprofile/op_model_p4.c
22663--- linux-2.6.32.43/arch/x86/oprofile/op_model_p4.c 2011-03-27 14:31:47.000000000 -0400
22664+++ linux-2.6.32.43/arch/x86/oprofile/op_model_p4.c 2011-04-17 15:56:46.000000000 -0400
22665@@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
22666 #endif
22667 }
22668
22669-static int inline addr_increment(void)
22670+static inline int addr_increment(void)
22671 {
22672 #ifdef CONFIG_SMP
22673 return smp_num_siblings == 2 ? 2 : 1;
22674diff -urNp linux-2.6.32.43/arch/x86/pci/common.c linux-2.6.32.43/arch/x86/pci/common.c
22675--- linux-2.6.32.43/arch/x86/pci/common.c 2011-03-27 14:31:47.000000000 -0400
22676+++ linux-2.6.32.43/arch/x86/pci/common.c 2011-04-23 12:56:10.000000000 -0400
22677@@ -31,8 +31,8 @@ int noioapicreroute = 1;
22678 int pcibios_last_bus = -1;
22679 unsigned long pirq_table_addr;
22680 struct pci_bus *pci_root_bus;
22681-struct pci_raw_ops *raw_pci_ops;
22682-struct pci_raw_ops *raw_pci_ext_ops;
22683+const struct pci_raw_ops *raw_pci_ops;
22684+const struct pci_raw_ops *raw_pci_ext_ops;
22685
22686 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
22687 int reg, int len, u32 *val)
22688diff -urNp linux-2.6.32.43/arch/x86/pci/direct.c linux-2.6.32.43/arch/x86/pci/direct.c
22689--- linux-2.6.32.43/arch/x86/pci/direct.c 2011-03-27 14:31:47.000000000 -0400
22690+++ linux-2.6.32.43/arch/x86/pci/direct.c 2011-04-17 15:56:46.000000000 -0400
22691@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
22692
22693 #undef PCI_CONF1_ADDRESS
22694
22695-struct pci_raw_ops pci_direct_conf1 = {
22696+const struct pci_raw_ops pci_direct_conf1 = {
22697 .read = pci_conf1_read,
22698 .write = pci_conf1_write,
22699 };
22700@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
22701
22702 #undef PCI_CONF2_ADDRESS
22703
22704-struct pci_raw_ops pci_direct_conf2 = {
22705+const struct pci_raw_ops pci_direct_conf2 = {
22706 .read = pci_conf2_read,
22707 .write = pci_conf2_write,
22708 };
22709@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
22710 * This should be close to trivial, but it isn't, because there are buggy
22711 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
22712 */
22713-static int __init pci_sanity_check(struct pci_raw_ops *o)
22714+static int __init pci_sanity_check(const struct pci_raw_ops *o)
22715 {
22716 u32 x = 0;
22717 int year, devfn;
22718diff -urNp linux-2.6.32.43/arch/x86/pci/mmconfig_32.c linux-2.6.32.43/arch/x86/pci/mmconfig_32.c
22719--- linux-2.6.32.43/arch/x86/pci/mmconfig_32.c 2011-03-27 14:31:47.000000000 -0400
22720+++ linux-2.6.32.43/arch/x86/pci/mmconfig_32.c 2011-04-17 15:56:46.000000000 -0400
22721@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
22722 return 0;
22723 }
22724
22725-static struct pci_raw_ops pci_mmcfg = {
22726+static const struct pci_raw_ops pci_mmcfg = {
22727 .read = pci_mmcfg_read,
22728 .write = pci_mmcfg_write,
22729 };
22730diff -urNp linux-2.6.32.43/arch/x86/pci/mmconfig_64.c linux-2.6.32.43/arch/x86/pci/mmconfig_64.c
22731--- linux-2.6.32.43/arch/x86/pci/mmconfig_64.c 2011-03-27 14:31:47.000000000 -0400
22732+++ linux-2.6.32.43/arch/x86/pci/mmconfig_64.c 2011-04-17 15:56:46.000000000 -0400
22733@@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
22734 return 0;
22735 }
22736
22737-static struct pci_raw_ops pci_mmcfg = {
22738+static const struct pci_raw_ops pci_mmcfg = {
22739 .read = pci_mmcfg_read,
22740 .write = pci_mmcfg_write,
22741 };
22742diff -urNp linux-2.6.32.43/arch/x86/pci/numaq_32.c linux-2.6.32.43/arch/x86/pci/numaq_32.c
22743--- linux-2.6.32.43/arch/x86/pci/numaq_32.c 2011-03-27 14:31:47.000000000 -0400
22744+++ linux-2.6.32.43/arch/x86/pci/numaq_32.c 2011-04-17 15:56:46.000000000 -0400
22745@@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
22746
22747 #undef PCI_CONF1_MQ_ADDRESS
22748
22749-static struct pci_raw_ops pci_direct_conf1_mq = {
22750+static const struct pci_raw_ops pci_direct_conf1_mq = {
22751 .read = pci_conf1_mq_read,
22752 .write = pci_conf1_mq_write
22753 };
22754diff -urNp linux-2.6.32.43/arch/x86/pci/olpc.c linux-2.6.32.43/arch/x86/pci/olpc.c
22755--- linux-2.6.32.43/arch/x86/pci/olpc.c 2011-03-27 14:31:47.000000000 -0400
22756+++ linux-2.6.32.43/arch/x86/pci/olpc.c 2011-04-17 15:56:46.000000000 -0400
22757@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
22758 return 0;
22759 }
22760
22761-static struct pci_raw_ops pci_olpc_conf = {
22762+static const struct pci_raw_ops pci_olpc_conf = {
22763 .read = pci_olpc_read,
22764 .write = pci_olpc_write,
22765 };
22766diff -urNp linux-2.6.32.43/arch/x86/pci/pcbios.c linux-2.6.32.43/arch/x86/pci/pcbios.c
22767--- linux-2.6.32.43/arch/x86/pci/pcbios.c 2011-03-27 14:31:47.000000000 -0400
22768+++ linux-2.6.32.43/arch/x86/pci/pcbios.c 2011-04-17 15:56:46.000000000 -0400
22769@@ -56,50 +56,93 @@ union bios32 {
22770 static struct {
22771 unsigned long address;
22772 unsigned short segment;
22773-} bios32_indirect = { 0, __KERNEL_CS };
22774+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
22775
22776 /*
22777 * Returns the entry point for the given service, NULL on error
22778 */
22779
22780-static unsigned long bios32_service(unsigned long service)
22781+static unsigned long __devinit bios32_service(unsigned long service)
22782 {
22783 unsigned char return_code; /* %al */
22784 unsigned long address; /* %ebx */
22785 unsigned long length; /* %ecx */
22786 unsigned long entry; /* %edx */
22787 unsigned long flags;
22788+ struct desc_struct d, *gdt;
22789
22790 local_irq_save(flags);
22791- __asm__("lcall *(%%edi); cld"
22792+
22793+ gdt = get_cpu_gdt_table(smp_processor_id());
22794+
22795+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
22796+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22797+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
22798+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22799+
22800+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
22801 : "=a" (return_code),
22802 "=b" (address),
22803 "=c" (length),
22804 "=d" (entry)
22805 : "0" (service),
22806 "1" (0),
22807- "D" (&bios32_indirect));
22808+ "D" (&bios32_indirect),
22809+ "r"(__PCIBIOS_DS)
22810+ : "memory");
22811+
22812+ pax_open_kernel();
22813+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
22814+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
22815+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
22816+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
22817+ pax_close_kernel();
22818+
22819 local_irq_restore(flags);
22820
22821 switch (return_code) {
22822- case 0:
22823- return address + entry;
22824- case 0x80: /* Not present */
22825- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22826- return 0;
22827- default: /* Shouldn't happen */
22828- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22829- service, return_code);
22830+ case 0: {
22831+ int cpu;
22832+ unsigned char flags;
22833+
22834+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
22835+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
22836+ printk(KERN_WARNING "bios32_service: not valid\n");
22837 return 0;
22838+ }
22839+ address = address + PAGE_OFFSET;
22840+ length += 16UL; /* some BIOSs underreport this... */
22841+ flags = 4;
22842+ if (length >= 64*1024*1024) {
22843+ length >>= PAGE_SHIFT;
22844+ flags |= 8;
22845+ }
22846+
22847+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
22848+ gdt = get_cpu_gdt_table(cpu);
22849+ pack_descriptor(&d, address, length, 0x9b, flags);
22850+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
22851+ pack_descriptor(&d, address, length, 0x93, flags);
22852+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
22853+ }
22854+ return entry;
22855+ }
22856+ case 0x80: /* Not present */
22857+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
22858+ return 0;
22859+ default: /* Shouldn't happen */
22860+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
22861+ service, return_code);
22862+ return 0;
22863 }
22864 }
22865
22866 static struct {
22867 unsigned long address;
22868 unsigned short segment;
22869-} pci_indirect = { 0, __KERNEL_CS };
22870+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
22871
22872-static int pci_bios_present;
22873+static int pci_bios_present __read_only;
22874
22875 static int __devinit check_pcibios(void)
22876 {
22877@@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
22878 unsigned long flags, pcibios_entry;
22879
22880 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
22881- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
22882+ pci_indirect.address = pcibios_entry;
22883
22884 local_irq_save(flags);
22885- __asm__(
22886- "lcall *(%%edi); cld\n\t"
22887+ __asm__("movw %w6, %%ds\n\t"
22888+ "lcall *%%ss:(%%edi); cld\n\t"
22889+ "push %%ss\n\t"
22890+ "pop %%ds\n\t"
22891 "jc 1f\n\t"
22892 "xor %%ah, %%ah\n"
22893 "1:"
22894@@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
22895 "=b" (ebx),
22896 "=c" (ecx)
22897 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
22898- "D" (&pci_indirect)
22899+ "D" (&pci_indirect),
22900+ "r" (__PCIBIOS_DS)
22901 : "memory");
22902 local_irq_restore(flags);
22903
22904@@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
22905
22906 switch (len) {
22907 case 1:
22908- __asm__("lcall *(%%esi); cld\n\t"
22909+ __asm__("movw %w6, %%ds\n\t"
22910+ "lcall *%%ss:(%%esi); cld\n\t"
22911+ "push %%ss\n\t"
22912+ "pop %%ds\n\t"
22913 "jc 1f\n\t"
22914 "xor %%ah, %%ah\n"
22915 "1:"
22916@@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
22917 : "1" (PCIBIOS_READ_CONFIG_BYTE),
22918 "b" (bx),
22919 "D" ((long)reg),
22920- "S" (&pci_indirect));
22921+ "S" (&pci_indirect),
22922+ "r" (__PCIBIOS_DS));
22923 /*
22924 * Zero-extend the result beyond 8 bits, do not trust the
22925 * BIOS having done it:
22926@@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
22927 *value &= 0xff;
22928 break;
22929 case 2:
22930- __asm__("lcall *(%%esi); cld\n\t"
22931+ __asm__("movw %w6, %%ds\n\t"
22932+ "lcall *%%ss:(%%esi); cld\n\t"
22933+ "push %%ss\n\t"
22934+ "pop %%ds\n\t"
22935 "jc 1f\n\t"
22936 "xor %%ah, %%ah\n"
22937 "1:"
22938@@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
22939 : "1" (PCIBIOS_READ_CONFIG_WORD),
22940 "b" (bx),
22941 "D" ((long)reg),
22942- "S" (&pci_indirect));
22943+ "S" (&pci_indirect),
22944+ "r" (__PCIBIOS_DS));
22945 /*
22946 * Zero-extend the result beyond 16 bits, do not trust the
22947 * BIOS having done it:
22948@@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
22949 *value &= 0xffff;
22950 break;
22951 case 4:
22952- __asm__("lcall *(%%esi); cld\n\t"
22953+ __asm__("movw %w6, %%ds\n\t"
22954+ "lcall *%%ss:(%%esi); cld\n\t"
22955+ "push %%ss\n\t"
22956+ "pop %%ds\n\t"
22957 "jc 1f\n\t"
22958 "xor %%ah, %%ah\n"
22959 "1:"
22960@@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
22961 : "1" (PCIBIOS_READ_CONFIG_DWORD),
22962 "b" (bx),
22963 "D" ((long)reg),
22964- "S" (&pci_indirect));
22965+ "S" (&pci_indirect),
22966+ "r" (__PCIBIOS_DS));
22967 break;
22968 }
22969
22970@@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
22971
22972 switch (len) {
22973 case 1:
22974- __asm__("lcall *(%%esi); cld\n\t"
22975+ __asm__("movw %w6, %%ds\n\t"
22976+ "lcall *%%ss:(%%esi); cld\n\t"
22977+ "push %%ss\n\t"
22978+ "pop %%ds\n\t"
22979 "jc 1f\n\t"
22980 "xor %%ah, %%ah\n"
22981 "1:"
22982@@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
22983 "c" (value),
22984 "b" (bx),
22985 "D" ((long)reg),
22986- "S" (&pci_indirect));
22987+ "S" (&pci_indirect),
22988+ "r" (__PCIBIOS_DS));
22989 break;
22990 case 2:
22991- __asm__("lcall *(%%esi); cld\n\t"
22992+ __asm__("movw %w6, %%ds\n\t"
22993+ "lcall *%%ss:(%%esi); cld\n\t"
22994+ "push %%ss\n\t"
22995+ "pop %%ds\n\t"
22996 "jc 1f\n\t"
22997 "xor %%ah, %%ah\n"
22998 "1:"
22999@@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
23000 "c" (value),
23001 "b" (bx),
23002 "D" ((long)reg),
23003- "S" (&pci_indirect));
23004+ "S" (&pci_indirect),
23005+ "r" (__PCIBIOS_DS));
23006 break;
23007 case 4:
23008- __asm__("lcall *(%%esi); cld\n\t"
23009+ __asm__("movw %w6, %%ds\n\t"
23010+ "lcall *%%ss:(%%esi); cld\n\t"
23011+ "push %%ss\n\t"
23012+ "pop %%ds\n\t"
23013 "jc 1f\n\t"
23014 "xor %%ah, %%ah\n"
23015 "1:"
23016@@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
23017 "c" (value),
23018 "b" (bx),
23019 "D" ((long)reg),
23020- "S" (&pci_indirect));
23021+ "S" (&pci_indirect),
23022+ "r" (__PCIBIOS_DS));
23023 break;
23024 }
23025
23026@@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
23027 * Function table for BIOS32 access
23028 */
23029
23030-static struct pci_raw_ops pci_bios_access = {
23031+static const struct pci_raw_ops pci_bios_access = {
23032 .read = pci_bios_read,
23033 .write = pci_bios_write
23034 };
23035@@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
23036 * Try to find PCI BIOS.
23037 */
23038
23039-static struct pci_raw_ops * __devinit pci_find_bios(void)
23040+static const struct pci_raw_ops * __devinit pci_find_bios(void)
23041 {
23042 union bios32 *check;
23043 unsigned char sum;
23044@@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
23045
23046 DBG("PCI: Fetching IRQ routing table... ");
23047 __asm__("push %%es\n\t"
23048+ "movw %w8, %%ds\n\t"
23049 "push %%ds\n\t"
23050 "pop %%es\n\t"
23051- "lcall *(%%esi); cld\n\t"
23052+ "lcall *%%ss:(%%esi); cld\n\t"
23053 "pop %%es\n\t"
23054+ "push %%ss\n\t"
23055+ "pop %%ds\n"
23056 "jc 1f\n\t"
23057 "xor %%ah, %%ah\n"
23058 "1:"
23059@@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
23060 "1" (0),
23061 "D" ((long) &opt),
23062 "S" (&pci_indirect),
23063- "m" (opt)
23064+ "m" (opt),
23065+ "r" (__PCIBIOS_DS)
23066 : "memory");
23067 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
23068 if (ret & 0xff00)
23069@@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
23070 {
23071 int ret;
23072
23073- __asm__("lcall *(%%esi); cld\n\t"
23074+ __asm__("movw %w5, %%ds\n\t"
23075+ "lcall *%%ss:(%%esi); cld\n\t"
23076+ "push %%ss\n\t"
23077+ "pop %%ds\n"
23078 "jc 1f\n\t"
23079 "xor %%ah, %%ah\n"
23080 "1:"
23081@@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
23082 : "0" (PCIBIOS_SET_PCI_HW_INT),
23083 "b" ((dev->bus->number << 8) | dev->devfn),
23084 "c" ((irq << 8) | (pin + 10)),
23085- "S" (&pci_indirect));
23086+ "S" (&pci_indirect),
23087+ "r" (__PCIBIOS_DS));
23088 return !(ret & 0xff00);
23089 }
23090 EXPORT_SYMBOL(pcibios_set_irq_routing);
23091diff -urNp linux-2.6.32.43/arch/x86/power/cpu.c linux-2.6.32.43/arch/x86/power/cpu.c
23092--- linux-2.6.32.43/arch/x86/power/cpu.c 2011-03-27 14:31:47.000000000 -0400
23093+++ linux-2.6.32.43/arch/x86/power/cpu.c 2011-04-17 15:56:46.000000000 -0400
23094@@ -129,7 +129,7 @@ static void do_fpu_end(void)
23095 static void fix_processor_context(void)
23096 {
23097 int cpu = smp_processor_id();
23098- struct tss_struct *t = &per_cpu(init_tss, cpu);
23099+ struct tss_struct *t = init_tss + cpu;
23100
23101 set_tss_desc(cpu, t); /*
23102 * This just modifies memory; should not be
23103@@ -139,7 +139,9 @@ static void fix_processor_context(void)
23104 */
23105
23106 #ifdef CONFIG_X86_64
23107+ pax_open_kernel();
23108 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
23109+ pax_close_kernel();
23110
23111 syscall_init(); /* This sets MSR_*STAR and related */
23112 #endif
23113diff -urNp linux-2.6.32.43/arch/x86/vdso/Makefile linux-2.6.32.43/arch/x86/vdso/Makefile
23114--- linux-2.6.32.43/arch/x86/vdso/Makefile 2011-03-27 14:31:47.000000000 -0400
23115+++ linux-2.6.32.43/arch/x86/vdso/Makefile 2011-04-17 15:56:46.000000000 -0400
23116@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
23117 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
23118 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
23119
23120-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23121+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23122 GCOV_PROFILE := n
23123
23124 #
23125diff -urNp linux-2.6.32.43/arch/x86/vdso/vclock_gettime.c linux-2.6.32.43/arch/x86/vdso/vclock_gettime.c
23126--- linux-2.6.32.43/arch/x86/vdso/vclock_gettime.c 2011-03-27 14:31:47.000000000 -0400
23127+++ linux-2.6.32.43/arch/x86/vdso/vclock_gettime.c 2011-04-17 15:56:46.000000000 -0400
23128@@ -22,24 +22,48 @@
23129 #include <asm/hpet.h>
23130 #include <asm/unistd.h>
23131 #include <asm/io.h>
23132+#include <asm/fixmap.h>
23133 #include "vextern.h"
23134
23135 #define gtod vdso_vsyscall_gtod_data
23136
23137+notrace noinline long __vdso_fallback_time(long *t)
23138+{
23139+ long secs;
23140+ asm volatile("syscall"
23141+ : "=a" (secs)
23142+ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
23143+ return secs;
23144+}
23145+
23146 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
23147 {
23148 long ret;
23149 asm("syscall" : "=a" (ret) :
23150- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
23151+ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
23152 return ret;
23153 }
23154
23155+notrace static inline cycle_t __vdso_vread_hpet(void)
23156+{
23157+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
23158+}
23159+
23160+notrace static inline cycle_t __vdso_vread_tsc(void)
23161+{
23162+ cycle_t ret = (cycle_t)vget_cycles();
23163+
23164+ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
23165+}
23166+
23167 notrace static inline long vgetns(void)
23168 {
23169 long v;
23170- cycles_t (*vread)(void);
23171- vread = gtod->clock.vread;
23172- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
23173+ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
23174+ v = __vdso_vread_tsc();
23175+ else
23176+ v = __vdso_vread_hpet();
23177+ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
23178 return (v * gtod->clock.mult) >> gtod->clock.shift;
23179 }
23180
23181@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
23182
23183 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
23184 {
23185- if (likely(gtod->sysctl_enabled))
23186+ if (likely(gtod->sysctl_enabled &&
23187+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23188+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23189 switch (clock) {
23190 case CLOCK_REALTIME:
23191 if (likely(gtod->clock.vread))
23192@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
23193 int clock_gettime(clockid_t, struct timespec *)
23194 __attribute__((weak, alias("__vdso_clock_gettime")));
23195
23196-notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23197+notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
23198 {
23199 long ret;
23200- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
23201+ asm("syscall" : "=a" (ret) :
23202+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
23203+ return ret;
23204+}
23205+
23206+notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23207+{
23208+ if (likely(gtod->sysctl_enabled &&
23209+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23210+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23211+ {
23212 if (likely(tv != NULL)) {
23213 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
23214 offsetof(struct timespec, tv_nsec) ||
23215@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
23216 }
23217 return 0;
23218 }
23219- asm("syscall" : "=a" (ret) :
23220- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
23221- return ret;
23222+ return __vdso_fallback_gettimeofday(tv, tz);
23223 }
23224 int gettimeofday(struct timeval *, struct timezone *)
23225 __attribute__((weak, alias("__vdso_gettimeofday")));
23226diff -urNp linux-2.6.32.43/arch/x86/vdso/vdso32-setup.c linux-2.6.32.43/arch/x86/vdso/vdso32-setup.c
23227--- linux-2.6.32.43/arch/x86/vdso/vdso32-setup.c 2011-03-27 14:31:47.000000000 -0400
23228+++ linux-2.6.32.43/arch/x86/vdso/vdso32-setup.c 2011-04-23 12:56:10.000000000 -0400
23229@@ -25,6 +25,7 @@
23230 #include <asm/tlbflush.h>
23231 #include <asm/vdso.h>
23232 #include <asm/proto.h>
23233+#include <asm/mman.h>
23234
23235 enum {
23236 VDSO_DISABLED = 0,
23237@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
23238 void enable_sep_cpu(void)
23239 {
23240 int cpu = get_cpu();
23241- struct tss_struct *tss = &per_cpu(init_tss, cpu);
23242+ struct tss_struct *tss = init_tss + cpu;
23243
23244 if (!boot_cpu_has(X86_FEATURE_SEP)) {
23245 put_cpu();
23246@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
23247 gate_vma.vm_start = FIXADDR_USER_START;
23248 gate_vma.vm_end = FIXADDR_USER_END;
23249 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
23250- gate_vma.vm_page_prot = __P101;
23251+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
23252 /*
23253 * Make sure the vDSO gets into every core dump.
23254 * Dumping its contents makes post-mortem fully interpretable later
23255@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
23256 if (compat)
23257 addr = VDSO_HIGH_BASE;
23258 else {
23259- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
23260+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
23261 if (IS_ERR_VALUE(addr)) {
23262 ret = addr;
23263 goto up_fail;
23264 }
23265 }
23266
23267- current->mm->context.vdso = (void *)addr;
23268+ current->mm->context.vdso = addr;
23269
23270 if (compat_uses_vma || !compat) {
23271 /*
23272@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
23273 }
23274
23275 current_thread_info()->sysenter_return =
23276- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23277+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23278
23279 up_fail:
23280 if (ret)
23281- current->mm->context.vdso = NULL;
23282+ current->mm->context.vdso = 0;
23283
23284 up_write(&mm->mmap_sem);
23285
23286@@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
23287
23288 const char *arch_vma_name(struct vm_area_struct *vma)
23289 {
23290- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
23291+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
23292 return "[vdso]";
23293+
23294+#ifdef CONFIG_PAX_SEGMEXEC
23295+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
23296+ return "[vdso]";
23297+#endif
23298+
23299 return NULL;
23300 }
23301
23302@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
23303 struct mm_struct *mm = tsk->mm;
23304
23305 /* Check to see if this task was created in compat vdso mode */
23306- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
23307+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
23308 return &gate_vma;
23309 return NULL;
23310 }
23311diff -urNp linux-2.6.32.43/arch/x86/vdso/vdso.lds.S linux-2.6.32.43/arch/x86/vdso/vdso.lds.S
23312--- linux-2.6.32.43/arch/x86/vdso/vdso.lds.S 2011-03-27 14:31:47.000000000 -0400
23313+++ linux-2.6.32.43/arch/x86/vdso/vdso.lds.S 2011-06-06 17:35:35.000000000 -0400
23314@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
23315 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
23316 #include "vextern.h"
23317 #undef VEXTERN
23318+
23319+#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
23320+VEXTERN(fallback_gettimeofday)
23321+VEXTERN(fallback_time)
23322+VEXTERN(getcpu)
23323+#undef VEXTERN
23324diff -urNp linux-2.6.32.43/arch/x86/vdso/vextern.h linux-2.6.32.43/arch/x86/vdso/vextern.h
23325--- linux-2.6.32.43/arch/x86/vdso/vextern.h 2011-03-27 14:31:47.000000000 -0400
23326+++ linux-2.6.32.43/arch/x86/vdso/vextern.h 2011-04-17 15:56:46.000000000 -0400
23327@@ -11,6 +11,5 @@
23328 put into vextern.h and be referenced as a pointer with vdso prefix.
23329 The main kernel later fills in the values. */
23330
23331-VEXTERN(jiffies)
23332 VEXTERN(vgetcpu_mode)
23333 VEXTERN(vsyscall_gtod_data)
23334diff -urNp linux-2.6.32.43/arch/x86/vdso/vma.c linux-2.6.32.43/arch/x86/vdso/vma.c
23335--- linux-2.6.32.43/arch/x86/vdso/vma.c 2011-03-27 14:31:47.000000000 -0400
23336+++ linux-2.6.32.43/arch/x86/vdso/vma.c 2011-04-17 15:56:46.000000000 -0400
23337@@ -57,7 +57,7 @@ static int __init init_vdso_vars(void)
23338 if (!vbase)
23339 goto oom;
23340
23341- if (memcmp(vbase, "\177ELF", 4)) {
23342+ if (memcmp(vbase, ELFMAG, SELFMAG)) {
23343 printk("VDSO: I'm broken; not ELF\n");
23344 vdso_enabled = 0;
23345 }
23346@@ -66,6 +66,7 @@ static int __init init_vdso_vars(void)
23347 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
23348 #include "vextern.h"
23349 #undef VEXTERN
23350+ vunmap(vbase);
23351 return 0;
23352
23353 oom:
23354@@ -116,7 +117,7 @@ int arch_setup_additional_pages(struct l
23355 goto up_fail;
23356 }
23357
23358- current->mm->context.vdso = (void *)addr;
23359+ current->mm->context.vdso = addr;
23360
23361 ret = install_special_mapping(mm, addr, vdso_size,
23362 VM_READ|VM_EXEC|
23363@@ -124,7 +125,7 @@ int arch_setup_additional_pages(struct l
23364 VM_ALWAYSDUMP,
23365 vdso_pages);
23366 if (ret) {
23367- current->mm->context.vdso = NULL;
23368+ current->mm->context.vdso = 0;
23369 goto up_fail;
23370 }
23371
23372@@ -132,10 +133,3 @@ up_fail:
23373 up_write(&mm->mmap_sem);
23374 return ret;
23375 }
23376-
23377-static __init int vdso_setup(char *s)
23378-{
23379- vdso_enabled = simple_strtoul(s, NULL, 0);
23380- return 0;
23381-}
23382-__setup("vdso=", vdso_setup);
23383diff -urNp linux-2.6.32.43/arch/x86/xen/enlighten.c linux-2.6.32.43/arch/x86/xen/enlighten.c
23384--- linux-2.6.32.43/arch/x86/xen/enlighten.c 2011-03-27 14:31:47.000000000 -0400
23385+++ linux-2.6.32.43/arch/x86/xen/enlighten.c 2011-05-22 23:02:03.000000000 -0400
23386@@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
23387
23388 struct shared_info xen_dummy_shared_info;
23389
23390-void *xen_initial_gdt;
23391-
23392 /*
23393 * Point at some empty memory to start with. We map the real shared_info
23394 * page as soon as fixmap is up and running.
23395@@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
23396
23397 preempt_disable();
23398
23399- start = __get_cpu_var(idt_desc).address;
23400+ start = (unsigned long)__get_cpu_var(idt_desc).address;
23401 end = start + __get_cpu_var(idt_desc).size + 1;
23402
23403 xen_mc_flush();
23404@@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic
23405 #endif
23406 };
23407
23408-static void xen_reboot(int reason)
23409+static __noreturn void xen_reboot(int reason)
23410 {
23411 struct sched_shutdown r = { .reason = reason };
23412
23413@@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
23414 BUG();
23415 }
23416
23417-static void xen_restart(char *msg)
23418+static __noreturn void xen_restart(char *msg)
23419 {
23420 xen_reboot(SHUTDOWN_reboot);
23421 }
23422
23423-static void xen_emergency_restart(void)
23424+static __noreturn void xen_emergency_restart(void)
23425 {
23426 xen_reboot(SHUTDOWN_reboot);
23427 }
23428
23429-static void xen_machine_halt(void)
23430+static __noreturn void xen_machine_halt(void)
23431 {
23432 xen_reboot(SHUTDOWN_poweroff);
23433 }
23434@@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(
23435 */
23436 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
23437
23438-#ifdef CONFIG_X86_64
23439 /* Work out if we support NX */
23440- check_efer();
23441+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23442+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
23443+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
23444+ unsigned l, h;
23445+
23446+#ifdef CONFIG_X86_PAE
23447+ nx_enabled = 1;
23448+#endif
23449+ __supported_pte_mask |= _PAGE_NX;
23450+ rdmsr(MSR_EFER, l, h);
23451+ l |= EFER_NX;
23452+ wrmsr(MSR_EFER, l, h);
23453+ }
23454 #endif
23455
23456 xen_setup_features();
23457@@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(
23458
23459 machine_ops = xen_machine_ops;
23460
23461- /*
23462- * The only reliable way to retain the initial address of the
23463- * percpu gdt_page is to remember it here, so we can go and
23464- * mark it RW later, when the initial percpu area is freed.
23465- */
23466- xen_initial_gdt = &per_cpu(gdt_page, 0);
23467-
23468 xen_smp_init();
23469
23470 pgd = (pgd_t *)xen_start_info->pt_base;
23471diff -urNp linux-2.6.32.43/arch/x86/xen/mmu.c linux-2.6.32.43/arch/x86/xen/mmu.c
23472--- linux-2.6.32.43/arch/x86/xen/mmu.c 2011-07-13 17:23:04.000000000 -0400
23473+++ linux-2.6.32.43/arch/x86/xen/mmu.c 2011-07-13 17:23:18.000000000 -0400
23474@@ -1719,6 +1719,8 @@ __init pgd_t *xen_setup_kernel_pagetable
23475 convert_pfn_mfn(init_level4_pgt);
23476 convert_pfn_mfn(level3_ident_pgt);
23477 convert_pfn_mfn(level3_kernel_pgt);
23478+ convert_pfn_mfn(level3_vmalloc_pgt);
23479+ convert_pfn_mfn(level3_vmemmap_pgt);
23480
23481 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
23482 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
23483@@ -1737,7 +1739,10 @@ __init pgd_t *xen_setup_kernel_pagetable
23484 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
23485 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
23486 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
23487+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
23488+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
23489 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
23490+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
23491 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
23492 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
23493
23494diff -urNp linux-2.6.32.43/arch/x86/xen/smp.c linux-2.6.32.43/arch/x86/xen/smp.c
23495--- linux-2.6.32.43/arch/x86/xen/smp.c 2011-03-27 14:31:47.000000000 -0400
23496+++ linux-2.6.32.43/arch/x86/xen/smp.c 2011-05-11 18:25:15.000000000 -0400
23497@@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
23498 {
23499 BUG_ON(smp_processor_id() != 0);
23500 native_smp_prepare_boot_cpu();
23501-
23502- /* We've switched to the "real" per-cpu gdt, so make sure the
23503- old memory can be recycled */
23504- make_lowmem_page_readwrite(xen_initial_gdt);
23505-
23506 xen_setup_vcpu_info_placement();
23507 }
23508
23509@@ -231,12 +226,12 @@ cpu_initialize_context(unsigned int cpu,
23510 gdt = get_cpu_gdt_table(cpu);
23511
23512 ctxt->flags = VGCF_IN_KERNEL;
23513- ctxt->user_regs.ds = __USER_DS;
23514- ctxt->user_regs.es = __USER_DS;
23515+ ctxt->user_regs.ds = __KERNEL_DS;
23516+ ctxt->user_regs.es = __KERNEL_DS;
23517 ctxt->user_regs.ss = __KERNEL_DS;
23518 #ifdef CONFIG_X86_32
23519 ctxt->user_regs.fs = __KERNEL_PERCPU;
23520- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
23521+ savesegment(gs, ctxt->user_regs.gs);
23522 #else
23523 ctxt->gs_base_kernel = per_cpu_offset(cpu);
23524 #endif
23525@@ -287,13 +282,12 @@ static int __cpuinit xen_cpu_up(unsigned
23526 int rc;
23527
23528 per_cpu(current_task, cpu) = idle;
23529+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
23530 #ifdef CONFIG_X86_32
23531 irq_ctx_init(cpu);
23532 #else
23533 clear_tsk_thread_flag(idle, TIF_FORK);
23534- per_cpu(kernel_stack, cpu) =
23535- (unsigned long)task_stack_page(idle) -
23536- KERNEL_STACK_OFFSET + THREAD_SIZE;
23537+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
23538 #endif
23539 xen_setup_runstate_info(cpu);
23540 xen_setup_timer(cpu);
23541diff -urNp linux-2.6.32.43/arch/x86/xen/xen-asm_32.S linux-2.6.32.43/arch/x86/xen/xen-asm_32.S
23542--- linux-2.6.32.43/arch/x86/xen/xen-asm_32.S 2011-03-27 14:31:47.000000000 -0400
23543+++ linux-2.6.32.43/arch/x86/xen/xen-asm_32.S 2011-04-22 19:13:13.000000000 -0400
23544@@ -83,14 +83,14 @@ ENTRY(xen_iret)
23545 ESP_OFFSET=4 # bytes pushed onto stack
23546
23547 /*
23548- * Store vcpu_info pointer for easy access. Do it this way to
23549- * avoid having to reload %fs
23550+ * Store vcpu_info pointer for easy access.
23551 */
23552 #ifdef CONFIG_SMP
23553- GET_THREAD_INFO(%eax)
23554- movl TI_cpu(%eax), %eax
23555- movl __per_cpu_offset(,%eax,4), %eax
23556- mov per_cpu__xen_vcpu(%eax), %eax
23557+ push %fs
23558+ mov $(__KERNEL_PERCPU), %eax
23559+ mov %eax, %fs
23560+ mov PER_CPU_VAR(xen_vcpu), %eax
23561+ pop %fs
23562 #else
23563 movl per_cpu__xen_vcpu, %eax
23564 #endif
23565diff -urNp linux-2.6.32.43/arch/x86/xen/xen-head.S linux-2.6.32.43/arch/x86/xen/xen-head.S
23566--- linux-2.6.32.43/arch/x86/xen/xen-head.S 2011-03-27 14:31:47.000000000 -0400
23567+++ linux-2.6.32.43/arch/x86/xen/xen-head.S 2011-04-17 15:56:46.000000000 -0400
23568@@ -19,6 +19,17 @@ ENTRY(startup_xen)
23569 #ifdef CONFIG_X86_32
23570 mov %esi,xen_start_info
23571 mov $init_thread_union+THREAD_SIZE,%esp
23572+#ifdef CONFIG_SMP
23573+ movl $cpu_gdt_table,%edi
23574+ movl $__per_cpu_load,%eax
23575+ movw %ax,__KERNEL_PERCPU + 2(%edi)
23576+ rorl $16,%eax
23577+ movb %al,__KERNEL_PERCPU + 4(%edi)
23578+ movb %ah,__KERNEL_PERCPU + 7(%edi)
23579+ movl $__per_cpu_end - 1,%eax
23580+ subl $__per_cpu_start,%eax
23581+ movw %ax,__KERNEL_PERCPU + 0(%edi)
23582+#endif
23583 #else
23584 mov %rsi,xen_start_info
23585 mov $init_thread_union+THREAD_SIZE,%rsp
23586diff -urNp linux-2.6.32.43/arch/x86/xen/xen-ops.h linux-2.6.32.43/arch/x86/xen/xen-ops.h
23587--- linux-2.6.32.43/arch/x86/xen/xen-ops.h 2011-03-27 14:31:47.000000000 -0400
23588+++ linux-2.6.32.43/arch/x86/xen/xen-ops.h 2011-04-17 15:56:46.000000000 -0400
23589@@ -10,8 +10,6 @@
23590 extern const char xen_hypervisor_callback[];
23591 extern const char xen_failsafe_callback[];
23592
23593-extern void *xen_initial_gdt;
23594-
23595 struct trap_info;
23596 void xen_copy_trap_info(struct trap_info *traps);
23597
23598diff -urNp linux-2.6.32.43/block/blk-integrity.c linux-2.6.32.43/block/blk-integrity.c
23599--- linux-2.6.32.43/block/blk-integrity.c 2011-03-27 14:31:47.000000000 -0400
23600+++ linux-2.6.32.43/block/blk-integrity.c 2011-04-17 15:56:46.000000000 -0400
23601@@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
23602 NULL,
23603 };
23604
23605-static struct sysfs_ops integrity_ops = {
23606+static const struct sysfs_ops integrity_ops = {
23607 .show = &integrity_attr_show,
23608 .store = &integrity_attr_store,
23609 };
23610diff -urNp linux-2.6.32.43/block/blk-iopoll.c linux-2.6.32.43/block/blk-iopoll.c
23611--- linux-2.6.32.43/block/blk-iopoll.c 2011-03-27 14:31:47.000000000 -0400
23612+++ linux-2.6.32.43/block/blk-iopoll.c 2011-04-17 15:56:46.000000000 -0400
23613@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
23614 }
23615 EXPORT_SYMBOL(blk_iopoll_complete);
23616
23617-static void blk_iopoll_softirq(struct softirq_action *h)
23618+static void blk_iopoll_softirq(void)
23619 {
23620 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
23621 int rearm = 0, budget = blk_iopoll_budget;
23622diff -urNp linux-2.6.32.43/block/blk-map.c linux-2.6.32.43/block/blk-map.c
23623--- linux-2.6.32.43/block/blk-map.c 2011-03-27 14:31:47.000000000 -0400
23624+++ linux-2.6.32.43/block/blk-map.c 2011-04-18 16:57:33.000000000 -0400
23625@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
23626 * direct dma. else, set up kernel bounce buffers
23627 */
23628 uaddr = (unsigned long) ubuf;
23629- if (blk_rq_aligned(q, ubuf, len) && !map_data)
23630+ if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
23631 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
23632 else
23633 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
23634@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_q
23635 for (i = 0; i < iov_count; i++) {
23636 unsigned long uaddr = (unsigned long)iov[i].iov_base;
23637
23638+ if (!iov[i].iov_len)
23639+ return -EINVAL;
23640+
23641 if (uaddr & queue_dma_alignment(q)) {
23642 unaligned = 1;
23643 break;
23644 }
23645- if (!iov[i].iov_len)
23646- return -EINVAL;
23647 }
23648
23649 if (unaligned || (q->dma_pad_mask & len) || map_data)
23650@@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue
23651 if (!len || !kbuf)
23652 return -EINVAL;
23653
23654- do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
23655+ do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
23656 if (do_copy)
23657 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
23658 else
23659diff -urNp linux-2.6.32.43/block/blk-softirq.c linux-2.6.32.43/block/blk-softirq.c
23660--- linux-2.6.32.43/block/blk-softirq.c 2011-03-27 14:31:47.000000000 -0400
23661+++ linux-2.6.32.43/block/blk-softirq.c 2011-04-17 15:56:46.000000000 -0400
23662@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
23663 * Softirq action handler - move entries to local list and loop over them
23664 * while passing them to the queue registered handler.
23665 */
23666-static void blk_done_softirq(struct softirq_action *h)
23667+static void blk_done_softirq(void)
23668 {
23669 struct list_head *cpu_list, local_list;
23670
23671diff -urNp linux-2.6.32.43/block/blk-sysfs.c linux-2.6.32.43/block/blk-sysfs.c
23672--- linux-2.6.32.43/block/blk-sysfs.c 2011-05-10 22:12:01.000000000 -0400
23673+++ linux-2.6.32.43/block/blk-sysfs.c 2011-05-10 22:12:26.000000000 -0400
23674@@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
23675 kmem_cache_free(blk_requestq_cachep, q);
23676 }
23677
23678-static struct sysfs_ops queue_sysfs_ops = {
23679+static const struct sysfs_ops queue_sysfs_ops = {
23680 .show = queue_attr_show,
23681 .store = queue_attr_store,
23682 };
23683diff -urNp linux-2.6.32.43/block/bsg.c linux-2.6.32.43/block/bsg.c
23684--- linux-2.6.32.43/block/bsg.c 2011-03-27 14:31:47.000000000 -0400
23685+++ linux-2.6.32.43/block/bsg.c 2011-04-17 15:56:46.000000000 -0400
23686@@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
23687 struct sg_io_v4 *hdr, struct bsg_device *bd,
23688 fmode_t has_write_perm)
23689 {
23690+ unsigned char tmpcmd[sizeof(rq->__cmd)];
23691+ unsigned char *cmdptr;
23692+
23693 if (hdr->request_len > BLK_MAX_CDB) {
23694 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
23695 if (!rq->cmd)
23696 return -ENOMEM;
23697- }
23698+ cmdptr = rq->cmd;
23699+ } else
23700+ cmdptr = tmpcmd;
23701
23702- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
23703+ if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
23704 hdr->request_len))
23705 return -EFAULT;
23706
23707+ if (cmdptr != rq->cmd)
23708+ memcpy(rq->cmd, cmdptr, hdr->request_len);
23709+
23710 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
23711 if (blk_verify_command(rq->cmd, has_write_perm))
23712 return -EPERM;
23713diff -urNp linux-2.6.32.43/block/elevator.c linux-2.6.32.43/block/elevator.c
23714--- linux-2.6.32.43/block/elevator.c 2011-03-27 14:31:47.000000000 -0400
23715+++ linux-2.6.32.43/block/elevator.c 2011-04-17 15:56:46.000000000 -0400
23716@@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, str
23717 return error;
23718 }
23719
23720-static struct sysfs_ops elv_sysfs_ops = {
23721+static const struct sysfs_ops elv_sysfs_ops = {
23722 .show = elv_attr_show,
23723 .store = elv_attr_store,
23724 };
23725diff -urNp linux-2.6.32.43/block/scsi_ioctl.c linux-2.6.32.43/block/scsi_ioctl.c
23726--- linux-2.6.32.43/block/scsi_ioctl.c 2011-03-27 14:31:47.000000000 -0400
23727+++ linux-2.6.32.43/block/scsi_ioctl.c 2011-04-23 13:28:22.000000000 -0400
23728@@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
23729 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
23730 struct sg_io_hdr *hdr, fmode_t mode)
23731 {
23732- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
23733+ unsigned char tmpcmd[sizeof(rq->__cmd)];
23734+ unsigned char *cmdptr;
23735+
23736+ if (rq->cmd != rq->__cmd)
23737+ cmdptr = rq->cmd;
23738+ else
23739+ cmdptr = tmpcmd;
23740+
23741+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
23742 return -EFAULT;
23743+
23744+ if (cmdptr != rq->cmd)
23745+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
23746+
23747 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
23748 return -EPERM;
23749
23750@@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *
23751 int err;
23752 unsigned int in_len, out_len, bytes, opcode, cmdlen;
23753 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
23754+ unsigned char tmpcmd[sizeof(rq->__cmd)];
23755+ unsigned char *cmdptr;
23756
23757 if (!sic)
23758 return -EINVAL;
23759@@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *
23760 */
23761 err = -EFAULT;
23762 rq->cmd_len = cmdlen;
23763- if (copy_from_user(rq->cmd, sic->data, cmdlen))
23764+
23765+ if (rq->cmd != rq->__cmd)
23766+ cmdptr = rq->cmd;
23767+ else
23768+ cmdptr = tmpcmd;
23769+
23770+ if (copy_from_user(cmdptr, sic->data, cmdlen))
23771 goto error;
23772
23773+ if (rq->cmd != cmdptr)
23774+ memcpy(rq->cmd, cmdptr, cmdlen);
23775+
23776 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
23777 goto error;
23778
23779diff -urNp linux-2.6.32.43/crypto/gf128mul.c linux-2.6.32.43/crypto/gf128mul.c
23780--- linux-2.6.32.43/crypto/gf128mul.c 2011-03-27 14:31:47.000000000 -0400
23781+++ linux-2.6.32.43/crypto/gf128mul.c 2011-07-06 19:53:33.000000000 -0400
23782@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
23783 for (i = 0; i < 7; ++i)
23784 gf128mul_x_lle(&p[i + 1], &p[i]);
23785
23786- memset(r, 0, sizeof(r));
23787+ memset(r, 0, sizeof(*r));
23788 for (i = 0;;) {
23789 u8 ch = ((u8 *)b)[15 - i];
23790
23791@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
23792 for (i = 0; i < 7; ++i)
23793 gf128mul_x_bbe(&p[i + 1], &p[i]);
23794
23795- memset(r, 0, sizeof(r));
23796+ memset(r, 0, sizeof(*r));
23797 for (i = 0;;) {
23798 u8 ch = ((u8 *)b)[i];
23799
23800diff -urNp linux-2.6.32.43/crypto/serpent.c linux-2.6.32.43/crypto/serpent.c
23801--- linux-2.6.32.43/crypto/serpent.c 2011-03-27 14:31:47.000000000 -0400
23802+++ linux-2.6.32.43/crypto/serpent.c 2011-05-16 21:46:57.000000000 -0400
23803@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
23804 u32 r0,r1,r2,r3,r4;
23805 int i;
23806
23807+ pax_track_stack();
23808+
23809 /* Copy key, add padding */
23810
23811 for (i = 0; i < keylen; ++i)
23812diff -urNp linux-2.6.32.43/Documentation/dontdiff linux-2.6.32.43/Documentation/dontdiff
23813--- linux-2.6.32.43/Documentation/dontdiff 2011-03-27 14:31:47.000000000 -0400
23814+++ linux-2.6.32.43/Documentation/dontdiff 2011-05-18 20:09:36.000000000 -0400
23815@@ -1,13 +1,16 @@
23816 *.a
23817 *.aux
23818 *.bin
23819+*.cis
23820 *.cpio
23821 *.csp
23822+*.dbg
23823 *.dsp
23824 *.dvi
23825 *.elf
23826 *.eps
23827 *.fw
23828+*.gcno
23829 *.gen.S
23830 *.gif
23831 *.grep
23832@@ -38,8 +41,10 @@
23833 *.tab.h
23834 *.tex
23835 *.ver
23836+*.vim
23837 *.xml
23838 *_MODULES
23839+*_reg_safe.h
23840 *_vga16.c
23841 *~
23842 *.9
23843@@ -49,11 +54,16 @@
23844 53c700_d.h
23845 CVS
23846 ChangeSet
23847+GPATH
23848+GRTAGS
23849+GSYMS
23850+GTAGS
23851 Image
23852 Kerntypes
23853 Module.markers
23854 Module.symvers
23855 PENDING
23856+PERF*
23857 SCCS
23858 System.map*
23859 TAGS
23860@@ -76,7 +86,11 @@ btfixupprep
23861 build
23862 bvmlinux
23863 bzImage*
23864+capability_names.h
23865+capflags.c
23866 classlist.h*
23867+clut_vga16.c
23868+common-cmds.h
23869 comp*.log
23870 compile.h*
23871 conf
23872@@ -103,13 +117,14 @@ gen_crc32table
23873 gen_init_cpio
23874 genksyms
23875 *_gray256.c
23876+hash
23877 ihex2fw
23878 ikconfig.h*
23879 initramfs_data.cpio
23880+initramfs_data.cpio.bz2
23881 initramfs_data.cpio.gz
23882 initramfs_list
23883 kallsyms
23884-kconfig
23885 keywords.c
23886 ksym.c*
23887 ksym.h*
23888@@ -133,7 +148,9 @@ mkboot
23889 mkbugboot
23890 mkcpustr
23891 mkdep
23892+mkpiggy
23893 mkprep
23894+mkregtable
23895 mktables
23896 mktree
23897 modpost
23898@@ -149,6 +166,7 @@ patches*
23899 pca200e.bin
23900 pca200e_ecd.bin2
23901 piggy.gz
23902+piggy.S
23903 piggyback
23904 pnmtologo
23905 ppc_defs.h*
23906@@ -157,12 +175,15 @@ qconf
23907 raid6altivec*.c
23908 raid6int*.c
23909 raid6tables.c
23910+regdb.c
23911 relocs
23912+rlim_names.h
23913 series
23914 setup
23915 setup.bin
23916 setup.elf
23917 sImage
23918+slabinfo
23919 sm_tbl*
23920 split-include
23921 syscalltab.h
23922@@ -186,14 +207,20 @@ version.h*
23923 vmlinux
23924 vmlinux-*
23925 vmlinux.aout
23926+vmlinux.bin.all
23927+vmlinux.bin.bz2
23928 vmlinux.lds
23929+vmlinux.relocs
23930+voffset.h
23931 vsyscall.lds
23932 vsyscall_32.lds
23933 wanxlfw.inc
23934 uImage
23935 unifdef
23936+utsrelease.h
23937 wakeup.bin
23938 wakeup.elf
23939 wakeup.lds
23940 zImage*
23941 zconf.hash.c
23942+zoffset.h
23943diff -urNp linux-2.6.32.43/Documentation/kernel-parameters.txt linux-2.6.32.43/Documentation/kernel-parameters.txt
23944--- linux-2.6.32.43/Documentation/kernel-parameters.txt 2011-03-27 14:31:47.000000000 -0400
23945+++ linux-2.6.32.43/Documentation/kernel-parameters.txt 2011-04-17 15:56:45.000000000 -0400
23946@@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters.
23947 the specified number of seconds. This is to be used if
23948 your oopses keep scrolling off the screen.
23949
23950+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
23951+ virtualization environments that don't cope well with the
23952+ expand down segment used by UDEREF on X86-32 or the frequent
23953+ page table updates on X86-64.
23954+
23955+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
23956+
23957 pcbit= [HW,ISDN]
23958
23959 pcd. [PARIDE]
23960diff -urNp linux-2.6.32.43/drivers/acpi/acpi_pad.c linux-2.6.32.43/drivers/acpi/acpi_pad.c
23961--- linux-2.6.32.43/drivers/acpi/acpi_pad.c 2011-03-27 14:31:47.000000000 -0400
23962+++ linux-2.6.32.43/drivers/acpi/acpi_pad.c 2011-04-17 15:56:46.000000000 -0400
23963@@ -30,7 +30,7 @@
23964 #include <acpi/acpi_bus.h>
23965 #include <acpi/acpi_drivers.h>
23966
23967-#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
23968+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
23969 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
23970 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
23971 static DEFINE_MUTEX(isolated_cpus_lock);
23972diff -urNp linux-2.6.32.43/drivers/acpi/battery.c linux-2.6.32.43/drivers/acpi/battery.c
23973--- linux-2.6.32.43/drivers/acpi/battery.c 2011-03-27 14:31:47.000000000 -0400
23974+++ linux-2.6.32.43/drivers/acpi/battery.c 2011-04-17 15:56:46.000000000 -0400
23975@@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
23976 }
23977
23978 static struct battery_file {
23979- struct file_operations ops;
23980+ const struct file_operations ops;
23981 mode_t mode;
23982 const char *name;
23983 } acpi_battery_file[] = {
23984diff -urNp linux-2.6.32.43/drivers/acpi/dock.c linux-2.6.32.43/drivers/acpi/dock.c
23985--- linux-2.6.32.43/drivers/acpi/dock.c 2011-03-27 14:31:47.000000000 -0400
23986+++ linux-2.6.32.43/drivers/acpi/dock.c 2011-04-17 15:56:46.000000000 -0400
23987@@ -77,7 +77,7 @@ struct dock_dependent_device {
23988 struct list_head list;
23989 struct list_head hotplug_list;
23990 acpi_handle handle;
23991- struct acpi_dock_ops *ops;
23992+ const struct acpi_dock_ops *ops;
23993 void *context;
23994 };
23995
23996@@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
23997 * the dock driver after _DCK is executed.
23998 */
23999 int
24000-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
24001+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
24002 void *context)
24003 {
24004 struct dock_dependent_device *dd;
24005diff -urNp linux-2.6.32.43/drivers/acpi/osl.c linux-2.6.32.43/drivers/acpi/osl.c
24006--- linux-2.6.32.43/drivers/acpi/osl.c 2011-03-27 14:31:47.000000000 -0400
24007+++ linux-2.6.32.43/drivers/acpi/osl.c 2011-04-17 15:56:46.000000000 -0400
24008@@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
24009 void __iomem *virt_addr;
24010
24011 virt_addr = ioremap(phys_addr, width);
24012+ if (!virt_addr)
24013+ return AE_NO_MEMORY;
24014 if (!value)
24015 value = &dummy;
24016
24017@@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
24018 void __iomem *virt_addr;
24019
24020 virt_addr = ioremap(phys_addr, width);
24021+ if (!virt_addr)
24022+ return AE_NO_MEMORY;
24023
24024 switch (width) {
24025 case 8:
24026diff -urNp linux-2.6.32.43/drivers/acpi/power_meter.c linux-2.6.32.43/drivers/acpi/power_meter.c
24027--- linux-2.6.32.43/drivers/acpi/power_meter.c 2011-03-27 14:31:47.000000000 -0400
24028+++ linux-2.6.32.43/drivers/acpi/power_meter.c 2011-04-17 15:56:46.000000000 -0400
24029@@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *d
24030 return res;
24031
24032 temp /= 1000;
24033- if (temp < 0)
24034- return -EINVAL;
24035
24036 mutex_lock(&resource->lock);
24037 resource->trip[attr->index - 7] = temp;
24038diff -urNp linux-2.6.32.43/drivers/acpi/proc.c linux-2.6.32.43/drivers/acpi/proc.c
24039--- linux-2.6.32.43/drivers/acpi/proc.c 2011-03-27 14:31:47.000000000 -0400
24040+++ linux-2.6.32.43/drivers/acpi/proc.c 2011-04-17 15:56:46.000000000 -0400
24041@@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
24042 size_t count, loff_t * ppos)
24043 {
24044 struct list_head *node, *next;
24045- char strbuf[5];
24046- char str[5] = "";
24047- unsigned int len = count;
24048+ char strbuf[5] = {0};
24049 struct acpi_device *found_dev = NULL;
24050
24051- if (len > 4)
24052- len = 4;
24053- if (len < 0)
24054- return -EFAULT;
24055+ if (count > 4)
24056+ count = 4;
24057
24058- if (copy_from_user(strbuf, buffer, len))
24059+ if (copy_from_user(strbuf, buffer, count))
24060 return -EFAULT;
24061- strbuf[len] = '\0';
24062- sscanf(strbuf, "%s", str);
24063+ strbuf[count] = '\0';
24064
24065 mutex_lock(&acpi_device_lock);
24066 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
24067@@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
24068 if (!dev->wakeup.flags.valid)
24069 continue;
24070
24071- if (!strncmp(dev->pnp.bus_id, str, 4)) {
24072+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
24073 dev->wakeup.state.enabled =
24074 dev->wakeup.state.enabled ? 0 : 1;
24075 found_dev = dev;
24076diff -urNp linux-2.6.32.43/drivers/acpi/processor_core.c linux-2.6.32.43/drivers/acpi/processor_core.c
24077--- linux-2.6.32.43/drivers/acpi/processor_core.c 2011-03-27 14:31:47.000000000 -0400
24078+++ linux-2.6.32.43/drivers/acpi/processor_core.c 2011-04-17 15:56:46.000000000 -0400
24079@@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(
24080 return 0;
24081 }
24082
24083- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
24084+ BUG_ON(pr->id >= nr_cpu_ids);
24085
24086 /*
24087 * Buggy BIOS check
24088diff -urNp linux-2.6.32.43/drivers/acpi/sbshc.c linux-2.6.32.43/drivers/acpi/sbshc.c
24089--- linux-2.6.32.43/drivers/acpi/sbshc.c 2011-03-27 14:31:47.000000000 -0400
24090+++ linux-2.6.32.43/drivers/acpi/sbshc.c 2011-04-17 15:56:46.000000000 -0400
24091@@ -17,7 +17,7 @@
24092
24093 #define PREFIX "ACPI: "
24094
24095-#define ACPI_SMB_HC_CLASS "smbus_host_controller"
24096+#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
24097 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
24098
24099 struct acpi_smb_hc {
24100diff -urNp linux-2.6.32.43/drivers/acpi/sleep.c linux-2.6.32.43/drivers/acpi/sleep.c
24101--- linux-2.6.32.43/drivers/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
24102+++ linux-2.6.32.43/drivers/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
24103@@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(susp
24104 }
24105 }
24106
24107-static struct platform_suspend_ops acpi_suspend_ops = {
24108+static const struct platform_suspend_ops acpi_suspend_ops = {
24109 .valid = acpi_suspend_state_valid,
24110 .begin = acpi_suspend_begin,
24111 .prepare_late = acpi_pm_prepare,
24112@@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspen
24113 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24114 * been requested.
24115 */
24116-static struct platform_suspend_ops acpi_suspend_ops_old = {
24117+static const struct platform_suspend_ops acpi_suspend_ops_old = {
24118 .valid = acpi_suspend_state_valid,
24119 .begin = acpi_suspend_begin_old,
24120 .prepare_late = acpi_pm_disable_gpes,
24121@@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
24122 acpi_enable_all_runtime_gpes();
24123 }
24124
24125-static struct platform_hibernation_ops acpi_hibernation_ops = {
24126+static const struct platform_hibernation_ops acpi_hibernation_ops = {
24127 .begin = acpi_hibernation_begin,
24128 .end = acpi_pm_end,
24129 .pre_snapshot = acpi_hibernation_pre_snapshot,
24130@@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot
24131 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24132 * been requested.
24133 */
24134-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
24135+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
24136 .begin = acpi_hibernation_begin_old,
24137 .end = acpi_pm_end,
24138 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
24139diff -urNp linux-2.6.32.43/drivers/acpi/video.c linux-2.6.32.43/drivers/acpi/video.c
24140--- linux-2.6.32.43/drivers/acpi/video.c 2011-03-27 14:31:47.000000000 -0400
24141+++ linux-2.6.32.43/drivers/acpi/video.c 2011-04-17 15:56:46.000000000 -0400
24142@@ -359,7 +359,7 @@ static int acpi_video_set_brightness(str
24143 vd->brightness->levels[request_level]);
24144 }
24145
24146-static struct backlight_ops acpi_backlight_ops = {
24147+static const struct backlight_ops acpi_backlight_ops = {
24148 .get_brightness = acpi_video_get_brightness,
24149 .update_status = acpi_video_set_brightness,
24150 };
24151diff -urNp linux-2.6.32.43/drivers/ata/ahci.c linux-2.6.32.43/drivers/ata/ahci.c
24152--- linux-2.6.32.43/drivers/ata/ahci.c 2011-03-27 14:31:47.000000000 -0400
24153+++ linux-2.6.32.43/drivers/ata/ahci.c 2011-04-23 12:56:10.000000000 -0400
24154@@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sh
24155 .sdev_attrs = ahci_sdev_attrs,
24156 };
24157
24158-static struct ata_port_operations ahci_ops = {
24159+static const struct ata_port_operations ahci_ops = {
24160 .inherits = &sata_pmp_port_ops,
24161
24162 .qc_defer = sata_pmp_qc_defer_cmd_switch,
24163@@ -424,17 +424,17 @@ static struct ata_port_operations ahci_o
24164 .port_stop = ahci_port_stop,
24165 };
24166
24167-static struct ata_port_operations ahci_vt8251_ops = {
24168+static const struct ata_port_operations ahci_vt8251_ops = {
24169 .inherits = &ahci_ops,
24170 .hardreset = ahci_vt8251_hardreset,
24171 };
24172
24173-static struct ata_port_operations ahci_p5wdh_ops = {
24174+static const struct ata_port_operations ahci_p5wdh_ops = {
24175 .inherits = &ahci_ops,
24176 .hardreset = ahci_p5wdh_hardreset,
24177 };
24178
24179-static struct ata_port_operations ahci_sb600_ops = {
24180+static const struct ata_port_operations ahci_sb600_ops = {
24181 .inherits = &ahci_ops,
24182 .softreset = ahci_sb600_softreset,
24183 .pmp_softreset = ahci_sb600_softreset,
24184diff -urNp linux-2.6.32.43/drivers/ata/ata_generic.c linux-2.6.32.43/drivers/ata/ata_generic.c
24185--- linux-2.6.32.43/drivers/ata/ata_generic.c 2011-03-27 14:31:47.000000000 -0400
24186+++ linux-2.6.32.43/drivers/ata/ata_generic.c 2011-04-17 15:56:46.000000000 -0400
24187@@ -104,7 +104,7 @@ static struct scsi_host_template generic
24188 ATA_BMDMA_SHT(DRV_NAME),
24189 };
24190
24191-static struct ata_port_operations generic_port_ops = {
24192+static const struct ata_port_operations generic_port_ops = {
24193 .inherits = &ata_bmdma_port_ops,
24194 .cable_detect = ata_cable_unknown,
24195 .set_mode = generic_set_mode,
24196diff -urNp linux-2.6.32.43/drivers/ata/ata_piix.c linux-2.6.32.43/drivers/ata/ata_piix.c
24197--- linux-2.6.32.43/drivers/ata/ata_piix.c 2011-03-27 14:31:47.000000000 -0400
24198+++ linux-2.6.32.43/drivers/ata/ata_piix.c 2011-04-23 12:56:10.000000000 -0400
24199@@ -318,7 +318,7 @@ static struct scsi_host_template piix_sh
24200 ATA_BMDMA_SHT(DRV_NAME),
24201 };
24202
24203-static struct ata_port_operations piix_pata_ops = {
24204+static const struct ata_port_operations piix_pata_ops = {
24205 .inherits = &ata_bmdma32_port_ops,
24206 .cable_detect = ata_cable_40wire,
24207 .set_piomode = piix_set_piomode,
24208@@ -326,22 +326,22 @@ static struct ata_port_operations piix_p
24209 .prereset = piix_pata_prereset,
24210 };
24211
24212-static struct ata_port_operations piix_vmw_ops = {
24213+static const struct ata_port_operations piix_vmw_ops = {
24214 .inherits = &piix_pata_ops,
24215 .bmdma_status = piix_vmw_bmdma_status,
24216 };
24217
24218-static struct ata_port_operations ich_pata_ops = {
24219+static const struct ata_port_operations ich_pata_ops = {
24220 .inherits = &piix_pata_ops,
24221 .cable_detect = ich_pata_cable_detect,
24222 .set_dmamode = ich_set_dmamode,
24223 };
24224
24225-static struct ata_port_operations piix_sata_ops = {
24226+static const struct ata_port_operations piix_sata_ops = {
24227 .inherits = &ata_bmdma_port_ops,
24228 };
24229
24230-static struct ata_port_operations piix_sidpr_sata_ops = {
24231+static const struct ata_port_operations piix_sidpr_sata_ops = {
24232 .inherits = &piix_sata_ops,
24233 .hardreset = sata_std_hardreset,
24234 .scr_read = piix_sidpr_scr_read,
24235diff -urNp linux-2.6.32.43/drivers/ata/libata-acpi.c linux-2.6.32.43/drivers/ata/libata-acpi.c
24236--- linux-2.6.32.43/drivers/ata/libata-acpi.c 2011-03-27 14:31:47.000000000 -0400
24237+++ linux-2.6.32.43/drivers/ata/libata-acpi.c 2011-04-17 15:56:46.000000000 -0400
24238@@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_han
24239 ata_acpi_uevent(dev->link->ap, dev, event);
24240 }
24241
24242-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24243+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24244 .handler = ata_acpi_dev_notify_dock,
24245 .uevent = ata_acpi_dev_uevent,
24246 };
24247
24248-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24249+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24250 .handler = ata_acpi_ap_notify_dock,
24251 .uevent = ata_acpi_ap_uevent,
24252 };
24253diff -urNp linux-2.6.32.43/drivers/ata/libata-core.c linux-2.6.32.43/drivers/ata/libata-core.c
24254--- linux-2.6.32.43/drivers/ata/libata-core.c 2011-03-27 14:31:47.000000000 -0400
24255+++ linux-2.6.32.43/drivers/ata/libata-core.c 2011-04-23 12:56:10.000000000 -0400
24256@@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *
24257 struct ata_port *ap;
24258 unsigned int tag;
24259
24260- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24261+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24262 ap = qc->ap;
24263
24264 qc->flags = 0;
24265@@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued
24266 struct ata_port *ap;
24267 struct ata_link *link;
24268
24269- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24270+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24271 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
24272 ap = qc->ap;
24273 link = qc->dev->link;
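
The two hunks above harden a diagnostic: upstream only does WARN_ON_ONCE(qc == NULL), which prints a one-time warning and then continues into ap = qc->ap, so the NULL pointer is dereferenced anyway; the patch turns it into BUG_ON(qc == NULL), which stops the offending context before the dereference. The toy macros below illustrate only the difference in semantics (warn-and-continue versus hard stop); they are not the kernel implementations.

#include <stdio.h>
#include <stdlib.h>

/* Toy stand-ins for the kernel macros, illustration only (gcc statement expressions). */
#define TOY_WARN_ON_ONCE(cond) ({                               \
    static int warned;                                          \
    int __c = !!(cond);                                         \
    if (__c && !warned) {                                       \
        warned = 1;                                             \
        fprintf(stderr, "warning: %s\n", #cond);                \
    }                                                           \
    __c;  /* execution continues either way */                  \
})

#define TOY_BUG_ON(cond) do {                                   \
    if (cond) {                                                 \
        fprintf(stderr, "BUG: %s\n", #cond);                    \
        abort();  /* no way to continue past the bug */         \
    }                                                           \
} while (0)

int main(void)
{
    int *qc = NULL;
    TOY_WARN_ON_ONCE(qc == NULL);        /* warns, then falls through */
    TOY_BUG_ON(qc == NULL);              /* aborts here, before any dereference */
    printf("qc = %p\n", (void *)qc);     /* never reached */
    return 0;
}
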
24274@@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device
24275 * LOCKING:
24276 * None.
24277 */
24278-static void ata_finalize_port_ops(struct ata_port_operations *ops)
24279+static void ata_finalize_port_ops(const struct ata_port_operations *ops)
24280 {
24281 static DEFINE_SPINLOCK(lock);
24282 const struct ata_port_operations *cur;
24283@@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct
24284 return;
24285
24286 spin_lock(&lock);
24287+ pax_open_kernel();
24288
24289 for (cur = ops->inherits; cur; cur = cur->inherits) {
24290 void **inherit = (void **)cur;
24291@@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct
24292 if (IS_ERR(*pp))
24293 *pp = NULL;
24294
24295- ops->inherits = NULL;
24296+ ((struct ata_port_operations *)ops)->inherits = NULL;
24297
24298+ pax_close_kernel();
24299 spin_unlock(&lock);
24300 }
24301
24302@@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host
24303 */
24304 /* KILLME - the only user left is ipr */
24305 void ata_host_init(struct ata_host *host, struct device *dev,
24306- unsigned long flags, struct ata_port_operations *ops)
24307+ unsigned long flags, const struct ata_port_operations *ops)
24308 {
24309 spin_lock_init(&host->lock);
24310 host->dev = dev;
24311@@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(stru
24312 /* truly dummy */
24313 }
24314
24315-struct ata_port_operations ata_dummy_port_ops = {
24316+const struct ata_port_operations ata_dummy_port_ops = {
24317 .qc_prep = ata_noop_qc_prep,
24318 .qc_issue = ata_dummy_qc_issue,
24319 .error_handler = ata_dummy_error_handler,
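
The ata_finalize_port_ops() hunks show how the writes that are still needed survive constification: the function resolves the .inherits chain exactly once at registration, so the patch brackets that section with pax_open_kernel()/pax_close_kernel() -- which under PaX/KERNEXEC temporarily lift the kernel's write protection on the current CPU (e.g. via CR0.WP on x86) -- and casts the const away for the single remaining store to ops->inherits. The userspace analogy below expresses the same "briefly unprotect, patch, re-protect" pattern with mprotect(); it is only an analogy, not the PaX mechanism.

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct ops { void (*handler)(void); };

static void old_handler(void) { puts("old"); }
static void new_handler(void) { puts("new"); }

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);

    /* Place the ops table on its own page so it can be write-protected. */
    struct ops *ops = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (ops == MAP_FAILED)
        return 1;
    ops->handler = old_handler;
    mprotect(ops, pagesz, PROT_READ);           /* "const": read-only from now on */

    ops->handler();                             /* prints "old" */

    /* The analogue of pax_open_kernel() .. pax_close_kernel(): */
    mprotect(ops, pagesz, PROT_READ | PROT_WRITE);
    ops->handler = new_handler;                 /* the one permitted update */
    mprotect(ops, pagesz, PROT_READ);

    ops->handler();                             /* prints "new" */
    munmap(ops, pagesz);
    return 0;
}
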
24320diff -urNp linux-2.6.32.43/drivers/ata/libata-eh.c linux-2.6.32.43/drivers/ata/libata-eh.c
24321--- linux-2.6.32.43/drivers/ata/libata-eh.c 2011-03-27 14:31:47.000000000 -0400
24322+++ linux-2.6.32.43/drivers/ata/libata-eh.c 2011-05-16 21:46:57.000000000 -0400
24323@@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
24324 {
24325 struct ata_link *link;
24326
24327+ pax_track_stack();
24328+
24329 ata_for_each_link(link, ap, HOST_FIRST)
24330 ata_eh_link_report(link);
24331 }
24332@@ -3590,7 +3592,7 @@ void ata_do_eh(struct ata_port *ap, ata_
24333 */
24334 void ata_std_error_handler(struct ata_port *ap)
24335 {
24336- struct ata_port_operations *ops = ap->ops;
24337+ const struct ata_port_operations *ops = ap->ops;
24338 ata_reset_fn_t hardreset = ops->hardreset;
24339
24340 /* ignore built-in hardreset if SCR access is not available */
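
Besides constifying the local ops pointer, the libata-eh.c hunk adds a pax_track_stack() call at the top of ata_eh_report(). That call belongs to PaX's stack-usage tracking (PAX_MEMORY_STACKLEAK): functions judged to have deep or large stack frames record how far down the stack they reach, so the kernel can later sanitize the used region and avoid leaking stale stack contents. The sketch below is only a toy low-water-mark recorder, assuming a downward-growing stack; track_stack() is a hypothetical helper, not the PaX implementation.

#include <stdint.h>
#include <stdio.h>

static uintptr_t lowest_sp = UINTPTR_MAX;       /* deepest stack address seen */

/* Hypothetical helper: remember the deepest (lowest) stack address reached. */
#define track_stack() do {                      \
    volatile char marker;                       \
    uintptr_t sp = (uintptr_t)&marker;          \
    if (sp < lowest_sp)                         \
        lowest_sp = sp;                         \
} while (0)

static void report_link(void)
{
    char buf[4096];                             /* a deliberately large frame */
    track_stack();
    snprintf(buf, sizeof(buf), "link status");
    puts(buf);
}

int main(void)
{
    uintptr_t top;
    {
        volatile char marker;
        top = (uintptr_t)&marker;
    }
    report_link();
    printf("roughly %lu bytes of stack used\n",
           (unsigned long)(top - lowest_sp));
    return 0;
}
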
24341diff -urNp linux-2.6.32.43/drivers/ata/libata-pmp.c linux-2.6.32.43/drivers/ata/libata-pmp.c
24342--- linux-2.6.32.43/drivers/ata/libata-pmp.c 2011-03-27 14:31:47.000000000 -0400
24343+++ linux-2.6.32.43/drivers/ata/libata-pmp.c 2011-04-17 15:56:46.000000000 -0400
24344@@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(str
24345 */
24346 static int sata_pmp_eh_recover(struct ata_port *ap)
24347 {
24348- struct ata_port_operations *ops = ap->ops;
24349+ const struct ata_port_operations *ops = ap->ops;
24350 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
24351 struct ata_link *pmp_link = &ap->link;
24352 struct ata_device *pmp_dev = pmp_link->device;
24353diff -urNp linux-2.6.32.43/drivers/ata/pata_acpi.c linux-2.6.32.43/drivers/ata/pata_acpi.c
24354--- linux-2.6.32.43/drivers/ata/pata_acpi.c 2011-03-27 14:31:47.000000000 -0400
24355+++ linux-2.6.32.43/drivers/ata/pata_acpi.c 2011-04-17 15:56:46.000000000 -0400
24356@@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
24357 ATA_BMDMA_SHT(DRV_NAME),
24358 };
24359
24360-static struct ata_port_operations pacpi_ops = {
24361+static const struct ata_port_operations pacpi_ops = {
24362 .inherits = &ata_bmdma_port_ops,
24363 .qc_issue = pacpi_qc_issue,
24364 .cable_detect = pacpi_cable_detect,
24365diff -urNp linux-2.6.32.43/drivers/ata/pata_ali.c linux-2.6.32.43/drivers/ata/pata_ali.c
24366--- linux-2.6.32.43/drivers/ata/pata_ali.c 2011-03-27 14:31:47.000000000 -0400
24367+++ linux-2.6.32.43/drivers/ata/pata_ali.c 2011-04-17 15:56:46.000000000 -0400
24368@@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
24369 * Port operations for PIO only ALi
24370 */
24371
24372-static struct ata_port_operations ali_early_port_ops = {
24373+static const struct ata_port_operations ali_early_port_ops = {
24374 .inherits = &ata_sff_port_ops,
24375 .cable_detect = ata_cable_40wire,
24376 .set_piomode = ali_set_piomode,
24377@@ -382,7 +382,7 @@ static const struct ata_port_operations
24378 * Port operations for DMA capable ALi without cable
24379 * detect
24380 */
24381-static struct ata_port_operations ali_20_port_ops = {
24382+static const struct ata_port_operations ali_20_port_ops = {
24383 .inherits = &ali_dma_base_ops,
24384 .cable_detect = ata_cable_40wire,
24385 .mode_filter = ali_20_filter,
24386@@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
24387 /*
24388 * Port operations for DMA capable ALi with cable detect
24389 */
24390-static struct ata_port_operations ali_c2_port_ops = {
24391+static const struct ata_port_operations ali_c2_port_ops = {
24392 .inherits = &ali_dma_base_ops,
24393 .check_atapi_dma = ali_check_atapi_dma,
24394 .cable_detect = ali_c2_cable_detect,
24395@@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
24396 /*
24397 * Port operations for DMA capable ALi with cable detect
24398 */
24399-static struct ata_port_operations ali_c4_port_ops = {
24400+static const struct ata_port_operations ali_c4_port_ops = {
24401 .inherits = &ali_dma_base_ops,
24402 .check_atapi_dma = ali_check_atapi_dma,
24403 .cable_detect = ali_c2_cable_detect,
24404@@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
24405 /*
24406 * Port operations for DMA capable ALi with cable detect and LBA48
24407 */
24408-static struct ata_port_operations ali_c5_port_ops = {
24409+static const struct ata_port_operations ali_c5_port_ops = {
24410 .inherits = &ali_dma_base_ops,
24411 .check_atapi_dma = ali_check_atapi_dma,
24412 .dev_config = ali_warn_atapi_dma,
24413diff -urNp linux-2.6.32.43/drivers/ata/pata_amd.c linux-2.6.32.43/drivers/ata/pata_amd.c
24414--- linux-2.6.32.43/drivers/ata/pata_amd.c 2011-03-27 14:31:47.000000000 -0400
24415+++ linux-2.6.32.43/drivers/ata/pata_amd.c 2011-04-17 15:56:46.000000000 -0400
24416@@ -397,28 +397,28 @@ static const struct ata_port_operations
24417 .prereset = amd_pre_reset,
24418 };
24419
24420-static struct ata_port_operations amd33_port_ops = {
24421+static const struct ata_port_operations amd33_port_ops = {
24422 .inherits = &amd_base_port_ops,
24423 .cable_detect = ata_cable_40wire,
24424 .set_piomode = amd33_set_piomode,
24425 .set_dmamode = amd33_set_dmamode,
24426 };
24427
24428-static struct ata_port_operations amd66_port_ops = {
24429+static const struct ata_port_operations amd66_port_ops = {
24430 .inherits = &amd_base_port_ops,
24431 .cable_detect = ata_cable_unknown,
24432 .set_piomode = amd66_set_piomode,
24433 .set_dmamode = amd66_set_dmamode,
24434 };
24435
24436-static struct ata_port_operations amd100_port_ops = {
24437+static const struct ata_port_operations amd100_port_ops = {
24438 .inherits = &amd_base_port_ops,
24439 .cable_detect = ata_cable_unknown,
24440 .set_piomode = amd100_set_piomode,
24441 .set_dmamode = amd100_set_dmamode,
24442 };
24443
24444-static struct ata_port_operations amd133_port_ops = {
24445+static const struct ata_port_operations amd133_port_ops = {
24446 .inherits = &amd_base_port_ops,
24447 .cable_detect = amd_cable_detect,
24448 .set_piomode = amd133_set_piomode,
24449@@ -433,13 +433,13 @@ static const struct ata_port_operations
24450 .host_stop = nv_host_stop,
24451 };
24452
24453-static struct ata_port_operations nv100_port_ops = {
24454+static const struct ata_port_operations nv100_port_ops = {
24455 .inherits = &nv_base_port_ops,
24456 .set_piomode = nv100_set_piomode,
24457 .set_dmamode = nv100_set_dmamode,
24458 };
24459
24460-static struct ata_port_operations nv133_port_ops = {
24461+static const struct ata_port_operations nv133_port_ops = {
24462 .inherits = &nv_base_port_ops,
24463 .set_piomode = nv133_set_piomode,
24464 .set_dmamode = nv133_set_dmamode,
24465diff -urNp linux-2.6.32.43/drivers/ata/pata_artop.c linux-2.6.32.43/drivers/ata/pata_artop.c
24466--- linux-2.6.32.43/drivers/ata/pata_artop.c 2011-03-27 14:31:47.000000000 -0400
24467+++ linux-2.6.32.43/drivers/ata/pata_artop.c 2011-04-17 15:56:46.000000000 -0400
24468@@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
24469 ATA_BMDMA_SHT(DRV_NAME),
24470 };
24471
24472-static struct ata_port_operations artop6210_ops = {
24473+static const struct ata_port_operations artop6210_ops = {
24474 .inherits = &ata_bmdma_port_ops,
24475 .cable_detect = ata_cable_40wire,
24476 .set_piomode = artop6210_set_piomode,
24477@@ -320,7 +320,7 @@ static struct ata_port_operations artop6
24478 .qc_defer = artop6210_qc_defer,
24479 };
24480
24481-static struct ata_port_operations artop6260_ops = {
24482+static const struct ata_port_operations artop6260_ops = {
24483 .inherits = &ata_bmdma_port_ops,
24484 .cable_detect = artop6260_cable_detect,
24485 .set_piomode = artop6260_set_piomode,
24486diff -urNp linux-2.6.32.43/drivers/ata/pata_at32.c linux-2.6.32.43/drivers/ata/pata_at32.c
24487--- linux-2.6.32.43/drivers/ata/pata_at32.c 2011-03-27 14:31:47.000000000 -0400
24488+++ linux-2.6.32.43/drivers/ata/pata_at32.c 2011-04-17 15:56:46.000000000 -0400
24489@@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
24490 ATA_PIO_SHT(DRV_NAME),
24491 };
24492
24493-static struct ata_port_operations at32_port_ops = {
24494+static const struct ata_port_operations at32_port_ops = {
24495 .inherits = &ata_sff_port_ops,
24496 .cable_detect = ata_cable_40wire,
24497 .set_piomode = pata_at32_set_piomode,
24498diff -urNp linux-2.6.32.43/drivers/ata/pata_at91.c linux-2.6.32.43/drivers/ata/pata_at91.c
24499--- linux-2.6.32.43/drivers/ata/pata_at91.c 2011-03-27 14:31:47.000000000 -0400
24500+++ linux-2.6.32.43/drivers/ata/pata_at91.c 2011-04-17 15:56:46.000000000 -0400
24501@@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
24502 ATA_PIO_SHT(DRV_NAME),
24503 };
24504
24505-static struct ata_port_operations pata_at91_port_ops = {
24506+static const struct ata_port_operations pata_at91_port_ops = {
24507 .inherits = &ata_sff_port_ops,
24508
24509 .sff_data_xfer = pata_at91_data_xfer_noirq,
24510diff -urNp linux-2.6.32.43/drivers/ata/pata_atiixp.c linux-2.6.32.43/drivers/ata/pata_atiixp.c
24511--- linux-2.6.32.43/drivers/ata/pata_atiixp.c 2011-03-27 14:31:47.000000000 -0400
24512+++ linux-2.6.32.43/drivers/ata/pata_atiixp.c 2011-04-17 15:56:46.000000000 -0400
24513@@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
24514 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24515 };
24516
24517-static struct ata_port_operations atiixp_port_ops = {
24518+static const struct ata_port_operations atiixp_port_ops = {
24519 .inherits = &ata_bmdma_port_ops,
24520
24521 .qc_prep = ata_sff_dumb_qc_prep,
24522diff -urNp linux-2.6.32.43/drivers/ata/pata_atp867x.c linux-2.6.32.43/drivers/ata/pata_atp867x.c
24523--- linux-2.6.32.43/drivers/ata/pata_atp867x.c 2011-03-27 14:31:47.000000000 -0400
24524+++ linux-2.6.32.43/drivers/ata/pata_atp867x.c 2011-04-17 15:56:46.000000000 -0400
24525@@ -274,7 +274,7 @@ static struct scsi_host_template atp867x
24526 ATA_BMDMA_SHT(DRV_NAME),
24527 };
24528
24529-static struct ata_port_operations atp867x_ops = {
24530+static const struct ata_port_operations atp867x_ops = {
24531 .inherits = &ata_bmdma_port_ops,
24532 .cable_detect = atp867x_cable_detect,
24533 .set_piomode = atp867x_set_piomode,
24534diff -urNp linux-2.6.32.43/drivers/ata/pata_bf54x.c linux-2.6.32.43/drivers/ata/pata_bf54x.c
24535--- linux-2.6.32.43/drivers/ata/pata_bf54x.c 2011-03-27 14:31:47.000000000 -0400
24536+++ linux-2.6.32.43/drivers/ata/pata_bf54x.c 2011-04-17 15:56:46.000000000 -0400
24537@@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
24538 .dma_boundary = ATA_DMA_BOUNDARY,
24539 };
24540
24541-static struct ata_port_operations bfin_pata_ops = {
24542+static const struct ata_port_operations bfin_pata_ops = {
24543 .inherits = &ata_sff_port_ops,
24544
24545 .set_piomode = bfin_set_piomode,
24546diff -urNp linux-2.6.32.43/drivers/ata/pata_cmd640.c linux-2.6.32.43/drivers/ata/pata_cmd640.c
24547--- linux-2.6.32.43/drivers/ata/pata_cmd640.c 2011-03-27 14:31:47.000000000 -0400
24548+++ linux-2.6.32.43/drivers/ata/pata_cmd640.c 2011-04-17 15:56:46.000000000 -0400
24549@@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
24550 ATA_BMDMA_SHT(DRV_NAME),
24551 };
24552
24553-static struct ata_port_operations cmd640_port_ops = {
24554+static const struct ata_port_operations cmd640_port_ops = {
24555 .inherits = &ata_bmdma_port_ops,
24556 /* In theory xfer_noirq is not needed once we kill the prefetcher */
24557 .sff_data_xfer = ata_sff_data_xfer_noirq,
24558diff -urNp linux-2.6.32.43/drivers/ata/pata_cmd64x.c linux-2.6.32.43/drivers/ata/pata_cmd64x.c
24559--- linux-2.6.32.43/drivers/ata/pata_cmd64x.c 2011-06-25 12:55:34.000000000 -0400
24560+++ linux-2.6.32.43/drivers/ata/pata_cmd64x.c 2011-06-25 12:56:37.000000000 -0400
24561@@ -271,18 +271,18 @@ static const struct ata_port_operations
24562 .set_dmamode = cmd64x_set_dmamode,
24563 };
24564
24565-static struct ata_port_operations cmd64x_port_ops = {
24566+static const struct ata_port_operations cmd64x_port_ops = {
24567 .inherits = &cmd64x_base_ops,
24568 .cable_detect = ata_cable_40wire,
24569 };
24570
24571-static struct ata_port_operations cmd646r1_port_ops = {
24572+static const struct ata_port_operations cmd646r1_port_ops = {
24573 .inherits = &cmd64x_base_ops,
24574 .bmdma_stop = cmd646r1_bmdma_stop,
24575 .cable_detect = ata_cable_40wire,
24576 };
24577
24578-static struct ata_port_operations cmd648_port_ops = {
24579+static const struct ata_port_operations cmd648_port_ops = {
24580 .inherits = &cmd64x_base_ops,
24581 .bmdma_stop = cmd648_bmdma_stop,
24582 .cable_detect = cmd648_cable_detect,
24583diff -urNp linux-2.6.32.43/drivers/ata/pata_cs5520.c linux-2.6.32.43/drivers/ata/pata_cs5520.c
24584--- linux-2.6.32.43/drivers/ata/pata_cs5520.c 2011-03-27 14:31:47.000000000 -0400
24585+++ linux-2.6.32.43/drivers/ata/pata_cs5520.c 2011-04-17 15:56:46.000000000 -0400
24586@@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
24587 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24588 };
24589
24590-static struct ata_port_operations cs5520_port_ops = {
24591+static const struct ata_port_operations cs5520_port_ops = {
24592 .inherits = &ata_bmdma_port_ops,
24593 .qc_prep = ata_sff_dumb_qc_prep,
24594 .cable_detect = ata_cable_40wire,
24595diff -urNp linux-2.6.32.43/drivers/ata/pata_cs5530.c linux-2.6.32.43/drivers/ata/pata_cs5530.c
24596--- linux-2.6.32.43/drivers/ata/pata_cs5530.c 2011-03-27 14:31:47.000000000 -0400
24597+++ linux-2.6.32.43/drivers/ata/pata_cs5530.c 2011-04-17 15:56:46.000000000 -0400
24598@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
24599 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24600 };
24601
24602-static struct ata_port_operations cs5530_port_ops = {
24603+static const struct ata_port_operations cs5530_port_ops = {
24604 .inherits = &ata_bmdma_port_ops,
24605
24606 .qc_prep = ata_sff_dumb_qc_prep,
24607diff -urNp linux-2.6.32.43/drivers/ata/pata_cs5535.c linux-2.6.32.43/drivers/ata/pata_cs5535.c
24608--- linux-2.6.32.43/drivers/ata/pata_cs5535.c 2011-03-27 14:31:47.000000000 -0400
24609+++ linux-2.6.32.43/drivers/ata/pata_cs5535.c 2011-04-17 15:56:46.000000000 -0400
24610@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
24611 ATA_BMDMA_SHT(DRV_NAME),
24612 };
24613
24614-static struct ata_port_operations cs5535_port_ops = {
24615+static const struct ata_port_operations cs5535_port_ops = {
24616 .inherits = &ata_bmdma_port_ops,
24617 .cable_detect = cs5535_cable_detect,
24618 .set_piomode = cs5535_set_piomode,
24619diff -urNp linux-2.6.32.43/drivers/ata/pata_cs5536.c linux-2.6.32.43/drivers/ata/pata_cs5536.c
24620--- linux-2.6.32.43/drivers/ata/pata_cs5536.c 2011-03-27 14:31:47.000000000 -0400
24621+++ linux-2.6.32.43/drivers/ata/pata_cs5536.c 2011-04-17 15:56:46.000000000 -0400
24622@@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
24623 ATA_BMDMA_SHT(DRV_NAME),
24624 };
24625
24626-static struct ata_port_operations cs5536_port_ops = {
24627+static const struct ata_port_operations cs5536_port_ops = {
24628 .inherits = &ata_bmdma_port_ops,
24629 .cable_detect = cs5536_cable_detect,
24630 .set_piomode = cs5536_set_piomode,
24631diff -urNp linux-2.6.32.43/drivers/ata/pata_cypress.c linux-2.6.32.43/drivers/ata/pata_cypress.c
24632--- linux-2.6.32.43/drivers/ata/pata_cypress.c 2011-03-27 14:31:47.000000000 -0400
24633+++ linux-2.6.32.43/drivers/ata/pata_cypress.c 2011-04-17 15:56:46.000000000 -0400
24634@@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
24635 ATA_BMDMA_SHT(DRV_NAME),
24636 };
24637
24638-static struct ata_port_operations cy82c693_port_ops = {
24639+static const struct ata_port_operations cy82c693_port_ops = {
24640 .inherits = &ata_bmdma_port_ops,
24641 .cable_detect = ata_cable_40wire,
24642 .set_piomode = cy82c693_set_piomode,
24643diff -urNp linux-2.6.32.43/drivers/ata/pata_efar.c linux-2.6.32.43/drivers/ata/pata_efar.c
24644--- linux-2.6.32.43/drivers/ata/pata_efar.c 2011-03-27 14:31:47.000000000 -0400
24645+++ linux-2.6.32.43/drivers/ata/pata_efar.c 2011-04-17 15:56:46.000000000 -0400
24646@@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
24647 ATA_BMDMA_SHT(DRV_NAME),
24648 };
24649
24650-static struct ata_port_operations efar_ops = {
24651+static const struct ata_port_operations efar_ops = {
24652 .inherits = &ata_bmdma_port_ops,
24653 .cable_detect = efar_cable_detect,
24654 .set_piomode = efar_set_piomode,
24655diff -urNp linux-2.6.32.43/drivers/ata/pata_hpt366.c linux-2.6.32.43/drivers/ata/pata_hpt366.c
24656--- linux-2.6.32.43/drivers/ata/pata_hpt366.c 2011-06-25 12:55:34.000000000 -0400
24657+++ linux-2.6.32.43/drivers/ata/pata_hpt366.c 2011-06-25 12:56:37.000000000 -0400
24658@@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
24659 * Configuration for HPT366/68
24660 */
24661
24662-static struct ata_port_operations hpt366_port_ops = {
24663+static const struct ata_port_operations hpt366_port_ops = {
24664 .inherits = &ata_bmdma_port_ops,
24665 .cable_detect = hpt36x_cable_detect,
24666 .mode_filter = hpt366_filter,
24667diff -urNp linux-2.6.32.43/drivers/ata/pata_hpt37x.c linux-2.6.32.43/drivers/ata/pata_hpt37x.c
24668--- linux-2.6.32.43/drivers/ata/pata_hpt37x.c 2011-06-25 12:55:34.000000000 -0400
24669+++ linux-2.6.32.43/drivers/ata/pata_hpt37x.c 2011-06-25 12:56:37.000000000 -0400
24670@@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_
24671 * Configuration for HPT370
24672 */
24673
24674-static struct ata_port_operations hpt370_port_ops = {
24675+static const struct ata_port_operations hpt370_port_ops = {
24676 .inherits = &ata_bmdma_port_ops,
24677
24678 .bmdma_stop = hpt370_bmdma_stop,
24679@@ -591,7 +591,7 @@ static struct ata_port_operations hpt370
24680 * Configuration for HPT370A. Close to 370 but less filters
24681 */
24682
24683-static struct ata_port_operations hpt370a_port_ops = {
24684+static const struct ata_port_operations hpt370a_port_ops = {
24685 .inherits = &hpt370_port_ops,
24686 .mode_filter = hpt370a_filter,
24687 };
24688@@ -601,7 +601,7 @@ static struct ata_port_operations hpt370
24689 * and DMA mode setting functionality.
24690 */
24691
24692-static struct ata_port_operations hpt372_port_ops = {
24693+static const struct ata_port_operations hpt372_port_ops = {
24694 .inherits = &ata_bmdma_port_ops,
24695
24696 .bmdma_stop = hpt37x_bmdma_stop,
24697@@ -616,7 +616,7 @@ static struct ata_port_operations hpt372
24698 * but we have a different cable detection procedure for function 1.
24699 */
24700
24701-static struct ata_port_operations hpt374_fn1_port_ops = {
24702+static const struct ata_port_operations hpt374_fn1_port_ops = {
24703 .inherits = &hpt372_port_ops,
24704 .prereset = hpt374_fn1_pre_reset,
24705 };
24706diff -urNp linux-2.6.32.43/drivers/ata/pata_hpt3x2n.c linux-2.6.32.43/drivers/ata/pata_hpt3x2n.c
24707--- linux-2.6.32.43/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:55:34.000000000 -0400
24708+++ linux-2.6.32.43/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:56:37.000000000 -0400
24709@@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n
24710 * Configuration for HPT3x2n.
24711 */
24712
24713-static struct ata_port_operations hpt3x2n_port_ops = {
24714+static const struct ata_port_operations hpt3x2n_port_ops = {
24715 .inherits = &ata_bmdma_port_ops,
24716
24717 .bmdma_stop = hpt3x2n_bmdma_stop,
24718diff -urNp linux-2.6.32.43/drivers/ata/pata_hpt3x3.c linux-2.6.32.43/drivers/ata/pata_hpt3x3.c
24719--- linux-2.6.32.43/drivers/ata/pata_hpt3x3.c 2011-03-27 14:31:47.000000000 -0400
24720+++ linux-2.6.32.43/drivers/ata/pata_hpt3x3.c 2011-04-17 15:56:46.000000000 -0400
24721@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
24722 ATA_BMDMA_SHT(DRV_NAME),
24723 };
24724
24725-static struct ata_port_operations hpt3x3_port_ops = {
24726+static const struct ata_port_operations hpt3x3_port_ops = {
24727 .inherits = &ata_bmdma_port_ops,
24728 .cable_detect = ata_cable_40wire,
24729 .set_piomode = hpt3x3_set_piomode,
24730diff -urNp linux-2.6.32.43/drivers/ata/pata_icside.c linux-2.6.32.43/drivers/ata/pata_icside.c
24731--- linux-2.6.32.43/drivers/ata/pata_icside.c 2011-03-27 14:31:47.000000000 -0400
24732+++ linux-2.6.32.43/drivers/ata/pata_icside.c 2011-04-17 15:56:46.000000000 -0400
24733@@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
24734 }
24735 }
24736
24737-static struct ata_port_operations pata_icside_port_ops = {
24738+static const struct ata_port_operations pata_icside_port_ops = {
24739 .inherits = &ata_sff_port_ops,
24740 /* no need to build any PRD tables for DMA */
24741 .qc_prep = ata_noop_qc_prep,
24742diff -urNp linux-2.6.32.43/drivers/ata/pata_isapnp.c linux-2.6.32.43/drivers/ata/pata_isapnp.c
24743--- linux-2.6.32.43/drivers/ata/pata_isapnp.c 2011-03-27 14:31:47.000000000 -0400
24744+++ linux-2.6.32.43/drivers/ata/pata_isapnp.c 2011-04-17 15:56:46.000000000 -0400
24745@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
24746 ATA_PIO_SHT(DRV_NAME),
24747 };
24748
24749-static struct ata_port_operations isapnp_port_ops = {
24750+static const struct ata_port_operations isapnp_port_ops = {
24751 .inherits = &ata_sff_port_ops,
24752 .cable_detect = ata_cable_40wire,
24753 };
24754
24755-static struct ata_port_operations isapnp_noalt_port_ops = {
24756+static const struct ata_port_operations isapnp_noalt_port_ops = {
24757 .inherits = &ata_sff_port_ops,
24758 .cable_detect = ata_cable_40wire,
24759 /* No altstatus so we don't want to use the lost interrupt poll */
24760diff -urNp linux-2.6.32.43/drivers/ata/pata_it8213.c linux-2.6.32.43/drivers/ata/pata_it8213.c
24761--- linux-2.6.32.43/drivers/ata/pata_it8213.c 2011-03-27 14:31:47.000000000 -0400
24762+++ linux-2.6.32.43/drivers/ata/pata_it8213.c 2011-04-17 15:56:46.000000000 -0400
24763@@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
24764 };
24765
24766
24767-static struct ata_port_operations it8213_ops = {
24768+static const struct ata_port_operations it8213_ops = {
24769 .inherits = &ata_bmdma_port_ops,
24770 .cable_detect = it8213_cable_detect,
24771 .set_piomode = it8213_set_piomode,
24772diff -urNp linux-2.6.32.43/drivers/ata/pata_it821x.c linux-2.6.32.43/drivers/ata/pata_it821x.c
24773--- linux-2.6.32.43/drivers/ata/pata_it821x.c 2011-03-27 14:31:47.000000000 -0400
24774+++ linux-2.6.32.43/drivers/ata/pata_it821x.c 2011-04-17 15:56:46.000000000 -0400
24775@@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
24776 ATA_BMDMA_SHT(DRV_NAME),
24777 };
24778
24779-static struct ata_port_operations it821x_smart_port_ops = {
24780+static const struct ata_port_operations it821x_smart_port_ops = {
24781 .inherits = &ata_bmdma_port_ops,
24782
24783 .check_atapi_dma= it821x_check_atapi_dma,
24784@@ -814,7 +814,7 @@ static struct ata_port_operations it821x
24785 .port_start = it821x_port_start,
24786 };
24787
24788-static struct ata_port_operations it821x_passthru_port_ops = {
24789+static const struct ata_port_operations it821x_passthru_port_ops = {
24790 .inherits = &ata_bmdma_port_ops,
24791
24792 .check_atapi_dma= it821x_check_atapi_dma,
24793@@ -830,7 +830,7 @@ static struct ata_port_operations it821x
24794 .port_start = it821x_port_start,
24795 };
24796
24797-static struct ata_port_operations it821x_rdc_port_ops = {
24798+static const struct ata_port_operations it821x_rdc_port_ops = {
24799 .inherits = &ata_bmdma_port_ops,
24800
24801 .check_atapi_dma= it821x_check_atapi_dma,
24802diff -urNp linux-2.6.32.43/drivers/ata/pata_ixp4xx_cf.c linux-2.6.32.43/drivers/ata/pata_ixp4xx_cf.c
24803--- linux-2.6.32.43/drivers/ata/pata_ixp4xx_cf.c 2011-03-27 14:31:47.000000000 -0400
24804+++ linux-2.6.32.43/drivers/ata/pata_ixp4xx_cf.c 2011-04-17 15:56:46.000000000 -0400
24805@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
24806 ATA_PIO_SHT(DRV_NAME),
24807 };
24808
24809-static struct ata_port_operations ixp4xx_port_ops = {
24810+static const struct ata_port_operations ixp4xx_port_ops = {
24811 .inherits = &ata_sff_port_ops,
24812 .sff_data_xfer = ixp4xx_mmio_data_xfer,
24813 .cable_detect = ata_cable_40wire,
24814diff -urNp linux-2.6.32.43/drivers/ata/pata_jmicron.c linux-2.6.32.43/drivers/ata/pata_jmicron.c
24815--- linux-2.6.32.43/drivers/ata/pata_jmicron.c 2011-03-27 14:31:47.000000000 -0400
24816+++ linux-2.6.32.43/drivers/ata/pata_jmicron.c 2011-04-17 15:56:46.000000000 -0400
24817@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
24818 ATA_BMDMA_SHT(DRV_NAME),
24819 };
24820
24821-static struct ata_port_operations jmicron_ops = {
24822+static const struct ata_port_operations jmicron_ops = {
24823 .inherits = &ata_bmdma_port_ops,
24824 .prereset = jmicron_pre_reset,
24825 };
24826diff -urNp linux-2.6.32.43/drivers/ata/pata_legacy.c linux-2.6.32.43/drivers/ata/pata_legacy.c
24827--- linux-2.6.32.43/drivers/ata/pata_legacy.c 2011-03-27 14:31:47.000000000 -0400
24828+++ linux-2.6.32.43/drivers/ata/pata_legacy.c 2011-04-17 15:56:46.000000000 -0400
24829@@ -106,7 +106,7 @@ struct legacy_probe {
24830
24831 struct legacy_controller {
24832 const char *name;
24833- struct ata_port_operations *ops;
24834+ const struct ata_port_operations *ops;
24835 unsigned int pio_mask;
24836 unsigned int flags;
24837 unsigned int pflags;
24838@@ -223,12 +223,12 @@ static const struct ata_port_operations
24839 * pio_mask as well.
24840 */
24841
24842-static struct ata_port_operations simple_port_ops = {
24843+static const struct ata_port_operations simple_port_ops = {
24844 .inherits = &legacy_base_port_ops,
24845 .sff_data_xfer = ata_sff_data_xfer_noirq,
24846 };
24847
24848-static struct ata_port_operations legacy_port_ops = {
24849+static const struct ata_port_operations legacy_port_ops = {
24850 .inherits = &legacy_base_port_ops,
24851 .sff_data_xfer = ata_sff_data_xfer_noirq,
24852 .set_mode = legacy_set_mode,
24853@@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
24854 return buflen;
24855 }
24856
24857-static struct ata_port_operations pdc20230_port_ops = {
24858+static const struct ata_port_operations pdc20230_port_ops = {
24859 .inherits = &legacy_base_port_ops,
24860 .set_piomode = pdc20230_set_piomode,
24861 .sff_data_xfer = pdc_data_xfer_vlb,
24862@@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
24863 ioread8(ap->ioaddr.status_addr);
24864 }
24865
24866-static struct ata_port_operations ht6560a_port_ops = {
24867+static const struct ata_port_operations ht6560a_port_ops = {
24868 .inherits = &legacy_base_port_ops,
24869 .set_piomode = ht6560a_set_piomode,
24870 };
24871@@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
24872 ioread8(ap->ioaddr.status_addr);
24873 }
24874
24875-static struct ata_port_operations ht6560b_port_ops = {
24876+static const struct ata_port_operations ht6560b_port_ops = {
24877 .inherits = &legacy_base_port_ops,
24878 .set_piomode = ht6560b_set_piomode,
24879 };
24880@@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
24881 }
24882
24883
24884-static struct ata_port_operations opti82c611a_port_ops = {
24885+static const struct ata_port_operations opti82c611a_port_ops = {
24886 .inherits = &legacy_base_port_ops,
24887 .set_piomode = opti82c611a_set_piomode,
24888 };
24889@@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
24890 return ata_sff_qc_issue(qc);
24891 }
24892
24893-static struct ata_port_operations opti82c46x_port_ops = {
24894+static const struct ata_port_operations opti82c46x_port_ops = {
24895 .inherits = &legacy_base_port_ops,
24896 .set_piomode = opti82c46x_set_piomode,
24897 .qc_issue = opti82c46x_qc_issue,
24898@@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
24899 return 0;
24900 }
24901
24902-static struct ata_port_operations qdi6500_port_ops = {
24903+static const struct ata_port_operations qdi6500_port_ops = {
24904 .inherits = &legacy_base_port_ops,
24905 .set_piomode = qdi6500_set_piomode,
24906 .qc_issue = qdi_qc_issue,
24907 .sff_data_xfer = vlb32_data_xfer,
24908 };
24909
24910-static struct ata_port_operations qdi6580_port_ops = {
24911+static const struct ata_port_operations qdi6580_port_ops = {
24912 .inherits = &legacy_base_port_ops,
24913 .set_piomode = qdi6580_set_piomode,
24914 .sff_data_xfer = vlb32_data_xfer,
24915 };
24916
24917-static struct ata_port_operations qdi6580dp_port_ops = {
24918+static const struct ata_port_operations qdi6580dp_port_ops = {
24919 .inherits = &legacy_base_port_ops,
24920 .set_piomode = qdi6580dp_set_piomode,
24921 .sff_data_xfer = vlb32_data_xfer,
24922@@ -855,7 +855,7 @@ static int winbond_port(struct platform_
24923 return 0;
24924 }
24925
24926-static struct ata_port_operations winbond_port_ops = {
24927+static const struct ata_port_operations winbond_port_ops = {
24928 .inherits = &legacy_base_port_ops,
24929 .set_piomode = winbond_set_piomode,
24930 .sff_data_xfer = vlb32_data_xfer,
24931@@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
24932 int pio_modes = controller->pio_mask;
24933 unsigned long io = probe->port;
24934 u32 mask = (1 << probe->slot);
24935- struct ata_port_operations *ops = controller->ops;
24936+ const struct ata_port_operations *ops = controller->ops;
24937 struct legacy_data *ld = &legacy_data[probe->slot];
24938 struct ata_host *host = NULL;
24939 struct ata_port *ap;
24940diff -urNp linux-2.6.32.43/drivers/ata/pata_marvell.c linux-2.6.32.43/drivers/ata/pata_marvell.c
24941--- linux-2.6.32.43/drivers/ata/pata_marvell.c 2011-03-27 14:31:47.000000000 -0400
24942+++ linux-2.6.32.43/drivers/ata/pata_marvell.c 2011-04-17 15:56:46.000000000 -0400
24943@@ -100,7 +100,7 @@ static struct scsi_host_template marvell
24944 ATA_BMDMA_SHT(DRV_NAME),
24945 };
24946
24947-static struct ata_port_operations marvell_ops = {
24948+static const struct ata_port_operations marvell_ops = {
24949 .inherits = &ata_bmdma_port_ops,
24950 .cable_detect = marvell_cable_detect,
24951 .prereset = marvell_pre_reset,
24952diff -urNp linux-2.6.32.43/drivers/ata/pata_mpc52xx.c linux-2.6.32.43/drivers/ata/pata_mpc52xx.c
24953--- linux-2.6.32.43/drivers/ata/pata_mpc52xx.c 2011-03-27 14:31:47.000000000 -0400
24954+++ linux-2.6.32.43/drivers/ata/pata_mpc52xx.c 2011-04-17 15:56:46.000000000 -0400
24955@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
24956 ATA_PIO_SHT(DRV_NAME),
24957 };
24958
24959-static struct ata_port_operations mpc52xx_ata_port_ops = {
24960+static const struct ata_port_operations mpc52xx_ata_port_ops = {
24961 .inherits = &ata_bmdma_port_ops,
24962 .sff_dev_select = mpc52xx_ata_dev_select,
24963 .set_piomode = mpc52xx_ata_set_piomode,
24964diff -urNp linux-2.6.32.43/drivers/ata/pata_mpiix.c linux-2.6.32.43/drivers/ata/pata_mpiix.c
24965--- linux-2.6.32.43/drivers/ata/pata_mpiix.c 2011-03-27 14:31:47.000000000 -0400
24966+++ linux-2.6.32.43/drivers/ata/pata_mpiix.c 2011-04-17 15:56:46.000000000 -0400
24967@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
24968 ATA_PIO_SHT(DRV_NAME),
24969 };
24970
24971-static struct ata_port_operations mpiix_port_ops = {
24972+static const struct ata_port_operations mpiix_port_ops = {
24973 .inherits = &ata_sff_port_ops,
24974 .qc_issue = mpiix_qc_issue,
24975 .cable_detect = ata_cable_40wire,
24976diff -urNp linux-2.6.32.43/drivers/ata/pata_netcell.c linux-2.6.32.43/drivers/ata/pata_netcell.c
24977--- linux-2.6.32.43/drivers/ata/pata_netcell.c 2011-03-27 14:31:47.000000000 -0400
24978+++ linux-2.6.32.43/drivers/ata/pata_netcell.c 2011-04-17 15:56:46.000000000 -0400
24979@@ -34,7 +34,7 @@ static struct scsi_host_template netcell
24980 ATA_BMDMA_SHT(DRV_NAME),
24981 };
24982
24983-static struct ata_port_operations netcell_ops = {
24984+static const struct ata_port_operations netcell_ops = {
24985 .inherits = &ata_bmdma_port_ops,
24986 .cable_detect = ata_cable_80wire,
24987 .read_id = netcell_read_id,
24988diff -urNp linux-2.6.32.43/drivers/ata/pata_ninja32.c linux-2.6.32.43/drivers/ata/pata_ninja32.c
24989--- linux-2.6.32.43/drivers/ata/pata_ninja32.c 2011-03-27 14:31:47.000000000 -0400
24990+++ linux-2.6.32.43/drivers/ata/pata_ninja32.c 2011-04-17 15:56:46.000000000 -0400
24991@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
24992 ATA_BMDMA_SHT(DRV_NAME),
24993 };
24994
24995-static struct ata_port_operations ninja32_port_ops = {
24996+static const struct ata_port_operations ninja32_port_ops = {
24997 .inherits = &ata_bmdma_port_ops,
24998 .sff_dev_select = ninja32_dev_select,
24999 .cable_detect = ata_cable_40wire,
25000diff -urNp linux-2.6.32.43/drivers/ata/pata_ns87410.c linux-2.6.32.43/drivers/ata/pata_ns87410.c
25001--- linux-2.6.32.43/drivers/ata/pata_ns87410.c 2011-03-27 14:31:47.000000000 -0400
25002+++ linux-2.6.32.43/drivers/ata/pata_ns87410.c 2011-04-17 15:56:46.000000000 -0400
25003@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
25004 ATA_PIO_SHT(DRV_NAME),
25005 };
25006
25007-static struct ata_port_operations ns87410_port_ops = {
25008+static const struct ata_port_operations ns87410_port_ops = {
25009 .inherits = &ata_sff_port_ops,
25010 .qc_issue = ns87410_qc_issue,
25011 .cable_detect = ata_cable_40wire,
25012diff -urNp linux-2.6.32.43/drivers/ata/pata_ns87415.c linux-2.6.32.43/drivers/ata/pata_ns87415.c
25013--- linux-2.6.32.43/drivers/ata/pata_ns87415.c 2011-03-27 14:31:47.000000000 -0400
25014+++ linux-2.6.32.43/drivers/ata/pata_ns87415.c 2011-04-17 15:56:46.000000000 -0400
25015@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
25016 }
25017 #endif /* 87560 SuperIO Support */
25018
25019-static struct ata_port_operations ns87415_pata_ops = {
25020+static const struct ata_port_operations ns87415_pata_ops = {
25021 .inherits = &ata_bmdma_port_ops,
25022
25023 .check_atapi_dma = ns87415_check_atapi_dma,
25024@@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
25025 };
25026
25027 #if defined(CONFIG_SUPERIO)
25028-static struct ata_port_operations ns87560_pata_ops = {
25029+static const struct ata_port_operations ns87560_pata_ops = {
25030 .inherits = &ns87415_pata_ops,
25031 .sff_tf_read = ns87560_tf_read,
25032 .sff_check_status = ns87560_check_status,
25033diff -urNp linux-2.6.32.43/drivers/ata/pata_octeon_cf.c linux-2.6.32.43/drivers/ata/pata_octeon_cf.c
25034--- linux-2.6.32.43/drivers/ata/pata_octeon_cf.c 2011-03-27 14:31:47.000000000 -0400
25035+++ linux-2.6.32.43/drivers/ata/pata_octeon_cf.c 2011-04-17 15:56:46.000000000 -0400
25036@@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(s
25037 return 0;
25038 }
25039
25040+/* cannot be const */
25041 static struct ata_port_operations octeon_cf_ops = {
25042 .inherits = &ata_sff_port_ops,
25043 .check_atapi_dma = octeon_cf_check_atapi_dma,
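
pata_octeon_cf is the one driver in this run whose table is explicitly annotated "cannot be const" instead of being constified. The usual reason is that the driver writes into the table at run time -- typically during probe, once it has inspected the hardware -- so the structure cannot live in read-only memory. The sketch below shows that shape of driver; chip_ops, probe() and the transfer callbacks are invented names, not the octeon_cf code.

#include <stdio.h>
#include <stdbool.h>

struct port_ops { void (*xfer)(void); };

static void pio_xfer(void) { puts("PIO transfer"); }
static void dma_xfer(void) { puts("DMA transfer"); }

/* Cannot be const: probe() decides the transfer method at run time. */
static struct port_ops chip_ops = {
    .xfer = pio_xfer,
};

static void probe(bool dma_capable)
{
    if (dma_capable)
        chip_ops.xfer = dma_xfer;   /* run-time write into the ops table */
}

int main(void)
{
    probe(true);
    chip_ops.xfer();                /* prints "DMA transfer" */
    return 0;
}
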
25044diff -urNp linux-2.6.32.43/drivers/ata/pata_oldpiix.c linux-2.6.32.43/drivers/ata/pata_oldpiix.c
25045--- linux-2.6.32.43/drivers/ata/pata_oldpiix.c 2011-03-27 14:31:47.000000000 -0400
25046+++ linux-2.6.32.43/drivers/ata/pata_oldpiix.c 2011-04-17 15:56:46.000000000 -0400
25047@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
25048 ATA_BMDMA_SHT(DRV_NAME),
25049 };
25050
25051-static struct ata_port_operations oldpiix_pata_ops = {
25052+static const struct ata_port_operations oldpiix_pata_ops = {
25053 .inherits = &ata_bmdma_port_ops,
25054 .qc_issue = oldpiix_qc_issue,
25055 .cable_detect = ata_cable_40wire,
25056diff -urNp linux-2.6.32.43/drivers/ata/pata_opti.c linux-2.6.32.43/drivers/ata/pata_opti.c
25057--- linux-2.6.32.43/drivers/ata/pata_opti.c 2011-03-27 14:31:47.000000000 -0400
25058+++ linux-2.6.32.43/drivers/ata/pata_opti.c 2011-04-17 15:56:46.000000000 -0400
25059@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
25060 ATA_PIO_SHT(DRV_NAME),
25061 };
25062
25063-static struct ata_port_operations opti_port_ops = {
25064+static const struct ata_port_operations opti_port_ops = {
25065 .inherits = &ata_sff_port_ops,
25066 .cable_detect = ata_cable_40wire,
25067 .set_piomode = opti_set_piomode,
25068diff -urNp linux-2.6.32.43/drivers/ata/pata_optidma.c linux-2.6.32.43/drivers/ata/pata_optidma.c
25069--- linux-2.6.32.43/drivers/ata/pata_optidma.c 2011-03-27 14:31:47.000000000 -0400
25070+++ linux-2.6.32.43/drivers/ata/pata_optidma.c 2011-04-17 15:56:46.000000000 -0400
25071@@ -337,7 +337,7 @@ static struct scsi_host_template optidma
25072 ATA_BMDMA_SHT(DRV_NAME),
25073 };
25074
25075-static struct ata_port_operations optidma_port_ops = {
25076+static const struct ata_port_operations optidma_port_ops = {
25077 .inherits = &ata_bmdma_port_ops,
25078 .cable_detect = ata_cable_40wire,
25079 .set_piomode = optidma_set_pio_mode,
25080@@ -346,7 +346,7 @@ static struct ata_port_operations optidm
25081 .prereset = optidma_pre_reset,
25082 };
25083
25084-static struct ata_port_operations optiplus_port_ops = {
25085+static const struct ata_port_operations optiplus_port_ops = {
25086 .inherits = &optidma_port_ops,
25087 .set_piomode = optiplus_set_pio_mode,
25088 .set_dmamode = optiplus_set_dma_mode,
25089diff -urNp linux-2.6.32.43/drivers/ata/pata_palmld.c linux-2.6.32.43/drivers/ata/pata_palmld.c
25090--- linux-2.6.32.43/drivers/ata/pata_palmld.c 2011-03-27 14:31:47.000000000 -0400
25091+++ linux-2.6.32.43/drivers/ata/pata_palmld.c 2011-04-17 15:56:46.000000000 -0400
25092@@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
25093 ATA_PIO_SHT(DRV_NAME),
25094 };
25095
25096-static struct ata_port_operations palmld_port_ops = {
25097+static const struct ata_port_operations palmld_port_ops = {
25098 .inherits = &ata_sff_port_ops,
25099 .sff_data_xfer = ata_sff_data_xfer_noirq,
25100 .cable_detect = ata_cable_40wire,
25101diff -urNp linux-2.6.32.43/drivers/ata/pata_pcmcia.c linux-2.6.32.43/drivers/ata/pata_pcmcia.c
25102--- linux-2.6.32.43/drivers/ata/pata_pcmcia.c 2011-03-27 14:31:47.000000000 -0400
25103+++ linux-2.6.32.43/drivers/ata/pata_pcmcia.c 2011-04-17 15:56:46.000000000 -0400
25104@@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
25105 ATA_PIO_SHT(DRV_NAME),
25106 };
25107
25108-static struct ata_port_operations pcmcia_port_ops = {
25109+static const struct ata_port_operations pcmcia_port_ops = {
25110 .inherits = &ata_sff_port_ops,
25111 .sff_data_xfer = ata_sff_data_xfer_noirq,
25112 .cable_detect = ata_cable_40wire,
25113 .set_mode = pcmcia_set_mode,
25114 };
25115
25116-static struct ata_port_operations pcmcia_8bit_port_ops = {
25117+static const struct ata_port_operations pcmcia_8bit_port_ops = {
25118 .inherits = &ata_sff_port_ops,
25119 .sff_data_xfer = ata_data_xfer_8bit,
25120 .cable_detect = ata_cable_40wire,
25121@@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
25122 unsigned long io_base, ctl_base;
25123 void __iomem *io_addr, *ctl_addr;
25124 int n_ports = 1;
25125- struct ata_port_operations *ops = &pcmcia_port_ops;
25126+ const struct ata_port_operations *ops = &pcmcia_port_ops;
25127
25128 info = kzalloc(sizeof(*info), GFP_KERNEL);
25129 if (info == NULL)
25130diff -urNp linux-2.6.32.43/drivers/ata/pata_pdc2027x.c linux-2.6.32.43/drivers/ata/pata_pdc2027x.c
25131--- linux-2.6.32.43/drivers/ata/pata_pdc2027x.c 2011-03-27 14:31:47.000000000 -0400
25132+++ linux-2.6.32.43/drivers/ata/pata_pdc2027x.c 2011-04-17 15:56:46.000000000 -0400
25133@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
25134 ATA_BMDMA_SHT(DRV_NAME),
25135 };
25136
25137-static struct ata_port_operations pdc2027x_pata100_ops = {
25138+static const struct ata_port_operations pdc2027x_pata100_ops = {
25139 .inherits = &ata_bmdma_port_ops,
25140 .check_atapi_dma = pdc2027x_check_atapi_dma,
25141 .cable_detect = pdc2027x_cable_detect,
25142 .prereset = pdc2027x_prereset,
25143 };
25144
25145-static struct ata_port_operations pdc2027x_pata133_ops = {
25146+static const struct ata_port_operations pdc2027x_pata133_ops = {
25147 .inherits = &pdc2027x_pata100_ops,
25148 .mode_filter = pdc2027x_mode_filter,
25149 .set_piomode = pdc2027x_set_piomode,
25150diff -urNp linux-2.6.32.43/drivers/ata/pata_pdc202xx_old.c linux-2.6.32.43/drivers/ata/pata_pdc202xx_old.c
25151--- linux-2.6.32.43/drivers/ata/pata_pdc202xx_old.c 2011-03-27 14:31:47.000000000 -0400
25152+++ linux-2.6.32.43/drivers/ata/pata_pdc202xx_old.c 2011-04-17 15:56:46.000000000 -0400
25153@@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
25154 ATA_BMDMA_SHT(DRV_NAME),
25155 };
25156
25157-static struct ata_port_operations pdc2024x_port_ops = {
25158+static const struct ata_port_operations pdc2024x_port_ops = {
25159 .inherits = &ata_bmdma_port_ops,
25160
25161 .cable_detect = ata_cable_40wire,
25162@@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
25163 .sff_exec_command = pdc202xx_exec_command,
25164 };
25165
25166-static struct ata_port_operations pdc2026x_port_ops = {
25167+static const struct ata_port_operations pdc2026x_port_ops = {
25168 .inherits = &pdc2024x_port_ops,
25169
25170 .check_atapi_dma = pdc2026x_check_atapi_dma,
25171diff -urNp linux-2.6.32.43/drivers/ata/pata_platform.c linux-2.6.32.43/drivers/ata/pata_platform.c
25172--- linux-2.6.32.43/drivers/ata/pata_platform.c 2011-03-27 14:31:47.000000000 -0400
25173+++ linux-2.6.32.43/drivers/ata/pata_platform.c 2011-04-17 15:56:46.000000000 -0400
25174@@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
25175 ATA_PIO_SHT(DRV_NAME),
25176 };
25177
25178-static struct ata_port_operations pata_platform_port_ops = {
25179+static const struct ata_port_operations pata_platform_port_ops = {
25180 .inherits = &ata_sff_port_ops,
25181 .sff_data_xfer = ata_sff_data_xfer_noirq,
25182 .cable_detect = ata_cable_unknown,
25183diff -urNp linux-2.6.32.43/drivers/ata/pata_qdi.c linux-2.6.32.43/drivers/ata/pata_qdi.c
25184--- linux-2.6.32.43/drivers/ata/pata_qdi.c 2011-03-27 14:31:47.000000000 -0400
25185+++ linux-2.6.32.43/drivers/ata/pata_qdi.c 2011-04-17 15:56:46.000000000 -0400
25186@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
25187 ATA_PIO_SHT(DRV_NAME),
25188 };
25189
25190-static struct ata_port_operations qdi6500_port_ops = {
25191+static const struct ata_port_operations qdi6500_port_ops = {
25192 .inherits = &ata_sff_port_ops,
25193 .qc_issue = qdi_qc_issue,
25194 .sff_data_xfer = qdi_data_xfer,
25195@@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
25196 .set_piomode = qdi6500_set_piomode,
25197 };
25198
25199-static struct ata_port_operations qdi6580_port_ops = {
25200+static const struct ata_port_operations qdi6580_port_ops = {
25201 .inherits = &qdi6500_port_ops,
25202 .set_piomode = qdi6580_set_piomode,
25203 };
25204diff -urNp linux-2.6.32.43/drivers/ata/pata_radisys.c linux-2.6.32.43/drivers/ata/pata_radisys.c
25205--- linux-2.6.32.43/drivers/ata/pata_radisys.c 2011-03-27 14:31:47.000000000 -0400
25206+++ linux-2.6.32.43/drivers/ata/pata_radisys.c 2011-04-17 15:56:46.000000000 -0400
25207@@ -187,7 +187,7 @@ static struct scsi_host_template radisys
25208 ATA_BMDMA_SHT(DRV_NAME),
25209 };
25210
25211-static struct ata_port_operations radisys_pata_ops = {
25212+static const struct ata_port_operations radisys_pata_ops = {
25213 .inherits = &ata_bmdma_port_ops,
25214 .qc_issue = radisys_qc_issue,
25215 .cable_detect = ata_cable_unknown,
25216diff -urNp linux-2.6.32.43/drivers/ata/pata_rb532_cf.c linux-2.6.32.43/drivers/ata/pata_rb532_cf.c
25217--- linux-2.6.32.43/drivers/ata/pata_rb532_cf.c 2011-03-27 14:31:47.000000000 -0400
25218+++ linux-2.6.32.43/drivers/ata/pata_rb532_cf.c 2011-04-17 15:56:46.000000000 -0400
25219@@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
25220 return IRQ_HANDLED;
25221 }
25222
25223-static struct ata_port_operations rb532_pata_port_ops = {
25224+static const struct ata_port_operations rb532_pata_port_ops = {
25225 .inherits = &ata_sff_port_ops,
25226 .sff_data_xfer = ata_sff_data_xfer32,
25227 };
25228diff -urNp linux-2.6.32.43/drivers/ata/pata_rdc.c linux-2.6.32.43/drivers/ata/pata_rdc.c
25229--- linux-2.6.32.43/drivers/ata/pata_rdc.c 2011-03-27 14:31:47.000000000 -0400
25230+++ linux-2.6.32.43/drivers/ata/pata_rdc.c 2011-04-17 15:56:46.000000000 -0400
25231@@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_p
25232 pci_write_config_byte(dev, 0x48, udma_enable);
25233 }
25234
25235-static struct ata_port_operations rdc_pata_ops = {
25236+static const struct ata_port_operations rdc_pata_ops = {
25237 .inherits = &ata_bmdma32_port_ops,
25238 .cable_detect = rdc_pata_cable_detect,
25239 .set_piomode = rdc_set_piomode,
25240diff -urNp linux-2.6.32.43/drivers/ata/pata_rz1000.c linux-2.6.32.43/drivers/ata/pata_rz1000.c
25241--- linux-2.6.32.43/drivers/ata/pata_rz1000.c 2011-03-27 14:31:47.000000000 -0400
25242+++ linux-2.6.32.43/drivers/ata/pata_rz1000.c 2011-04-17 15:56:46.000000000 -0400
25243@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
25244 ATA_PIO_SHT(DRV_NAME),
25245 };
25246
25247-static struct ata_port_operations rz1000_port_ops = {
25248+static const struct ata_port_operations rz1000_port_ops = {
25249 .inherits = &ata_sff_port_ops,
25250 .cable_detect = ata_cable_40wire,
25251 .set_mode = rz1000_set_mode,
25252diff -urNp linux-2.6.32.43/drivers/ata/pata_sc1200.c linux-2.6.32.43/drivers/ata/pata_sc1200.c
25253--- linux-2.6.32.43/drivers/ata/pata_sc1200.c 2011-03-27 14:31:47.000000000 -0400
25254+++ linux-2.6.32.43/drivers/ata/pata_sc1200.c 2011-04-17 15:56:46.000000000 -0400
25255@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
25256 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25257 };
25258
25259-static struct ata_port_operations sc1200_port_ops = {
25260+static const struct ata_port_operations sc1200_port_ops = {
25261 .inherits = &ata_bmdma_port_ops,
25262 .qc_prep = ata_sff_dumb_qc_prep,
25263 .qc_issue = sc1200_qc_issue,
25264diff -urNp linux-2.6.32.43/drivers/ata/pata_scc.c linux-2.6.32.43/drivers/ata/pata_scc.c
25265--- linux-2.6.32.43/drivers/ata/pata_scc.c 2011-03-27 14:31:47.000000000 -0400
25266+++ linux-2.6.32.43/drivers/ata/pata_scc.c 2011-04-17 15:56:46.000000000 -0400
25267@@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
25268 ATA_BMDMA_SHT(DRV_NAME),
25269 };
25270
25271-static struct ata_port_operations scc_pata_ops = {
25272+static const struct ata_port_operations scc_pata_ops = {
25273 .inherits = &ata_bmdma_port_ops,
25274
25275 .set_piomode = scc_set_piomode,
25276diff -urNp linux-2.6.32.43/drivers/ata/pata_sch.c linux-2.6.32.43/drivers/ata/pata_sch.c
25277--- linux-2.6.32.43/drivers/ata/pata_sch.c 2011-03-27 14:31:47.000000000 -0400
25278+++ linux-2.6.32.43/drivers/ata/pata_sch.c 2011-04-17 15:56:46.000000000 -0400
25279@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
25280 ATA_BMDMA_SHT(DRV_NAME),
25281 };
25282
25283-static struct ata_port_operations sch_pata_ops = {
25284+static const struct ata_port_operations sch_pata_ops = {
25285 .inherits = &ata_bmdma_port_ops,
25286 .cable_detect = ata_cable_unknown,
25287 .set_piomode = sch_set_piomode,
25288diff -urNp linux-2.6.32.43/drivers/ata/pata_serverworks.c linux-2.6.32.43/drivers/ata/pata_serverworks.c
25289--- linux-2.6.32.43/drivers/ata/pata_serverworks.c 2011-03-27 14:31:47.000000000 -0400
25290+++ linux-2.6.32.43/drivers/ata/pata_serverworks.c 2011-04-17 15:56:46.000000000 -0400
25291@@ -299,7 +299,7 @@ static struct scsi_host_template serverw
25292 ATA_BMDMA_SHT(DRV_NAME),
25293 };
25294
25295-static struct ata_port_operations serverworks_osb4_port_ops = {
25296+static const struct ata_port_operations serverworks_osb4_port_ops = {
25297 .inherits = &ata_bmdma_port_ops,
25298 .cable_detect = serverworks_cable_detect,
25299 .mode_filter = serverworks_osb4_filter,
25300@@ -307,7 +307,7 @@ static struct ata_port_operations server
25301 .set_dmamode = serverworks_set_dmamode,
25302 };
25303
25304-static struct ata_port_operations serverworks_csb_port_ops = {
25305+static const struct ata_port_operations serverworks_csb_port_ops = {
25306 .inherits = &serverworks_osb4_port_ops,
25307 .mode_filter = serverworks_csb_filter,
25308 };
25309diff -urNp linux-2.6.32.43/drivers/ata/pata_sil680.c linux-2.6.32.43/drivers/ata/pata_sil680.c
25310--- linux-2.6.32.43/drivers/ata/pata_sil680.c 2011-06-25 12:55:34.000000000 -0400
25311+++ linux-2.6.32.43/drivers/ata/pata_sil680.c 2011-06-25 12:56:37.000000000 -0400
25312@@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
25313 ATA_BMDMA_SHT(DRV_NAME),
25314 };
25315
25316-static struct ata_port_operations sil680_port_ops = {
25317+static const struct ata_port_operations sil680_port_ops = {
25318 .inherits = &ata_bmdma32_port_ops,
25319 .cable_detect = sil680_cable_detect,
25320 .set_piomode = sil680_set_piomode,
25321diff -urNp linux-2.6.32.43/drivers/ata/pata_sis.c linux-2.6.32.43/drivers/ata/pata_sis.c
25322--- linux-2.6.32.43/drivers/ata/pata_sis.c 2011-03-27 14:31:47.000000000 -0400
25323+++ linux-2.6.32.43/drivers/ata/pata_sis.c 2011-04-17 15:56:46.000000000 -0400
25324@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
25325 ATA_BMDMA_SHT(DRV_NAME),
25326 };
25327
25328-static struct ata_port_operations sis_133_for_sata_ops = {
25329+static const struct ata_port_operations sis_133_for_sata_ops = {
25330 .inherits = &ata_bmdma_port_ops,
25331 .set_piomode = sis_133_set_piomode,
25332 .set_dmamode = sis_133_set_dmamode,
25333 .cable_detect = sis_133_cable_detect,
25334 };
25335
25336-static struct ata_port_operations sis_base_ops = {
25337+static const struct ata_port_operations sis_base_ops = {
25338 .inherits = &ata_bmdma_port_ops,
25339 .prereset = sis_pre_reset,
25340 };
25341
25342-static struct ata_port_operations sis_133_ops = {
25343+static const struct ata_port_operations sis_133_ops = {
25344 .inherits = &sis_base_ops,
25345 .set_piomode = sis_133_set_piomode,
25346 .set_dmamode = sis_133_set_dmamode,
25347 .cable_detect = sis_133_cable_detect,
25348 };
25349
25350-static struct ata_port_operations sis_133_early_ops = {
25351+static const struct ata_port_operations sis_133_early_ops = {
25352 .inherits = &sis_base_ops,
25353 .set_piomode = sis_100_set_piomode,
25354 .set_dmamode = sis_133_early_set_dmamode,
25355 .cable_detect = sis_66_cable_detect,
25356 };
25357
25358-static struct ata_port_operations sis_100_ops = {
25359+static const struct ata_port_operations sis_100_ops = {
25360 .inherits = &sis_base_ops,
25361 .set_piomode = sis_100_set_piomode,
25362 .set_dmamode = sis_100_set_dmamode,
25363 .cable_detect = sis_66_cable_detect,
25364 };
25365
25366-static struct ata_port_operations sis_66_ops = {
25367+static const struct ata_port_operations sis_66_ops = {
25368 .inherits = &sis_base_ops,
25369 .set_piomode = sis_old_set_piomode,
25370 .set_dmamode = sis_66_set_dmamode,
25371 .cable_detect = sis_66_cable_detect,
25372 };
25373
25374-static struct ata_port_operations sis_old_ops = {
25375+static const struct ata_port_operations sis_old_ops = {
25376 .inherits = &sis_base_ops,
25377 .set_piomode = sis_old_set_piomode,
25378 .set_dmamode = sis_old_set_dmamode,
25379diff -urNp linux-2.6.32.43/drivers/ata/pata_sl82c105.c linux-2.6.32.43/drivers/ata/pata_sl82c105.c
25380--- linux-2.6.32.43/drivers/ata/pata_sl82c105.c 2011-03-27 14:31:47.000000000 -0400
25381+++ linux-2.6.32.43/drivers/ata/pata_sl82c105.c 2011-04-17 15:56:46.000000000 -0400
25382@@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
25383 ATA_BMDMA_SHT(DRV_NAME),
25384 };
25385
25386-static struct ata_port_operations sl82c105_port_ops = {
25387+static const struct ata_port_operations sl82c105_port_ops = {
25388 .inherits = &ata_bmdma_port_ops,
25389 .qc_defer = sl82c105_qc_defer,
25390 .bmdma_start = sl82c105_bmdma_start,
25391diff -urNp linux-2.6.32.43/drivers/ata/pata_triflex.c linux-2.6.32.43/drivers/ata/pata_triflex.c
25392--- linux-2.6.32.43/drivers/ata/pata_triflex.c 2011-03-27 14:31:47.000000000 -0400
25393+++ linux-2.6.32.43/drivers/ata/pata_triflex.c 2011-04-17 15:56:46.000000000 -0400
25394@@ -178,7 +178,7 @@ static struct scsi_host_template triflex
25395 ATA_BMDMA_SHT(DRV_NAME),
25396 };
25397
25398-static struct ata_port_operations triflex_port_ops = {
25399+static const struct ata_port_operations triflex_port_ops = {
25400 .inherits = &ata_bmdma_port_ops,
25401 .bmdma_start = triflex_bmdma_start,
25402 .bmdma_stop = triflex_bmdma_stop,
25403diff -urNp linux-2.6.32.43/drivers/ata/pata_via.c linux-2.6.32.43/drivers/ata/pata_via.c
25404--- linux-2.6.32.43/drivers/ata/pata_via.c 2011-03-27 14:31:47.000000000 -0400
25405+++ linux-2.6.32.43/drivers/ata/pata_via.c 2011-04-17 15:56:46.000000000 -0400
25406@@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
25407 ATA_BMDMA_SHT(DRV_NAME),
25408 };
25409
25410-static struct ata_port_operations via_port_ops = {
25411+static const struct ata_port_operations via_port_ops = {
25412 .inherits = &ata_bmdma_port_ops,
25413 .cable_detect = via_cable_detect,
25414 .set_piomode = via_set_piomode,
25415@@ -429,7 +429,7 @@ static struct ata_port_operations via_po
25416 .port_start = via_port_start,
25417 };
25418
25419-static struct ata_port_operations via_port_ops_noirq = {
25420+static const struct ata_port_operations via_port_ops_noirq = {
25421 .inherits = &via_port_ops,
25422 .sff_data_xfer = ata_sff_data_xfer_noirq,
25423 };
25424diff -urNp linux-2.6.32.43/drivers/ata/pata_winbond.c linux-2.6.32.43/drivers/ata/pata_winbond.c
25425--- linux-2.6.32.43/drivers/ata/pata_winbond.c 2011-03-27 14:31:47.000000000 -0400
25426+++ linux-2.6.32.43/drivers/ata/pata_winbond.c 2011-04-17 15:56:46.000000000 -0400
25427@@ -125,7 +125,7 @@ static struct scsi_host_template winbond
25428 ATA_PIO_SHT(DRV_NAME),
25429 };
25430
25431-static struct ata_port_operations winbond_port_ops = {
25432+static const struct ata_port_operations winbond_port_ops = {
25433 .inherits = &ata_sff_port_ops,
25434 .sff_data_xfer = winbond_data_xfer,
25435 .cable_detect = ata_cable_40wire,
25436diff -urNp linux-2.6.32.43/drivers/ata/pdc_adma.c linux-2.6.32.43/drivers/ata/pdc_adma.c
25437--- linux-2.6.32.43/drivers/ata/pdc_adma.c 2011-03-27 14:31:47.000000000 -0400
25438+++ linux-2.6.32.43/drivers/ata/pdc_adma.c 2011-04-17 15:56:46.000000000 -0400
25439@@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
25440 .dma_boundary = ADMA_DMA_BOUNDARY,
25441 };
25442
25443-static struct ata_port_operations adma_ata_ops = {
25444+static const struct ata_port_operations adma_ata_ops = {
25445 .inherits = &ata_sff_port_ops,
25446
25447 .lost_interrupt = ATA_OP_NULL,
25448diff -urNp linux-2.6.32.43/drivers/ata/sata_fsl.c linux-2.6.32.43/drivers/ata/sata_fsl.c
25449--- linux-2.6.32.43/drivers/ata/sata_fsl.c 2011-03-27 14:31:47.000000000 -0400
25450+++ linux-2.6.32.43/drivers/ata/sata_fsl.c 2011-04-17 15:56:46.000000000 -0400
25451@@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fs
25452 .dma_boundary = ATA_DMA_BOUNDARY,
25453 };
25454
25455-static struct ata_port_operations sata_fsl_ops = {
25456+static const struct ata_port_operations sata_fsl_ops = {
25457 .inherits = &sata_pmp_port_ops,
25458
25459 .qc_defer = ata_std_qc_defer,
25460diff -urNp linux-2.6.32.43/drivers/ata/sata_inic162x.c linux-2.6.32.43/drivers/ata/sata_inic162x.c
25461--- linux-2.6.32.43/drivers/ata/sata_inic162x.c 2011-03-27 14:31:47.000000000 -0400
25462+++ linux-2.6.32.43/drivers/ata/sata_inic162x.c 2011-04-17 15:56:46.000000000 -0400
25463@@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
25464 return 0;
25465 }
25466
25467-static struct ata_port_operations inic_port_ops = {
25468+static const struct ata_port_operations inic_port_ops = {
25469 .inherits = &sata_port_ops,
25470
25471 .check_atapi_dma = inic_check_atapi_dma,
25472diff -urNp linux-2.6.32.43/drivers/ata/sata_mv.c linux-2.6.32.43/drivers/ata/sata_mv.c
25473--- linux-2.6.32.43/drivers/ata/sata_mv.c 2011-03-27 14:31:47.000000000 -0400
25474+++ linux-2.6.32.43/drivers/ata/sata_mv.c 2011-04-17 15:56:46.000000000 -0400
25475@@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
25476 .dma_boundary = MV_DMA_BOUNDARY,
25477 };
25478
25479-static struct ata_port_operations mv5_ops = {
25480+static const struct ata_port_operations mv5_ops = {
25481 .inherits = &ata_sff_port_ops,
25482
25483 .lost_interrupt = ATA_OP_NULL,
25484@@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
25485 .port_stop = mv_port_stop,
25486 };
25487
25488-static struct ata_port_operations mv6_ops = {
25489+static const struct ata_port_operations mv6_ops = {
25490 .inherits = &mv5_ops,
25491 .dev_config = mv6_dev_config,
25492 .scr_read = mv_scr_read,
25493@@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
25494 .bmdma_status = mv_bmdma_status,
25495 };
25496
25497-static struct ata_port_operations mv_iie_ops = {
25498+static const struct ata_port_operations mv_iie_ops = {
25499 .inherits = &mv6_ops,
25500 .dev_config = ATA_OP_NULL,
25501 .qc_prep = mv_qc_prep_iie,
25502diff -urNp linux-2.6.32.43/drivers/ata/sata_nv.c linux-2.6.32.43/drivers/ata/sata_nv.c
25503--- linux-2.6.32.43/drivers/ata/sata_nv.c 2011-03-27 14:31:47.000000000 -0400
25504+++ linux-2.6.32.43/drivers/ata/sata_nv.c 2011-04-17 15:56:46.000000000 -0400
25505@@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
25506 * cases. Define nv_hardreset() which only kicks in for post-boot
25507 * probing and use it for all variants.
25508 */
25509-static struct ata_port_operations nv_generic_ops = {
25510+static const struct ata_port_operations nv_generic_ops = {
25511 .inherits = &ata_bmdma_port_ops,
25512 .lost_interrupt = ATA_OP_NULL,
25513 .scr_read = nv_scr_read,
25514@@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
25515 .hardreset = nv_hardreset,
25516 };
25517
25518-static struct ata_port_operations nv_nf2_ops = {
25519+static const struct ata_port_operations nv_nf2_ops = {
25520 .inherits = &nv_generic_ops,
25521 .freeze = nv_nf2_freeze,
25522 .thaw = nv_nf2_thaw,
25523 };
25524
25525-static struct ata_port_operations nv_ck804_ops = {
25526+static const struct ata_port_operations nv_ck804_ops = {
25527 .inherits = &nv_generic_ops,
25528 .freeze = nv_ck804_freeze,
25529 .thaw = nv_ck804_thaw,
25530 .host_stop = nv_ck804_host_stop,
25531 };
25532
25533-static struct ata_port_operations nv_adma_ops = {
25534+static const struct ata_port_operations nv_adma_ops = {
25535 .inherits = &nv_ck804_ops,
25536
25537 .check_atapi_dma = nv_adma_check_atapi_dma,
25538@@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
25539 .host_stop = nv_adma_host_stop,
25540 };
25541
25542-static struct ata_port_operations nv_swncq_ops = {
25543+static const struct ata_port_operations nv_swncq_ops = {
25544 .inherits = &nv_generic_ops,
25545
25546 .qc_defer = ata_std_qc_defer,
25547diff -urNp linux-2.6.32.43/drivers/ata/sata_promise.c linux-2.6.32.43/drivers/ata/sata_promise.c
25548--- linux-2.6.32.43/drivers/ata/sata_promise.c 2011-03-27 14:31:47.000000000 -0400
25549+++ linux-2.6.32.43/drivers/ata/sata_promise.c 2011-04-17 15:56:46.000000000 -0400
25550@@ -195,7 +195,7 @@ static const struct ata_port_operations
25551 .error_handler = pdc_error_handler,
25552 };
25553
25554-static struct ata_port_operations pdc_sata_ops = {
25555+static const struct ata_port_operations pdc_sata_ops = {
25556 .inherits = &pdc_common_ops,
25557 .cable_detect = pdc_sata_cable_detect,
25558 .freeze = pdc_sata_freeze,
25559@@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sa
25560
25561 /* First-generation chips need a more restrictive ->check_atapi_dma op,
25562 and ->freeze/thaw that ignore the hotplug controls. */
25563-static struct ata_port_operations pdc_old_sata_ops = {
25564+static const struct ata_port_operations pdc_old_sata_ops = {
25565 .inherits = &pdc_sata_ops,
25566 .freeze = pdc_freeze,
25567 .thaw = pdc_thaw,
25568 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
25569 };
25570
25571-static struct ata_port_operations pdc_pata_ops = {
25572+static const struct ata_port_operations pdc_pata_ops = {
25573 .inherits = &pdc_common_ops,
25574 .cable_detect = pdc_pata_cable_detect,
25575 .freeze = pdc_freeze,
25576diff -urNp linux-2.6.32.43/drivers/ata/sata_qstor.c linux-2.6.32.43/drivers/ata/sata_qstor.c
25577--- linux-2.6.32.43/drivers/ata/sata_qstor.c 2011-03-27 14:31:47.000000000 -0400
25578+++ linux-2.6.32.43/drivers/ata/sata_qstor.c 2011-04-17 15:56:46.000000000 -0400
25579@@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
25580 .dma_boundary = QS_DMA_BOUNDARY,
25581 };
25582
25583-static struct ata_port_operations qs_ata_ops = {
25584+static const struct ata_port_operations qs_ata_ops = {
25585 .inherits = &ata_sff_port_ops,
25586
25587 .check_atapi_dma = qs_check_atapi_dma,
25588diff -urNp linux-2.6.32.43/drivers/ata/sata_sil24.c linux-2.6.32.43/drivers/ata/sata_sil24.c
25589--- linux-2.6.32.43/drivers/ata/sata_sil24.c 2011-03-27 14:31:47.000000000 -0400
25590+++ linux-2.6.32.43/drivers/ata/sata_sil24.c 2011-04-17 15:56:46.000000000 -0400
25591@@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
25592 .dma_boundary = ATA_DMA_BOUNDARY,
25593 };
25594
25595-static struct ata_port_operations sil24_ops = {
25596+static const struct ata_port_operations sil24_ops = {
25597 .inherits = &sata_pmp_port_ops,
25598
25599 .qc_defer = sil24_qc_defer,
25600diff -urNp linux-2.6.32.43/drivers/ata/sata_sil.c linux-2.6.32.43/drivers/ata/sata_sil.c
25601--- linux-2.6.32.43/drivers/ata/sata_sil.c 2011-03-27 14:31:47.000000000 -0400
25602+++ linux-2.6.32.43/drivers/ata/sata_sil.c 2011-04-17 15:56:46.000000000 -0400
25603@@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
25604 .sg_tablesize = ATA_MAX_PRD
25605 };
25606
25607-static struct ata_port_operations sil_ops = {
25608+static const struct ata_port_operations sil_ops = {
25609 .inherits = &ata_bmdma32_port_ops,
25610 .dev_config = sil_dev_config,
25611 .set_mode = sil_set_mode,
25612diff -urNp linux-2.6.32.43/drivers/ata/sata_sis.c linux-2.6.32.43/drivers/ata/sata_sis.c
25613--- linux-2.6.32.43/drivers/ata/sata_sis.c 2011-03-27 14:31:47.000000000 -0400
25614+++ linux-2.6.32.43/drivers/ata/sata_sis.c 2011-04-17 15:56:46.000000000 -0400
25615@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
25616 ATA_BMDMA_SHT(DRV_NAME),
25617 };
25618
25619-static struct ata_port_operations sis_ops = {
25620+static const struct ata_port_operations sis_ops = {
25621 .inherits = &ata_bmdma_port_ops,
25622 .scr_read = sis_scr_read,
25623 .scr_write = sis_scr_write,
25624diff -urNp linux-2.6.32.43/drivers/ata/sata_svw.c linux-2.6.32.43/drivers/ata/sata_svw.c
25625--- linux-2.6.32.43/drivers/ata/sata_svw.c 2011-03-27 14:31:47.000000000 -0400
25626+++ linux-2.6.32.43/drivers/ata/sata_svw.c 2011-04-17 15:56:46.000000000 -0400
25627@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
25628 };
25629
25630
25631-static struct ata_port_operations k2_sata_ops = {
25632+static const struct ata_port_operations k2_sata_ops = {
25633 .inherits = &ata_bmdma_port_ops,
25634 .sff_tf_load = k2_sata_tf_load,
25635 .sff_tf_read = k2_sata_tf_read,
25636diff -urNp linux-2.6.32.43/drivers/ata/sata_sx4.c linux-2.6.32.43/drivers/ata/sata_sx4.c
25637--- linux-2.6.32.43/drivers/ata/sata_sx4.c 2011-03-27 14:31:47.000000000 -0400
25638+++ linux-2.6.32.43/drivers/ata/sata_sx4.c 2011-04-17 15:56:46.000000000 -0400
25639@@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
25640 };
25641
25642 /* TODO: inherit from base port_ops after converting to new EH */
25643-static struct ata_port_operations pdc_20621_ops = {
25644+static const struct ata_port_operations pdc_20621_ops = {
25645 .inherits = &ata_sff_port_ops,
25646
25647 .check_atapi_dma = pdc_check_atapi_dma,
25648diff -urNp linux-2.6.32.43/drivers/ata/sata_uli.c linux-2.6.32.43/drivers/ata/sata_uli.c
25649--- linux-2.6.32.43/drivers/ata/sata_uli.c 2011-03-27 14:31:47.000000000 -0400
25650+++ linux-2.6.32.43/drivers/ata/sata_uli.c 2011-04-17 15:56:46.000000000 -0400
25651@@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
25652 ATA_BMDMA_SHT(DRV_NAME),
25653 };
25654
25655-static struct ata_port_operations uli_ops = {
25656+static const struct ata_port_operations uli_ops = {
25657 .inherits = &ata_bmdma_port_ops,
25658 .scr_read = uli_scr_read,
25659 .scr_write = uli_scr_write,
25660diff -urNp linux-2.6.32.43/drivers/ata/sata_via.c linux-2.6.32.43/drivers/ata/sata_via.c
25661--- linux-2.6.32.43/drivers/ata/sata_via.c 2011-05-10 22:12:01.000000000 -0400
25662+++ linux-2.6.32.43/drivers/ata/sata_via.c 2011-05-10 22:15:08.000000000 -0400
25663@@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
25664 ATA_BMDMA_SHT(DRV_NAME),
25665 };
25666
25667-static struct ata_port_operations svia_base_ops = {
25668+static const struct ata_port_operations svia_base_ops = {
25669 .inherits = &ata_bmdma_port_ops,
25670 .sff_tf_load = svia_tf_load,
25671 };
25672
25673-static struct ata_port_operations vt6420_sata_ops = {
25674+static const struct ata_port_operations vt6420_sata_ops = {
25675 .inherits = &svia_base_ops,
25676 .freeze = svia_noop_freeze,
25677 .prereset = vt6420_prereset,
25678 .bmdma_start = vt6420_bmdma_start,
25679 };
25680
25681-static struct ata_port_operations vt6421_pata_ops = {
25682+static const struct ata_port_operations vt6421_pata_ops = {
25683 .inherits = &svia_base_ops,
25684 .cable_detect = vt6421_pata_cable_detect,
25685 .set_piomode = vt6421_set_pio_mode,
25686 .set_dmamode = vt6421_set_dma_mode,
25687 };
25688
25689-static struct ata_port_operations vt6421_sata_ops = {
25690+static const struct ata_port_operations vt6421_sata_ops = {
25691 .inherits = &svia_base_ops,
25692 .scr_read = svia_scr_read,
25693 .scr_write = svia_scr_write,
25694 };
25695
25696-static struct ata_port_operations vt8251_ops = {
25697+static const struct ata_port_operations vt8251_ops = {
25698 .inherits = &svia_base_ops,
25699 .hardreset = sata_std_hardreset,
25700 .scr_read = vt8251_scr_read,
25701diff -urNp linux-2.6.32.43/drivers/ata/sata_vsc.c linux-2.6.32.43/drivers/ata/sata_vsc.c
25702--- linux-2.6.32.43/drivers/ata/sata_vsc.c 2011-03-27 14:31:47.000000000 -0400
25703+++ linux-2.6.32.43/drivers/ata/sata_vsc.c 2011-04-17 15:56:46.000000000 -0400
25704@@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
25705 };
25706
25707
25708-static struct ata_port_operations vsc_sata_ops = {
25709+static const struct ata_port_operations vsc_sata_ops = {
25710 .inherits = &ata_bmdma_port_ops,
25711 /* The IRQ handling is not quite standard SFF behaviour so we
25712 cannot use the default lost interrupt handler */
25713diff -urNp linux-2.6.32.43/drivers/atm/adummy.c linux-2.6.32.43/drivers/atm/adummy.c
25714--- linux-2.6.32.43/drivers/atm/adummy.c 2011-03-27 14:31:47.000000000 -0400
25715+++ linux-2.6.32.43/drivers/atm/adummy.c 2011-04-17 15:56:46.000000000 -0400
25716@@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
25717 vcc->pop(vcc, skb);
25718 else
25719 dev_kfree_skb_any(skb);
25720- atomic_inc(&vcc->stats->tx);
25721+ atomic_inc_unchecked(&vcc->stats->tx);
25722
25723 return 0;
25724 }
25725diff -urNp linux-2.6.32.43/drivers/atm/ambassador.c linux-2.6.32.43/drivers/atm/ambassador.c
25726--- linux-2.6.32.43/drivers/atm/ambassador.c 2011-03-27 14:31:47.000000000 -0400
25727+++ linux-2.6.32.43/drivers/atm/ambassador.c 2011-04-17 15:56:46.000000000 -0400
25728@@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
25729 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
25730
25731 // VC layer stats
25732- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25733+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25734
25735 // free the descriptor
25736 kfree (tx_descr);
25737@@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
25738 dump_skb ("<<<", vc, skb);
25739
25740 // VC layer stats
25741- atomic_inc(&atm_vcc->stats->rx);
25742+ atomic_inc_unchecked(&atm_vcc->stats->rx);
25743 __net_timestamp(skb);
25744 // end of our responsability
25745 atm_vcc->push (atm_vcc, skb);
25746@@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
25747 } else {
25748 PRINTK (KERN_INFO, "dropped over-size frame");
25749 // should we count this?
25750- atomic_inc(&atm_vcc->stats->rx_drop);
25751+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25752 }
25753
25754 } else {
25755@@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at
25756 }
25757
25758 if (check_area (skb->data, skb->len)) {
25759- atomic_inc(&atm_vcc->stats->tx_err);
25760+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
25761 return -ENOMEM; // ?
25762 }
25763
25764diff -urNp linux-2.6.32.43/drivers/atm/atmtcp.c linux-2.6.32.43/drivers/atm/atmtcp.c
25765--- linux-2.6.32.43/drivers/atm/atmtcp.c 2011-03-27 14:31:47.000000000 -0400
25766+++ linux-2.6.32.43/drivers/atm/atmtcp.c 2011-04-17 15:56:46.000000000 -0400
25767@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
25768 if (vcc->pop) vcc->pop(vcc,skb);
25769 else dev_kfree_skb(skb);
25770 if (dev_data) return 0;
25771- atomic_inc(&vcc->stats->tx_err);
25772+ atomic_inc_unchecked(&vcc->stats->tx_err);
25773 return -ENOLINK;
25774 }
25775 size = skb->len+sizeof(struct atmtcp_hdr);
25776@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
25777 if (!new_skb) {
25778 if (vcc->pop) vcc->pop(vcc,skb);
25779 else dev_kfree_skb(skb);
25780- atomic_inc(&vcc->stats->tx_err);
25781+ atomic_inc_unchecked(&vcc->stats->tx_err);
25782 return -ENOBUFS;
25783 }
25784 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
25785@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
25786 if (vcc->pop) vcc->pop(vcc,skb);
25787 else dev_kfree_skb(skb);
25788 out_vcc->push(out_vcc,new_skb);
25789- atomic_inc(&vcc->stats->tx);
25790- atomic_inc(&out_vcc->stats->rx);
25791+ atomic_inc_unchecked(&vcc->stats->tx);
25792+ atomic_inc_unchecked(&out_vcc->stats->rx);
25793 return 0;
25794 }
25795
25796@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
25797 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
25798 read_unlock(&vcc_sklist_lock);
25799 if (!out_vcc) {
25800- atomic_inc(&vcc->stats->tx_err);
25801+ atomic_inc_unchecked(&vcc->stats->tx_err);
25802 goto done;
25803 }
25804 skb_pull(skb,sizeof(struct atmtcp_hdr));
25805@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
25806 __net_timestamp(new_skb);
25807 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
25808 out_vcc->push(out_vcc,new_skb);
25809- atomic_inc(&vcc->stats->tx);
25810- atomic_inc(&out_vcc->stats->rx);
25811+ atomic_inc_unchecked(&vcc->stats->tx);
25812+ atomic_inc_unchecked(&out_vcc->stats->rx);
25813 done:
25814 if (vcc->pop) vcc->pop(vcc,skb);
25815 else dev_kfree_skb(skb);
25816diff -urNp linux-2.6.32.43/drivers/atm/eni.c linux-2.6.32.43/drivers/atm/eni.c
25817--- linux-2.6.32.43/drivers/atm/eni.c 2011-03-27 14:31:47.000000000 -0400
25818+++ linux-2.6.32.43/drivers/atm/eni.c 2011-04-17 15:56:46.000000000 -0400
25819@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
25820 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
25821 vcc->dev->number);
25822 length = 0;
25823- atomic_inc(&vcc->stats->rx_err);
25824+ atomic_inc_unchecked(&vcc->stats->rx_err);
25825 }
25826 else {
25827 length = ATM_CELL_SIZE-1; /* no HEC */
25828@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25829 size);
25830 }
25831 eff = length = 0;
25832- atomic_inc(&vcc->stats->rx_err);
25833+ atomic_inc_unchecked(&vcc->stats->rx_err);
25834 }
25835 else {
25836 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
25837@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
25838 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
25839 vcc->dev->number,vcc->vci,length,size << 2,descr);
25840 length = eff = 0;
25841- atomic_inc(&vcc->stats->rx_err);
25842+ atomic_inc_unchecked(&vcc->stats->rx_err);
25843 }
25844 }
25845 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
25846@@ -770,7 +770,7 @@ rx_dequeued++;
25847 vcc->push(vcc,skb);
25848 pushed++;
25849 }
25850- atomic_inc(&vcc->stats->rx);
25851+ atomic_inc_unchecked(&vcc->stats->rx);
25852 }
25853 wake_up(&eni_dev->rx_wait);
25854 }
25855@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
25856 PCI_DMA_TODEVICE);
25857 if (vcc->pop) vcc->pop(vcc,skb);
25858 else dev_kfree_skb_irq(skb);
25859- atomic_inc(&vcc->stats->tx);
25860+ atomic_inc_unchecked(&vcc->stats->tx);
25861 wake_up(&eni_dev->tx_wait);
25862 dma_complete++;
25863 }
25864diff -urNp linux-2.6.32.43/drivers/atm/firestream.c linux-2.6.32.43/drivers/atm/firestream.c
25865--- linux-2.6.32.43/drivers/atm/firestream.c 2011-03-27 14:31:47.000000000 -0400
25866+++ linux-2.6.32.43/drivers/atm/firestream.c 2011-04-17 15:56:46.000000000 -0400
25867@@ -748,7 +748,7 @@ static void process_txdone_queue (struct
25868 }
25869 }
25870
25871- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
25872+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
25873
25874 fs_dprintk (FS_DEBUG_TXMEM, "i");
25875 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
25876@@ -815,7 +815,7 @@ static void process_incoming (struct fs_
25877 #endif
25878 skb_put (skb, qe->p1 & 0xffff);
25879 ATM_SKB(skb)->vcc = atm_vcc;
25880- atomic_inc(&atm_vcc->stats->rx);
25881+ atomic_inc_unchecked(&atm_vcc->stats->rx);
25882 __net_timestamp(skb);
25883 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
25884 atm_vcc->push (atm_vcc, skb);
25885@@ -836,12 +836,12 @@ static void process_incoming (struct fs_
25886 kfree (pe);
25887 }
25888 if (atm_vcc)
25889- atomic_inc(&atm_vcc->stats->rx_drop);
25890+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25891 break;
25892 case 0x1f: /* Reassembly abort: no buffers. */
25893 /* Silently increment error counter. */
25894 if (atm_vcc)
25895- atomic_inc(&atm_vcc->stats->rx_drop);
25896+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
25897 break;
25898 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
25899 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
25900diff -urNp linux-2.6.32.43/drivers/atm/fore200e.c linux-2.6.32.43/drivers/atm/fore200e.c
25901--- linux-2.6.32.43/drivers/atm/fore200e.c 2011-03-27 14:31:47.000000000 -0400
25902+++ linux-2.6.32.43/drivers/atm/fore200e.c 2011-04-17 15:56:46.000000000 -0400
25903@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
25904 #endif
25905 /* check error condition */
25906 if (*entry->status & STATUS_ERROR)
25907- atomic_inc(&vcc->stats->tx_err);
25908+ atomic_inc_unchecked(&vcc->stats->tx_err);
25909 else
25910- atomic_inc(&vcc->stats->tx);
25911+ atomic_inc_unchecked(&vcc->stats->tx);
25912 }
25913 }
25914
25915@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
25916 if (skb == NULL) {
25917 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
25918
25919- atomic_inc(&vcc->stats->rx_drop);
25920+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25921 return -ENOMEM;
25922 }
25923
25924@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
25925
25926 dev_kfree_skb_any(skb);
25927
25928- atomic_inc(&vcc->stats->rx_drop);
25929+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25930 return -ENOMEM;
25931 }
25932
25933 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25934
25935 vcc->push(vcc, skb);
25936- atomic_inc(&vcc->stats->rx);
25937+ atomic_inc_unchecked(&vcc->stats->rx);
25938
25939 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
25940
25941@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
25942 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
25943 fore200e->atm_dev->number,
25944 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
25945- atomic_inc(&vcc->stats->rx_err);
25946+ atomic_inc_unchecked(&vcc->stats->rx_err);
25947 }
25948 }
25949
25950@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
25951 goto retry_here;
25952 }
25953
25954- atomic_inc(&vcc->stats->tx_err);
25955+ atomic_inc_unchecked(&vcc->stats->tx_err);
25956
25957 fore200e->tx_sat++;
25958 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
25959diff -urNp linux-2.6.32.43/drivers/atm/he.c linux-2.6.32.43/drivers/atm/he.c
25960--- linux-2.6.32.43/drivers/atm/he.c 2011-03-27 14:31:47.000000000 -0400
25961+++ linux-2.6.32.43/drivers/atm/he.c 2011-04-17 15:56:46.000000000 -0400
25962@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25963
25964 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
25965 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
25966- atomic_inc(&vcc->stats->rx_drop);
25967+ atomic_inc_unchecked(&vcc->stats->rx_drop);
25968 goto return_host_buffers;
25969 }
25970
25971@@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25972 RBRQ_LEN_ERR(he_dev->rbrq_head)
25973 ? "LEN_ERR" : "",
25974 vcc->vpi, vcc->vci);
25975- atomic_inc(&vcc->stats->rx_err);
25976+ atomic_inc_unchecked(&vcc->stats->rx_err);
25977 goto return_host_buffers;
25978 }
25979
25980@@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i
25981 vcc->push(vcc, skb);
25982 spin_lock(&he_dev->global_lock);
25983
25984- atomic_inc(&vcc->stats->rx);
25985+ atomic_inc_unchecked(&vcc->stats->rx);
25986
25987 return_host_buffers:
25988 ++pdus_assembled;
25989@@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
25990 tpd->vcc->pop(tpd->vcc, tpd->skb);
25991 else
25992 dev_kfree_skb_any(tpd->skb);
25993- atomic_inc(&tpd->vcc->stats->tx_err);
25994+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
25995 }
25996 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
25997 return;
25998@@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
25999 vcc->pop(vcc, skb);
26000 else
26001 dev_kfree_skb_any(skb);
26002- atomic_inc(&vcc->stats->tx_err);
26003+ atomic_inc_unchecked(&vcc->stats->tx_err);
26004 return -EINVAL;
26005 }
26006
26007@@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26008 vcc->pop(vcc, skb);
26009 else
26010 dev_kfree_skb_any(skb);
26011- atomic_inc(&vcc->stats->tx_err);
26012+ atomic_inc_unchecked(&vcc->stats->tx_err);
26013 return -EINVAL;
26014 }
26015 #endif
26016@@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26017 vcc->pop(vcc, skb);
26018 else
26019 dev_kfree_skb_any(skb);
26020- atomic_inc(&vcc->stats->tx_err);
26021+ atomic_inc_unchecked(&vcc->stats->tx_err);
26022 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26023 return -ENOMEM;
26024 }
26025@@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26026 vcc->pop(vcc, skb);
26027 else
26028 dev_kfree_skb_any(skb);
26029- atomic_inc(&vcc->stats->tx_err);
26030+ atomic_inc_unchecked(&vcc->stats->tx_err);
26031 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26032 return -ENOMEM;
26033 }
26034@@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26035 __enqueue_tpd(he_dev, tpd, cid);
26036 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26037
26038- atomic_inc(&vcc->stats->tx);
26039+ atomic_inc_unchecked(&vcc->stats->tx);
26040
26041 return 0;
26042 }
26043diff -urNp linux-2.6.32.43/drivers/atm/horizon.c linux-2.6.32.43/drivers/atm/horizon.c
26044--- linux-2.6.32.43/drivers/atm/horizon.c 2011-03-27 14:31:47.000000000 -0400
26045+++ linux-2.6.32.43/drivers/atm/horizon.c 2011-04-17 15:56:46.000000000 -0400
26046@@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
26047 {
26048 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
26049 // VC layer stats
26050- atomic_inc(&vcc->stats->rx);
26051+ atomic_inc_unchecked(&vcc->stats->rx);
26052 __net_timestamp(skb);
26053 // end of our responsability
26054 vcc->push (vcc, skb);
26055@@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
26056 dev->tx_iovec = NULL;
26057
26058 // VC layer stats
26059- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26060+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26061
26062 // free the skb
26063 hrz_kfree_skb (skb);
26064diff -urNp linux-2.6.32.43/drivers/atm/idt77252.c linux-2.6.32.43/drivers/atm/idt77252.c
26065--- linux-2.6.32.43/drivers/atm/idt77252.c 2011-03-27 14:31:47.000000000 -0400
26066+++ linux-2.6.32.43/drivers/atm/idt77252.c 2011-04-17 15:56:46.000000000 -0400
26067@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
26068 else
26069 dev_kfree_skb(skb);
26070
26071- atomic_inc(&vcc->stats->tx);
26072+ atomic_inc_unchecked(&vcc->stats->tx);
26073 }
26074
26075 atomic_dec(&scq->used);
26076@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
26077 if ((sb = dev_alloc_skb(64)) == NULL) {
26078 printk("%s: Can't allocate buffers for aal0.\n",
26079 card->name);
26080- atomic_add(i, &vcc->stats->rx_drop);
26081+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
26082 break;
26083 }
26084 if (!atm_charge(vcc, sb->truesize)) {
26085 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
26086 card->name);
26087- atomic_add(i - 1, &vcc->stats->rx_drop);
26088+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
26089 dev_kfree_skb(sb);
26090 break;
26091 }
26092@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
26093 ATM_SKB(sb)->vcc = vcc;
26094 __net_timestamp(sb);
26095 vcc->push(vcc, sb);
26096- atomic_inc(&vcc->stats->rx);
26097+ atomic_inc_unchecked(&vcc->stats->rx);
26098
26099 cell += ATM_CELL_PAYLOAD;
26100 }
26101@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
26102 "(CDC: %08x)\n",
26103 card->name, len, rpp->len, readl(SAR_REG_CDC));
26104 recycle_rx_pool_skb(card, rpp);
26105- atomic_inc(&vcc->stats->rx_err);
26106+ atomic_inc_unchecked(&vcc->stats->rx_err);
26107 return;
26108 }
26109 if (stat & SAR_RSQE_CRC) {
26110 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
26111 recycle_rx_pool_skb(card, rpp);
26112- atomic_inc(&vcc->stats->rx_err);
26113+ atomic_inc_unchecked(&vcc->stats->rx_err);
26114 return;
26115 }
26116 if (skb_queue_len(&rpp->queue) > 1) {
26117@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
26118 RXPRINTK("%s: Can't alloc RX skb.\n",
26119 card->name);
26120 recycle_rx_pool_skb(card, rpp);
26121- atomic_inc(&vcc->stats->rx_err);
26122+ atomic_inc_unchecked(&vcc->stats->rx_err);
26123 return;
26124 }
26125 if (!atm_charge(vcc, skb->truesize)) {
26126@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
26127 __net_timestamp(skb);
26128
26129 vcc->push(vcc, skb);
26130- atomic_inc(&vcc->stats->rx);
26131+ atomic_inc_unchecked(&vcc->stats->rx);
26132
26133 return;
26134 }
26135@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
26136 __net_timestamp(skb);
26137
26138 vcc->push(vcc, skb);
26139- atomic_inc(&vcc->stats->rx);
26140+ atomic_inc_unchecked(&vcc->stats->rx);
26141
26142 if (skb->truesize > SAR_FB_SIZE_3)
26143 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
26144@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
26145 if (vcc->qos.aal != ATM_AAL0) {
26146 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
26147 card->name, vpi, vci);
26148- atomic_inc(&vcc->stats->rx_drop);
26149+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26150 goto drop;
26151 }
26152
26153 if ((sb = dev_alloc_skb(64)) == NULL) {
26154 printk("%s: Can't allocate buffers for AAL0.\n",
26155 card->name);
26156- atomic_inc(&vcc->stats->rx_err);
26157+ atomic_inc_unchecked(&vcc->stats->rx_err);
26158 goto drop;
26159 }
26160
26161@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
26162 ATM_SKB(sb)->vcc = vcc;
26163 __net_timestamp(sb);
26164 vcc->push(vcc, sb);
26165- atomic_inc(&vcc->stats->rx);
26166+ atomic_inc_unchecked(&vcc->stats->rx);
26167
26168 drop:
26169 skb_pull(queue, 64);
26170@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26171
26172 if (vc == NULL) {
26173 printk("%s: NULL connection in send().\n", card->name);
26174- atomic_inc(&vcc->stats->tx_err);
26175+ atomic_inc_unchecked(&vcc->stats->tx_err);
26176 dev_kfree_skb(skb);
26177 return -EINVAL;
26178 }
26179 if (!test_bit(VCF_TX, &vc->flags)) {
26180 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
26181- atomic_inc(&vcc->stats->tx_err);
26182+ atomic_inc_unchecked(&vcc->stats->tx_err);
26183 dev_kfree_skb(skb);
26184 return -EINVAL;
26185 }
26186@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26187 break;
26188 default:
26189 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
26190- atomic_inc(&vcc->stats->tx_err);
26191+ atomic_inc_unchecked(&vcc->stats->tx_err);
26192 dev_kfree_skb(skb);
26193 return -EINVAL;
26194 }
26195
26196 if (skb_shinfo(skb)->nr_frags != 0) {
26197 printk("%s: No scatter-gather yet.\n", card->name);
26198- atomic_inc(&vcc->stats->tx_err);
26199+ atomic_inc_unchecked(&vcc->stats->tx_err);
26200 dev_kfree_skb(skb);
26201 return -EINVAL;
26202 }
26203@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26204
26205 err = queue_skb(card, vc, skb, oam);
26206 if (err) {
26207- atomic_inc(&vcc->stats->tx_err);
26208+ atomic_inc_unchecked(&vcc->stats->tx_err);
26209 dev_kfree_skb(skb);
26210 return err;
26211 }
26212@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
26213 skb = dev_alloc_skb(64);
26214 if (!skb) {
26215 printk("%s: Out of memory in send_oam().\n", card->name);
26216- atomic_inc(&vcc->stats->tx_err);
26217+ atomic_inc_unchecked(&vcc->stats->tx_err);
26218 return -ENOMEM;
26219 }
26220 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
26221diff -urNp linux-2.6.32.43/drivers/atm/iphase.c linux-2.6.32.43/drivers/atm/iphase.c
26222--- linux-2.6.32.43/drivers/atm/iphase.c 2011-03-27 14:31:47.000000000 -0400
26223+++ linux-2.6.32.43/drivers/atm/iphase.c 2011-04-17 15:56:46.000000000 -0400
26224@@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
26225 status = (u_short) (buf_desc_ptr->desc_mode);
26226 if (status & (RX_CER | RX_PTE | RX_OFL))
26227 {
26228- atomic_inc(&vcc->stats->rx_err);
26229+ atomic_inc_unchecked(&vcc->stats->rx_err);
26230 IF_ERR(printk("IA: bad packet, dropping it");)
26231 if (status & RX_CER) {
26232 IF_ERR(printk(" cause: packet CRC error\n");)
26233@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
26234 len = dma_addr - buf_addr;
26235 if (len > iadev->rx_buf_sz) {
26236 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
26237- atomic_inc(&vcc->stats->rx_err);
26238+ atomic_inc_unchecked(&vcc->stats->rx_err);
26239 goto out_free_desc;
26240 }
26241
26242@@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
26243 ia_vcc = INPH_IA_VCC(vcc);
26244 if (ia_vcc == NULL)
26245 {
26246- atomic_inc(&vcc->stats->rx_err);
26247+ atomic_inc_unchecked(&vcc->stats->rx_err);
26248 dev_kfree_skb_any(skb);
26249 atm_return(vcc, atm_guess_pdu2truesize(len));
26250 goto INCR_DLE;
26251@@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
26252 if ((length > iadev->rx_buf_sz) || (length >
26253 (skb->len - sizeof(struct cpcs_trailer))))
26254 {
26255- atomic_inc(&vcc->stats->rx_err);
26256+ atomic_inc_unchecked(&vcc->stats->rx_err);
26257 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
26258 length, skb->len);)
26259 dev_kfree_skb_any(skb);
26260@@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
26261
26262 IF_RX(printk("rx_dle_intr: skb push");)
26263 vcc->push(vcc,skb);
26264- atomic_inc(&vcc->stats->rx);
26265+ atomic_inc_unchecked(&vcc->stats->rx);
26266 iadev->rx_pkt_cnt++;
26267 }
26268 INCR_DLE:
26269@@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
26270 {
26271 struct k_sonet_stats *stats;
26272 stats = &PRIV(_ia_dev[board])->sonet_stats;
26273- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
26274- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
26275- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
26276- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
26277- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
26278- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
26279- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
26280- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
26281- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
26282+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
26283+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
26284+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
26285+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
26286+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
26287+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
26288+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
26289+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
26290+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
26291 }
26292 ia_cmds.status = 0;
26293 break;
26294@@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
26295 if ((desc == 0) || (desc > iadev->num_tx_desc))
26296 {
26297 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
26298- atomic_inc(&vcc->stats->tx);
26299+ atomic_inc_unchecked(&vcc->stats->tx);
26300 if (vcc->pop)
26301 vcc->pop(vcc, skb);
26302 else
26303@@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
26304 ATM_DESC(skb) = vcc->vci;
26305 skb_queue_tail(&iadev->tx_dma_q, skb);
26306
26307- atomic_inc(&vcc->stats->tx);
26308+ atomic_inc_unchecked(&vcc->stats->tx);
26309 iadev->tx_pkt_cnt++;
26310 /* Increment transaction counter */
26311 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
26312
26313 #if 0
26314 /* add flow control logic */
26315- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
26316+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
26317 if (iavcc->vc_desc_cnt > 10) {
26318 vcc->tx_quota = vcc->tx_quota * 3 / 4;
26319 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
26320diff -urNp linux-2.6.32.43/drivers/atm/lanai.c linux-2.6.32.43/drivers/atm/lanai.c
26321--- linux-2.6.32.43/drivers/atm/lanai.c 2011-03-27 14:31:47.000000000 -0400
26322+++ linux-2.6.32.43/drivers/atm/lanai.c 2011-04-17 15:56:46.000000000 -0400
26323@@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
26324 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
26325 lanai_endtx(lanai, lvcc);
26326 lanai_free_skb(lvcc->tx.atmvcc, skb);
26327- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
26328+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
26329 }
26330
26331 /* Try to fill the buffer - don't call unless there is backlog */
26332@@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
26333 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
26334 __net_timestamp(skb);
26335 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
26336- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
26337+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
26338 out:
26339 lvcc->rx.buf.ptr = end;
26340 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
26341@@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
26342 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
26343 "vcc %d\n", lanai->number, (unsigned int) s, vci);
26344 lanai->stats.service_rxnotaal5++;
26345- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26346+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26347 return 0;
26348 }
26349 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
26350@@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
26351 int bytes;
26352 read_unlock(&vcc_sklist_lock);
26353 DPRINTK("got trashed rx pdu on vci %d\n", vci);
26354- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26355+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26356 lvcc->stats.x.aal5.service_trash++;
26357 bytes = (SERVICE_GET_END(s) * 16) -
26358 (((unsigned long) lvcc->rx.buf.ptr) -
26359@@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
26360 }
26361 if (s & SERVICE_STREAM) {
26362 read_unlock(&vcc_sklist_lock);
26363- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26364+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26365 lvcc->stats.x.aal5.service_stream++;
26366 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
26367 "PDU on VCI %d!\n", lanai->number, vci);
26368@@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
26369 return 0;
26370 }
26371 DPRINTK("got rx crc error on vci %d\n", vci);
26372- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26373+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26374 lvcc->stats.x.aal5.service_rxcrc++;
26375 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
26376 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
26377diff -urNp linux-2.6.32.43/drivers/atm/nicstar.c linux-2.6.32.43/drivers/atm/nicstar.c
26378--- linux-2.6.32.43/drivers/atm/nicstar.c 2011-03-27 14:31:47.000000000 -0400
26379+++ linux-2.6.32.43/drivers/atm/nicstar.c 2011-04-17 15:56:46.000000000 -0400
26380@@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
26381 if ((vc = (vc_map *) vcc->dev_data) == NULL)
26382 {
26383 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
26384- atomic_inc(&vcc->stats->tx_err);
26385+ atomic_inc_unchecked(&vcc->stats->tx_err);
26386 dev_kfree_skb_any(skb);
26387 return -EINVAL;
26388 }
26389@@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
26390 if (!vc->tx)
26391 {
26392 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
26393- atomic_inc(&vcc->stats->tx_err);
26394+ atomic_inc_unchecked(&vcc->stats->tx_err);
26395 dev_kfree_skb_any(skb);
26396 return -EINVAL;
26397 }
26398@@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
26399 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
26400 {
26401 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
26402- atomic_inc(&vcc->stats->tx_err);
26403+ atomic_inc_unchecked(&vcc->stats->tx_err);
26404 dev_kfree_skb_any(skb);
26405 return -EINVAL;
26406 }
26407@@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
26408 if (skb_shinfo(skb)->nr_frags != 0)
26409 {
26410 printk("nicstar%d: No scatter-gather yet.\n", card->index);
26411- atomic_inc(&vcc->stats->tx_err);
26412+ atomic_inc_unchecked(&vcc->stats->tx_err);
26413 dev_kfree_skb_any(skb);
26414 return -EINVAL;
26415 }
26416@@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
26417
26418 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
26419 {
26420- atomic_inc(&vcc->stats->tx_err);
26421+ atomic_inc_unchecked(&vcc->stats->tx_err);
26422 dev_kfree_skb_any(skb);
26423 return -EIO;
26424 }
26425- atomic_inc(&vcc->stats->tx);
26426+ atomic_inc_unchecked(&vcc->stats->tx);
26427
26428 return 0;
26429 }
26430@@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
26431 {
26432 printk("nicstar%d: Can't allocate buffers for aal0.\n",
26433 card->index);
26434- atomic_add(i,&vcc->stats->rx_drop);
26435+ atomic_add_unchecked(i,&vcc->stats->rx_drop);
26436 break;
26437 }
26438 if (!atm_charge(vcc, sb->truesize))
26439 {
26440 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
26441 card->index);
26442- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26443+ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26444 dev_kfree_skb_any(sb);
26445 break;
26446 }
26447@@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
26448 ATM_SKB(sb)->vcc = vcc;
26449 __net_timestamp(sb);
26450 vcc->push(vcc, sb);
26451- atomic_inc(&vcc->stats->rx);
26452+ atomic_inc_unchecked(&vcc->stats->rx);
26453 cell += ATM_CELL_PAYLOAD;
26454 }
26455
26456@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
26457 if (iovb == NULL)
26458 {
26459 printk("nicstar%d: Out of iovec buffers.\n", card->index);
26460- atomic_inc(&vcc->stats->rx_drop);
26461+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26462 recycle_rx_buf(card, skb);
26463 return;
26464 }
26465@@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
26466 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
26467 {
26468 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26469- atomic_inc(&vcc->stats->rx_err);
26470+ atomic_inc_unchecked(&vcc->stats->rx_err);
26471 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
26472 NS_SKB(iovb)->iovcnt = 0;
26473 iovb->len = 0;
26474@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
26475 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
26476 card->index);
26477 which_list(card, skb);
26478- atomic_inc(&vcc->stats->rx_err);
26479+ atomic_inc_unchecked(&vcc->stats->rx_err);
26480 recycle_rx_buf(card, skb);
26481 vc->rx_iov = NULL;
26482 recycle_iov_buf(card, iovb);
26483@@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
26484 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
26485 card->index);
26486 which_list(card, skb);
26487- atomic_inc(&vcc->stats->rx_err);
26488+ atomic_inc_unchecked(&vcc->stats->rx_err);
26489 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26490 NS_SKB(iovb)->iovcnt);
26491 vc->rx_iov = NULL;
26492@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
26493 printk(" - PDU size mismatch.\n");
26494 else
26495 printk(".\n");
26496- atomic_inc(&vcc->stats->rx_err);
26497+ atomic_inc_unchecked(&vcc->stats->rx_err);
26498 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26499 NS_SKB(iovb)->iovcnt);
26500 vc->rx_iov = NULL;
26501@@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
26502 if (!atm_charge(vcc, skb->truesize))
26503 {
26504 push_rxbufs(card, skb);
26505- atomic_inc(&vcc->stats->rx_drop);
26506+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26507 }
26508 else
26509 {
26510@@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
26511 ATM_SKB(skb)->vcc = vcc;
26512 __net_timestamp(skb);
26513 vcc->push(vcc, skb);
26514- atomic_inc(&vcc->stats->rx);
26515+ atomic_inc_unchecked(&vcc->stats->rx);
26516 }
26517 }
26518 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
26519@@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
26520 if (!atm_charge(vcc, sb->truesize))
26521 {
26522 push_rxbufs(card, sb);
26523- atomic_inc(&vcc->stats->rx_drop);
26524+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26525 }
26526 else
26527 {
26528@@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
26529 ATM_SKB(sb)->vcc = vcc;
26530 __net_timestamp(sb);
26531 vcc->push(vcc, sb);
26532- atomic_inc(&vcc->stats->rx);
26533+ atomic_inc_unchecked(&vcc->stats->rx);
26534 }
26535
26536 push_rxbufs(card, skb);
26537@@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
26538 if (!atm_charge(vcc, skb->truesize))
26539 {
26540 push_rxbufs(card, skb);
26541- atomic_inc(&vcc->stats->rx_drop);
26542+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26543 }
26544 else
26545 {
26546@@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
26547 ATM_SKB(skb)->vcc = vcc;
26548 __net_timestamp(skb);
26549 vcc->push(vcc, skb);
26550- atomic_inc(&vcc->stats->rx);
26551+ atomic_inc_unchecked(&vcc->stats->rx);
26552 }
26553
26554 push_rxbufs(card, sb);
26555@@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
26556 if (hb == NULL)
26557 {
26558 printk("nicstar%d: Out of huge buffers.\n", card->index);
26559- atomic_inc(&vcc->stats->rx_drop);
26560+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26561 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26562 NS_SKB(iovb)->iovcnt);
26563 vc->rx_iov = NULL;
26564@@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
26565 }
26566 else
26567 dev_kfree_skb_any(hb);
26568- atomic_inc(&vcc->stats->rx_drop);
26569+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26570 }
26571 else
26572 {
26573@@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
26574 #endif /* NS_USE_DESTRUCTORS */
26575 __net_timestamp(hb);
26576 vcc->push(vcc, hb);
26577- atomic_inc(&vcc->stats->rx);
26578+ atomic_inc_unchecked(&vcc->stats->rx);
26579 }
26580 }
26581
26582diff -urNp linux-2.6.32.43/drivers/atm/solos-pci.c linux-2.6.32.43/drivers/atm/solos-pci.c
26583--- linux-2.6.32.43/drivers/atm/solos-pci.c 2011-04-17 17:00:52.000000000 -0400
26584+++ linux-2.6.32.43/drivers/atm/solos-pci.c 2011-05-16 21:46:57.000000000 -0400
26585@@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
26586 }
26587 atm_charge(vcc, skb->truesize);
26588 vcc->push(vcc, skb);
26589- atomic_inc(&vcc->stats->rx);
26590+ atomic_inc_unchecked(&vcc->stats->rx);
26591 break;
26592
26593 case PKT_STATUS:
26594@@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *
26595 char msg[500];
26596 char item[10];
26597
26598+ pax_track_stack();
26599+
26600 len = buf->len;
26601 for (i = 0; i < len; i++){
26602 if(i % 8 == 0)
26603@@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_car
26604 vcc = SKB_CB(oldskb)->vcc;
26605
26606 if (vcc) {
26607- atomic_inc(&vcc->stats->tx);
26608+ atomic_inc_unchecked(&vcc->stats->tx);
26609 solos_pop(vcc, oldskb);
26610 } else
26611 dev_kfree_skb_irq(oldskb);
26612diff -urNp linux-2.6.32.43/drivers/atm/suni.c linux-2.6.32.43/drivers/atm/suni.c
26613--- linux-2.6.32.43/drivers/atm/suni.c 2011-03-27 14:31:47.000000000 -0400
26614+++ linux-2.6.32.43/drivers/atm/suni.c 2011-04-17 15:56:46.000000000 -0400
26615@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26616
26617
26618 #define ADD_LIMITED(s,v) \
26619- atomic_add((v),&stats->s); \
26620- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26621+ atomic_add_unchecked((v),&stats->s); \
26622+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26623
26624
26625 static void suni_hz(unsigned long from_timer)
26626diff -urNp linux-2.6.32.43/drivers/atm/uPD98402.c linux-2.6.32.43/drivers/atm/uPD98402.c
26627--- linux-2.6.32.43/drivers/atm/uPD98402.c 2011-03-27 14:31:47.000000000 -0400
26628+++ linux-2.6.32.43/drivers/atm/uPD98402.c 2011-04-17 15:56:46.000000000 -0400
26629@@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
26630 struct sonet_stats tmp;
26631 int error = 0;
26632
26633- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26634+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26635 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26636 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26637 if (zero && !error) {
26638@@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
26639
26640
26641 #define ADD_LIMITED(s,v) \
26642- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26643- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26644- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26645+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26646+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26647+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26648
26649
26650 static void stat_event(struct atm_dev *dev)
26651@@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
26652 if (reason & uPD98402_INT_PFM) stat_event(dev);
26653 if (reason & uPD98402_INT_PCO) {
26654 (void) GET(PCOCR); /* clear interrupt cause */
26655- atomic_add(GET(HECCT),
26656+ atomic_add_unchecked(GET(HECCT),
26657 &PRIV(dev)->sonet_stats.uncorr_hcs);
26658 }
26659 if ((reason & uPD98402_INT_RFO) &&
26660@@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
26661 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
26662 uPD98402_INT_LOS),PIMR); /* enable them */
26663 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
26664- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26665- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
26666- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
26667+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
26668+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
26669+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
26670 return 0;
26671 }
26672
26673diff -urNp linux-2.6.32.43/drivers/atm/zatm.c linux-2.6.32.43/drivers/atm/zatm.c
26674--- linux-2.6.32.43/drivers/atm/zatm.c 2011-03-27 14:31:47.000000000 -0400
26675+++ linux-2.6.32.43/drivers/atm/zatm.c 2011-04-17 15:56:46.000000000 -0400
26676@@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
26677 }
26678 if (!size) {
26679 dev_kfree_skb_irq(skb);
26680- if (vcc) atomic_inc(&vcc->stats->rx_err);
26681+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
26682 continue;
26683 }
26684 if (!atm_charge(vcc,skb->truesize)) {
26685@@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
26686 skb->len = size;
26687 ATM_SKB(skb)->vcc = vcc;
26688 vcc->push(vcc,skb);
26689- atomic_inc(&vcc->stats->rx);
26690+ atomic_inc_unchecked(&vcc->stats->rx);
26691 }
26692 zout(pos & 0xffff,MTA(mbx));
26693 #if 0 /* probably a stupid idea */
26694@@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
26695 skb_queue_head(&zatm_vcc->backlog,skb);
26696 break;
26697 }
26698- atomic_inc(&vcc->stats->tx);
26699+ atomic_inc_unchecked(&vcc->stats->tx);
26700 wake_up(&zatm_vcc->tx_wait);
26701 }
26702
26703diff -urNp linux-2.6.32.43/drivers/base/bus.c linux-2.6.32.43/drivers/base/bus.c
26704--- linux-2.6.32.43/drivers/base/bus.c 2011-03-27 14:31:47.000000000 -0400
26705+++ linux-2.6.32.43/drivers/base/bus.c 2011-04-17 15:56:46.000000000 -0400
26706@@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
26707 return ret;
26708 }
26709
26710-static struct sysfs_ops driver_sysfs_ops = {
26711+static const struct sysfs_ops driver_sysfs_ops = {
26712 .show = drv_attr_show,
26713 .store = drv_attr_store,
26714 };
26715@@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
26716 return ret;
26717 }
26718
26719-static struct sysfs_ops bus_sysfs_ops = {
26720+static const struct sysfs_ops bus_sysfs_ops = {
26721 .show = bus_attr_show,
26722 .store = bus_attr_store,
26723 };
26724@@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
26725 return 0;
26726 }
26727
26728-static struct kset_uevent_ops bus_uevent_ops = {
26729+static const struct kset_uevent_ops bus_uevent_ops = {
26730 .filter = bus_uevent_filter,
26731 };
26732
26733diff -urNp linux-2.6.32.43/drivers/base/class.c linux-2.6.32.43/drivers/base/class.c
26734--- linux-2.6.32.43/drivers/base/class.c 2011-03-27 14:31:47.000000000 -0400
26735+++ linux-2.6.32.43/drivers/base/class.c 2011-04-17 15:56:46.000000000 -0400
26736@@ -63,7 +63,7 @@ static void class_release(struct kobject
26737 kfree(cp);
26738 }
26739
26740-static struct sysfs_ops class_sysfs_ops = {
26741+static const struct sysfs_ops class_sysfs_ops = {
26742 .show = class_attr_show,
26743 .store = class_attr_store,
26744 };
26745diff -urNp linux-2.6.32.43/drivers/base/core.c linux-2.6.32.43/drivers/base/core.c
26746--- linux-2.6.32.43/drivers/base/core.c 2011-03-27 14:31:47.000000000 -0400
26747+++ linux-2.6.32.43/drivers/base/core.c 2011-04-17 15:56:46.000000000 -0400
26748@@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kob
26749 return ret;
26750 }
26751
26752-static struct sysfs_ops dev_sysfs_ops = {
26753+static const struct sysfs_ops dev_sysfs_ops = {
26754 .show = dev_attr_show,
26755 .store = dev_attr_store,
26756 };
26757@@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset,
26758 return retval;
26759 }
26760
26761-static struct kset_uevent_ops device_uevent_ops = {
26762+static const struct kset_uevent_ops device_uevent_ops = {
26763 .filter = dev_uevent_filter,
26764 .name = dev_uevent_name,
26765 .uevent = dev_uevent,
26766diff -urNp linux-2.6.32.43/drivers/base/memory.c linux-2.6.32.43/drivers/base/memory.c
26767--- linux-2.6.32.43/drivers/base/memory.c 2011-03-27 14:31:47.000000000 -0400
26768+++ linux-2.6.32.43/drivers/base/memory.c 2011-04-17 15:56:46.000000000 -0400
26769@@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
26770 return retval;
26771 }
26772
26773-static struct kset_uevent_ops memory_uevent_ops = {
26774+static const struct kset_uevent_ops memory_uevent_ops = {
26775 .name = memory_uevent_name,
26776 .uevent = memory_uevent,
26777 };
26778diff -urNp linux-2.6.32.43/drivers/base/sys.c linux-2.6.32.43/drivers/base/sys.c
26779--- linux-2.6.32.43/drivers/base/sys.c 2011-03-27 14:31:47.000000000 -0400
26780+++ linux-2.6.32.43/drivers/base/sys.c 2011-04-17 15:56:46.000000000 -0400
26781@@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
26782 return -EIO;
26783 }
26784
26785-static struct sysfs_ops sysfs_ops = {
26786+static const struct sysfs_ops sysfs_ops = {
26787 .show = sysdev_show,
26788 .store = sysdev_store,
26789 };
26790@@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
26791 return -EIO;
26792 }
26793
26794-static struct sysfs_ops sysfs_class_ops = {
26795+static const struct sysfs_ops sysfs_class_ops = {
26796 .show = sysdev_class_show,
26797 .store = sysdev_class_store,
26798 };
26799diff -urNp linux-2.6.32.43/drivers/block/cciss.c linux-2.6.32.43/drivers/block/cciss.c
26800--- linux-2.6.32.43/drivers/block/cciss.c 2011-03-27 14:31:47.000000000 -0400
26801+++ linux-2.6.32.43/drivers/block/cciss.c 2011-04-17 15:56:46.000000000 -0400
26802@@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct
26803 int err;
26804 u32 cp;
26805
26806+ memset(&arg64, 0, sizeof(arg64));
26807+
26808 err = 0;
26809 err |=
26810 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
26811diff -urNp linux-2.6.32.43/drivers/block/cpqarray.c linux-2.6.32.43/drivers/block/cpqarray.c
26812--- linux-2.6.32.43/drivers/block/cpqarray.c 2011-03-27 14:31:47.000000000 -0400
26813+++ linux-2.6.32.43/drivers/block/cpqarray.c 2011-05-16 21:46:57.000000000 -0400
26814@@ -896,6 +896,8 @@ static void do_ida_request(struct reques
26815 struct scatterlist tmp_sg[SG_MAX];
26816 int i, dir, seg;
26817
26818+ pax_track_stack();
26819+
26820 if (blk_queue_plugged(q))
26821 goto startio;
26822
26823diff -urNp linux-2.6.32.43/drivers/block/DAC960.c linux-2.6.32.43/drivers/block/DAC960.c
26824--- linux-2.6.32.43/drivers/block/DAC960.c 2011-03-27 14:31:47.000000000 -0400
26825+++ linux-2.6.32.43/drivers/block/DAC960.c 2011-05-16 21:46:57.000000000 -0400
26826@@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfigur
26827 unsigned long flags;
26828 int Channel, TargetID;
26829
26830+ pax_track_stack();
26831+
26832 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
26833 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
26834 sizeof(DAC960_SCSI_Inquiry_T) +
26835diff -urNp linux-2.6.32.43/drivers/block/nbd.c linux-2.6.32.43/drivers/block/nbd.c
26836--- linux-2.6.32.43/drivers/block/nbd.c 2011-06-25 12:55:34.000000000 -0400
26837+++ linux-2.6.32.43/drivers/block/nbd.c 2011-06-25 12:56:37.000000000 -0400
26838@@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *
26839 struct kvec iov;
26840 sigset_t blocked, oldset;
26841
26842+ pax_track_stack();
26843+
26844 if (unlikely(!sock)) {
26845 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
26846 lo->disk->disk_name, (send ? "send" : "recv"));
26847@@ -569,6 +571,8 @@ static void do_nbd_request(struct reques
26848 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
26849 unsigned int cmd, unsigned long arg)
26850 {
26851+ pax_track_stack();
26852+
26853 switch (cmd) {
26854 case NBD_DISCONNECT: {
26855 struct request sreq;
26856diff -urNp linux-2.6.32.43/drivers/block/pktcdvd.c linux-2.6.32.43/drivers/block/pktcdvd.c
26857--- linux-2.6.32.43/drivers/block/pktcdvd.c 2011-03-27 14:31:47.000000000 -0400
26858+++ linux-2.6.32.43/drivers/block/pktcdvd.c 2011-04-17 15:56:46.000000000 -0400
26859@@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
26860 return len;
26861 }
26862
26863-static struct sysfs_ops kobj_pkt_ops = {
26864+static const struct sysfs_ops kobj_pkt_ops = {
26865 .show = kobj_pkt_show,
26866 .store = kobj_pkt_store
26867 };
26868diff -urNp linux-2.6.32.43/drivers/char/agp/frontend.c linux-2.6.32.43/drivers/char/agp/frontend.c
26869--- linux-2.6.32.43/drivers/char/agp/frontend.c 2011-03-27 14:31:47.000000000 -0400
26870+++ linux-2.6.32.43/drivers/char/agp/frontend.c 2011-04-17 15:56:46.000000000 -0400
26871@@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
26872 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
26873 return -EFAULT;
26874
26875- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
26876+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
26877 return -EFAULT;
26878
26879 client = agp_find_client_by_pid(reserve.pid);
26880diff -urNp linux-2.6.32.43/drivers/char/briq_panel.c linux-2.6.32.43/drivers/char/briq_panel.c
26881--- linux-2.6.32.43/drivers/char/briq_panel.c 2011-03-27 14:31:47.000000000 -0400
26882+++ linux-2.6.32.43/drivers/char/briq_panel.c 2011-04-18 19:48:57.000000000 -0400
26883@@ -10,6 +10,7 @@
26884 #include <linux/types.h>
26885 #include <linux/errno.h>
26886 #include <linux/tty.h>
26887+#include <linux/mutex.h>
26888 #include <linux/timer.h>
26889 #include <linux/kernel.h>
26890 #include <linux/wait.h>
26891@@ -36,6 +37,7 @@ static int vfd_is_open;
26892 static unsigned char vfd[40];
26893 static int vfd_cursor;
26894 static unsigned char ledpb, led;
26895+static DEFINE_MUTEX(vfd_mutex);
26896
26897 static void update_vfd(void)
26898 {
26899@@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct f
26900 if (!vfd_is_open)
26901 return -EBUSY;
26902
26903+ mutex_lock(&vfd_mutex);
26904 for (;;) {
26905 char c;
26906 if (!indx)
26907 break;
26908- if (get_user(c, buf))
26909+ if (get_user(c, buf)) {
26910+ mutex_unlock(&vfd_mutex);
26911 return -EFAULT;
26912+ }
26913 if (esc) {
26914 set_led(c);
26915 esc = 0;
26916@@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct f
26917 buf++;
26918 }
26919 update_vfd();
26920+ mutex_unlock(&vfd_mutex);
26921
26922 return len;
26923 }
26924diff -urNp linux-2.6.32.43/drivers/char/genrtc.c linux-2.6.32.43/drivers/char/genrtc.c
26925--- linux-2.6.32.43/drivers/char/genrtc.c 2011-03-27 14:31:47.000000000 -0400
26926+++ linux-2.6.32.43/drivers/char/genrtc.c 2011-04-18 19:45:42.000000000 -0400
26927@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *i
26928 switch (cmd) {
26929
26930 case RTC_PLL_GET:
26931+ memset(&pll, 0, sizeof(pll));
26932 if (get_rtc_pll(&pll))
26933 return -EINVAL;
26934 else
26935diff -urNp linux-2.6.32.43/drivers/char/hpet.c linux-2.6.32.43/drivers/char/hpet.c
26936--- linux-2.6.32.43/drivers/char/hpet.c 2011-03-27 14:31:47.000000000 -0400
26937+++ linux-2.6.32.43/drivers/char/hpet.c 2011-04-23 12:56:11.000000000 -0400
26938@@ -430,7 +430,7 @@ static int hpet_release(struct inode *in
26939 return 0;
26940 }
26941
26942-static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
26943+static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
26944
26945 static int
26946 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
26947@@ -565,7 +565,7 @@ static inline unsigned long hpet_time_di
26948 }
26949
26950 static int
26951-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
26952+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
26953 {
26954 struct hpet_timer __iomem *timer;
26955 struct hpet __iomem *hpet;
26956@@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp,
26957 {
26958 struct hpet_info info;
26959
26960+ memset(&info, 0, sizeof(info));
26961+
26962 if (devp->hd_ireqfreq)
26963 info.hi_ireqfreq =
26964 hpet_time_div(hpetp, devp->hd_ireqfreq);
26965- else
26966- info.hi_ireqfreq = 0;
26967 info.hi_flags =
26968 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
26969 info.hi_hpet = hpetp->hp_which;
26970diff -urNp linux-2.6.32.43/drivers/char/hvc_beat.c linux-2.6.32.43/drivers/char/hvc_beat.c
26971--- linux-2.6.32.43/drivers/char/hvc_beat.c 2011-03-27 14:31:47.000000000 -0400
26972+++ linux-2.6.32.43/drivers/char/hvc_beat.c 2011-04-17 15:56:46.000000000 -0400
26973@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
26974 return cnt;
26975 }
26976
26977-static struct hv_ops hvc_beat_get_put_ops = {
26978+static const struct hv_ops hvc_beat_get_put_ops = {
26979 .get_chars = hvc_beat_get_chars,
26980 .put_chars = hvc_beat_put_chars,
26981 };
26982diff -urNp linux-2.6.32.43/drivers/char/hvc_console.c linux-2.6.32.43/drivers/char/hvc_console.c
26983--- linux-2.6.32.43/drivers/char/hvc_console.c 2011-03-27 14:31:47.000000000 -0400
26984+++ linux-2.6.32.43/drivers/char/hvc_console.c 2011-04-17 15:56:46.000000000 -0400
26985@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
26986 * console interfaces but can still be used as a tty device. This has to be
26987 * static because kmalloc will not work during early console init.
26988 */
26989-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
26990+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
26991 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
26992 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
26993
26994@@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kr
26995 * vty adapters do NOT get an hvc_instantiate() callback since they
26996 * appear after early console init.
26997 */
26998-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
26999+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
27000 {
27001 struct hvc_struct *hp;
27002
27003@@ -756,7 +756,7 @@ static const struct tty_operations hvc_o
27004 };
27005
27006 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
27007- struct hv_ops *ops, int outbuf_size)
27008+ const struct hv_ops *ops, int outbuf_size)
27009 {
27010 struct hvc_struct *hp;
27011 int i;
27012diff -urNp linux-2.6.32.43/drivers/char/hvc_console.h linux-2.6.32.43/drivers/char/hvc_console.h
27013--- linux-2.6.32.43/drivers/char/hvc_console.h 2011-03-27 14:31:47.000000000 -0400
27014+++ linux-2.6.32.43/drivers/char/hvc_console.h 2011-04-17 15:56:46.000000000 -0400
27015@@ -55,7 +55,7 @@ struct hvc_struct {
27016 int outbuf_size;
27017 int n_outbuf;
27018 uint32_t vtermno;
27019- struct hv_ops *ops;
27020+ const struct hv_ops *ops;
27021 int irq_requested;
27022 int data;
27023 struct winsize ws;
27024@@ -76,11 +76,11 @@ struct hv_ops {
27025 };
27026
27027 /* Register a vterm and a slot index for use as a console (console_init) */
27028-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
27029+extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
27030
27031 /* register a vterm for hvc tty operation (module_init or hotplug add) */
27032 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
27033- struct hv_ops *ops, int outbuf_size);
27034+ const struct hv_ops *ops, int outbuf_size);
27035 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
27036 extern int hvc_remove(struct hvc_struct *hp);
27037
27038diff -urNp linux-2.6.32.43/drivers/char/hvc_iseries.c linux-2.6.32.43/drivers/char/hvc_iseries.c
27039--- linux-2.6.32.43/drivers/char/hvc_iseries.c 2011-03-27 14:31:47.000000000 -0400
27040+++ linux-2.6.32.43/drivers/char/hvc_iseries.c 2011-04-17 15:56:46.000000000 -0400
27041@@ -197,7 +197,7 @@ done:
27042 return sent;
27043 }
27044
27045-static struct hv_ops hvc_get_put_ops = {
27046+static const struct hv_ops hvc_get_put_ops = {
27047 .get_chars = get_chars,
27048 .put_chars = put_chars,
27049 .notifier_add = notifier_add_irq,
27050diff -urNp linux-2.6.32.43/drivers/char/hvc_iucv.c linux-2.6.32.43/drivers/char/hvc_iucv.c
27051--- linux-2.6.32.43/drivers/char/hvc_iucv.c 2011-03-27 14:31:47.000000000 -0400
27052+++ linux-2.6.32.43/drivers/char/hvc_iucv.c 2011-04-17 15:56:46.000000000 -0400
27053@@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(stru
27054
27055
27056 /* HVC operations */
27057-static struct hv_ops hvc_iucv_ops = {
27058+static const struct hv_ops hvc_iucv_ops = {
27059 .get_chars = hvc_iucv_get_chars,
27060 .put_chars = hvc_iucv_put_chars,
27061 .notifier_add = hvc_iucv_notifier_add,
27062diff -urNp linux-2.6.32.43/drivers/char/hvc_rtas.c linux-2.6.32.43/drivers/char/hvc_rtas.c
27063--- linux-2.6.32.43/drivers/char/hvc_rtas.c 2011-03-27 14:31:47.000000000 -0400
27064+++ linux-2.6.32.43/drivers/char/hvc_rtas.c 2011-04-17 15:56:46.000000000 -0400
27065@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
27066 return i;
27067 }
27068
27069-static struct hv_ops hvc_rtas_get_put_ops = {
27070+static const struct hv_ops hvc_rtas_get_put_ops = {
27071 .get_chars = hvc_rtas_read_console,
27072 .put_chars = hvc_rtas_write_console,
27073 };
27074diff -urNp linux-2.6.32.43/drivers/char/hvcs.c linux-2.6.32.43/drivers/char/hvcs.c
27075--- linux-2.6.32.43/drivers/char/hvcs.c 2011-03-27 14:31:47.000000000 -0400
27076+++ linux-2.6.32.43/drivers/char/hvcs.c 2011-04-17 15:56:46.000000000 -0400
27077@@ -82,6 +82,7 @@
27078 #include <asm/hvcserver.h>
27079 #include <asm/uaccess.h>
27080 #include <asm/vio.h>
27081+#include <asm/local.h>
27082
27083 /*
27084 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
27085@@ -269,7 +270,7 @@ struct hvcs_struct {
27086 unsigned int index;
27087
27088 struct tty_struct *tty;
27089- int open_count;
27090+ local_t open_count;
27091
27092 /*
27093 * Used to tell the driver kernel_thread what operations need to take
27094@@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
27095
27096 spin_lock_irqsave(&hvcsd->lock, flags);
27097
27098- if (hvcsd->open_count > 0) {
27099+ if (local_read(&hvcsd->open_count) > 0) {
27100 spin_unlock_irqrestore(&hvcsd->lock, flags);
27101 printk(KERN_INFO "HVCS: vterm state unchanged. "
27102 "The hvcs device node is still in use.\n");
27103@@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
27104 if ((retval = hvcs_partner_connect(hvcsd)))
27105 goto error_release;
27106
27107- hvcsd->open_count = 1;
27108+ local_set(&hvcsd->open_count, 1);
27109 hvcsd->tty = tty;
27110 tty->driver_data = hvcsd;
27111
27112@@ -1169,7 +1170,7 @@ fast_open:
27113
27114 spin_lock_irqsave(&hvcsd->lock, flags);
27115 kref_get(&hvcsd->kref);
27116- hvcsd->open_count++;
27117+ local_inc(&hvcsd->open_count);
27118 hvcsd->todo_mask |= HVCS_SCHED_READ;
27119 spin_unlock_irqrestore(&hvcsd->lock, flags);
27120
27121@@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct
27122 hvcsd = tty->driver_data;
27123
27124 spin_lock_irqsave(&hvcsd->lock, flags);
27125- if (--hvcsd->open_count == 0) {
27126+ if (local_dec_and_test(&hvcsd->open_count)) {
27127
27128 vio_disable_interrupts(hvcsd->vdev);
27129
27130@@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct
27131 free_irq(irq, hvcsd);
27132 kref_put(&hvcsd->kref, destroy_hvcs_struct);
27133 return;
27134- } else if (hvcsd->open_count < 0) {
27135+ } else if (local_read(&hvcsd->open_count) < 0) {
27136 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
27137 " is missmanaged.\n",
27138- hvcsd->vdev->unit_address, hvcsd->open_count);
27139+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
27140 }
27141
27142 spin_unlock_irqrestore(&hvcsd->lock, flags);
27143@@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
27144
27145 spin_lock_irqsave(&hvcsd->lock, flags);
27146 /* Preserve this so that we know how many kref refs to put */
27147- temp_open_count = hvcsd->open_count;
27148+ temp_open_count = local_read(&hvcsd->open_count);
27149
27150 /*
27151 * Don't kref put inside the spinlock because the destruction
27152@@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
27153 hvcsd->tty->driver_data = NULL;
27154 hvcsd->tty = NULL;
27155
27156- hvcsd->open_count = 0;
27157+ local_set(&hvcsd->open_count, 0);
27158
27159 /* This will drop any buffered data on the floor which is OK in a hangup
27160 * scenario. */
27161@@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct
27162 * the middle of a write operation? This is a crummy place to do this
27163 * but we want to keep it all in the spinlock.
27164 */
27165- if (hvcsd->open_count <= 0) {
27166+ if (local_read(&hvcsd->open_count) <= 0) {
27167 spin_unlock_irqrestore(&hvcsd->lock, flags);
27168 return -ENODEV;
27169 }
27170@@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
27171 {
27172 struct hvcs_struct *hvcsd = tty->driver_data;
27173
27174- if (!hvcsd || hvcsd->open_count <= 0)
27175+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
27176 return 0;
27177
27178 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
27179diff -urNp linux-2.6.32.43/drivers/char/hvc_udbg.c linux-2.6.32.43/drivers/char/hvc_udbg.c
27180--- linux-2.6.32.43/drivers/char/hvc_udbg.c 2011-03-27 14:31:47.000000000 -0400
27181+++ linux-2.6.32.43/drivers/char/hvc_udbg.c 2011-04-17 15:56:46.000000000 -0400
27182@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
27183 return i;
27184 }
27185
27186-static struct hv_ops hvc_udbg_ops = {
27187+static const struct hv_ops hvc_udbg_ops = {
27188 .get_chars = hvc_udbg_get,
27189 .put_chars = hvc_udbg_put,
27190 };
27191diff -urNp linux-2.6.32.43/drivers/char/hvc_vio.c linux-2.6.32.43/drivers/char/hvc_vio.c
27192--- linux-2.6.32.43/drivers/char/hvc_vio.c 2011-03-27 14:31:47.000000000 -0400
27193+++ linux-2.6.32.43/drivers/char/hvc_vio.c 2011-04-17 15:56:46.000000000 -0400
27194@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
27195 return got;
27196 }
27197
27198-static struct hv_ops hvc_get_put_ops = {
27199+static const struct hv_ops hvc_get_put_ops = {
27200 .get_chars = filtered_get_chars,
27201 .put_chars = hvc_put_chars,
27202 .notifier_add = notifier_add_irq,
27203diff -urNp linux-2.6.32.43/drivers/char/hvc_xen.c linux-2.6.32.43/drivers/char/hvc_xen.c
27204--- linux-2.6.32.43/drivers/char/hvc_xen.c 2011-03-27 14:31:47.000000000 -0400
27205+++ linux-2.6.32.43/drivers/char/hvc_xen.c 2011-04-17 15:56:46.000000000 -0400
27206@@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
27207 return recv;
27208 }
27209
27210-static struct hv_ops hvc_ops = {
27211+static const struct hv_ops hvc_ops = {
27212 .get_chars = read_console,
27213 .put_chars = write_console,
27214 .notifier_add = notifier_add_irq,
27215diff -urNp linux-2.6.32.43/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.32.43/drivers/char/ipmi/ipmi_msghandler.c
27216--- linux-2.6.32.43/drivers/char/ipmi/ipmi_msghandler.c 2011-03-27 14:31:47.000000000 -0400
27217+++ linux-2.6.32.43/drivers/char/ipmi/ipmi_msghandler.c 2011-05-16 21:46:57.000000000 -0400
27218@@ -414,7 +414,7 @@ struct ipmi_smi {
27219 struct proc_dir_entry *proc_dir;
27220 char proc_dir_name[10];
27221
27222- atomic_t stats[IPMI_NUM_STATS];
27223+ atomic_unchecked_t stats[IPMI_NUM_STATS];
27224
27225 /*
27226 * run_to_completion duplicate of smb_info, smi_info
27227@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
27228
27229
27230 #define ipmi_inc_stat(intf, stat) \
27231- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
27232+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
27233 #define ipmi_get_stat(intf, stat) \
27234- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
27235+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
27236
27237 static int is_lan_addr(struct ipmi_addr *addr)
27238 {
27239@@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
27240 INIT_LIST_HEAD(&intf->cmd_rcvrs);
27241 init_waitqueue_head(&intf->waitq);
27242 for (i = 0; i < IPMI_NUM_STATS; i++)
27243- atomic_set(&intf->stats[i], 0);
27244+ atomic_set_unchecked(&intf->stats[i], 0);
27245
27246 intf->proc_dir = NULL;
27247
27248@@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
27249 struct ipmi_smi_msg smi_msg;
27250 struct ipmi_recv_msg recv_msg;
27251
27252+ pax_track_stack();
27253+
27254 si = (struct ipmi_system_interface_addr *) &addr;
27255 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
27256 si->channel = IPMI_BMC_CHANNEL;
27257diff -urNp linux-2.6.32.43/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.32.43/drivers/char/ipmi/ipmi_si_intf.c
27258--- linux-2.6.32.43/drivers/char/ipmi/ipmi_si_intf.c 2011-03-27 14:31:47.000000000 -0400
27259+++ linux-2.6.32.43/drivers/char/ipmi/ipmi_si_intf.c 2011-04-17 15:56:46.000000000 -0400
27260@@ -277,7 +277,7 @@ struct smi_info {
27261 unsigned char slave_addr;
27262
27263 /* Counters and things for the proc filesystem. */
27264- atomic_t stats[SI_NUM_STATS];
27265+ atomic_unchecked_t stats[SI_NUM_STATS];
27266
27267 struct task_struct *thread;
27268
27269@@ -285,9 +285,9 @@ struct smi_info {
27270 };
27271
27272 #define smi_inc_stat(smi, stat) \
27273- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
27274+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
27275 #define smi_get_stat(smi, stat) \
27276- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
27277+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
27278
27279 #define SI_MAX_PARMS 4
27280
27281@@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info
27282 atomic_set(&new_smi->req_events, 0);
27283 new_smi->run_to_completion = 0;
27284 for (i = 0; i < SI_NUM_STATS; i++)
27285- atomic_set(&new_smi->stats[i], 0);
27286+ atomic_set_unchecked(&new_smi->stats[i], 0);
27287
27288 new_smi->interrupt_disabled = 0;
27289 atomic_set(&new_smi->stop_operation, 0);
27290diff -urNp linux-2.6.32.43/drivers/char/istallion.c linux-2.6.32.43/drivers/char/istallion.c
27291--- linux-2.6.32.43/drivers/char/istallion.c 2011-03-27 14:31:47.000000000 -0400
27292+++ linux-2.6.32.43/drivers/char/istallion.c 2011-05-16 21:46:57.000000000 -0400
27293@@ -187,7 +187,6 @@ static struct ktermios stli_deftermios
27294 * re-used for each stats call.
27295 */
27296 static comstats_t stli_comstats;
27297-static combrd_t stli_brdstats;
27298 static struct asystats stli_cdkstats;
27299
27300 /*****************************************************************************/
27301@@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __u
27302 {
27303 struct stlibrd *brdp;
27304 unsigned int i;
27305+ combrd_t stli_brdstats;
27306
27307 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
27308 return -EFAULT;
27309@@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stl
27310 struct stliport stli_dummyport;
27311 struct stliport *portp;
27312
27313+ pax_track_stack();
27314+
27315 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
27316 return -EFAULT;
27317 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
27318@@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stli
27319 struct stlibrd stli_dummybrd;
27320 struct stlibrd *brdp;
27321
27322+ pax_track_stack();
27323+
27324 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
27325 return -EFAULT;
27326 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
27327diff -urNp linux-2.6.32.43/drivers/char/Kconfig linux-2.6.32.43/drivers/char/Kconfig
27328--- linux-2.6.32.43/drivers/char/Kconfig 2011-03-27 14:31:47.000000000 -0400
27329+++ linux-2.6.32.43/drivers/char/Kconfig 2011-04-18 19:20:15.000000000 -0400
27330@@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
27331
27332 config DEVKMEM
27333 bool "/dev/kmem virtual device support"
27334- default y
27335+ default n
27336+ depends on !GRKERNSEC_KMEM
27337 help
27338 Say Y here if you want to support the /dev/kmem device. The
27339 /dev/kmem device is rarely used, but can be used for certain
27340@@ -1114,6 +1115,7 @@ config DEVPORT
27341 bool
27342 depends on !M68K
27343 depends on ISA || PCI
27344+ depends on !GRKERNSEC_KMEM
27345 default y
27346
27347 source "drivers/s390/char/Kconfig"
27348diff -urNp linux-2.6.32.43/drivers/char/keyboard.c linux-2.6.32.43/drivers/char/keyboard.c
27349--- linux-2.6.32.43/drivers/char/keyboard.c 2011-03-27 14:31:47.000000000 -0400
27350+++ linux-2.6.32.43/drivers/char/keyboard.c 2011-04-17 15:56:46.000000000 -0400
27351@@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
27352 kbd->kbdmode == VC_MEDIUMRAW) &&
27353 value != KVAL(K_SAK))
27354 return; /* SAK is allowed even in raw mode */
27355+
27356+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
27357+ {
27358+ void *func = fn_handler[value];
27359+ if (func == fn_show_state || func == fn_show_ptregs ||
27360+ func == fn_show_mem)
27361+ return;
27362+ }
27363+#endif
27364+
27365 fn_handler[value](vc);
27366 }
27367
27368@@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
27369 .evbit = { BIT_MASK(EV_SND) },
27370 },
27371
27372- { }, /* Terminating entry */
27373+ { 0 }, /* Terminating entry */
27374 };
27375
27376 MODULE_DEVICE_TABLE(input, kbd_ids);
27377diff -urNp linux-2.6.32.43/drivers/char/mem.c linux-2.6.32.43/drivers/char/mem.c
27378--- linux-2.6.32.43/drivers/char/mem.c 2011-03-27 14:31:47.000000000 -0400
27379+++ linux-2.6.32.43/drivers/char/mem.c 2011-04-17 15:56:46.000000000 -0400
27380@@ -18,6 +18,7 @@
27381 #include <linux/raw.h>
27382 #include <linux/tty.h>
27383 #include <linux/capability.h>
27384+#include <linux/security.h>
27385 #include <linux/ptrace.h>
27386 #include <linux/device.h>
27387 #include <linux/highmem.h>
27388@@ -35,6 +36,10 @@
27389 # include <linux/efi.h>
27390 #endif
27391
27392+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27393+extern struct file_operations grsec_fops;
27394+#endif
27395+
27396 static inline unsigned long size_inside_page(unsigned long start,
27397 unsigned long size)
27398 {
27399@@ -102,9 +107,13 @@ static inline int range_is_allowed(unsig
27400
27401 while (cursor < to) {
27402 if (!devmem_is_allowed(pfn)) {
27403+#ifdef CONFIG_GRKERNSEC_KMEM
27404+ gr_handle_mem_readwrite(from, to);
27405+#else
27406 printk(KERN_INFO
27407 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27408 current->comm, from, to);
27409+#endif
27410 return 0;
27411 }
27412 cursor += PAGE_SIZE;
27413@@ -112,6 +121,11 @@ static inline int range_is_allowed(unsig
27414 }
27415 return 1;
27416 }
27417+#elif defined(CONFIG_GRKERNSEC_KMEM)
27418+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27419+{
27420+ return 0;
27421+}
27422 #else
27423 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
27424 {
27425@@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * fi
27426 #endif
27427
27428 while (count > 0) {
27429+ char *temp;
27430+
27431 /*
27432 * Handle first page in case it's not aligned
27433 */
27434@@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * fi
27435 if (!ptr)
27436 return -EFAULT;
27437
27438- if (copy_to_user(buf, ptr, sz)) {
27439+#ifdef CONFIG_PAX_USERCOPY
27440+ temp = kmalloc(sz, GFP_KERNEL);
27441+ if (!temp) {
27442+ unxlate_dev_mem_ptr(p, ptr);
27443+ return -ENOMEM;
27444+ }
27445+ memcpy(temp, ptr, sz);
27446+#else
27447+ temp = ptr;
27448+#endif
27449+
27450+ if (copy_to_user(buf, temp, sz)) {
27451+
27452+#ifdef CONFIG_PAX_USERCOPY
27453+ kfree(temp);
27454+#endif
27455+
27456 unxlate_dev_mem_ptr(p, ptr);
27457 return -EFAULT;
27458 }
27459
27460+#ifdef CONFIG_PAX_USERCOPY
27461+ kfree(temp);
27462+#endif
27463+
27464 unxlate_dev_mem_ptr(p, ptr);
27465
27466 buf += sz;
27467@@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *fi
27468 size_t count, loff_t *ppos)
27469 {
27470 unsigned long p = *ppos;
27471- ssize_t low_count, read, sz;
27472+ ssize_t low_count, read, sz, err = 0;
27473 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
27474- int err = 0;
27475
27476 read = 0;
27477 if (p < (unsigned long) high_memory) {
27478@@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *fi
27479 }
27480 #endif
27481 while (low_count > 0) {
27482+ char *temp;
27483+
27484 sz = size_inside_page(p, low_count);
27485
27486 /*
27487@@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *fi
27488 */
27489 kbuf = xlate_dev_kmem_ptr((char *)p);
27490
27491- if (copy_to_user(buf, kbuf, sz))
27492+#ifdef CONFIG_PAX_USERCOPY
27493+ temp = kmalloc(sz, GFP_KERNEL);
27494+ if (!temp)
27495+ return -ENOMEM;
27496+ memcpy(temp, kbuf, sz);
27497+#else
27498+ temp = kbuf;
27499+#endif
27500+
27501+ err = copy_to_user(buf, temp, sz);
27502+
27503+#ifdef CONFIG_PAX_USERCOPY
27504+ kfree(temp);
27505+#endif
27506+
27507+ if (err)
27508 return -EFAULT;
27509 buf += sz;
27510 p += sz;
27511@@ -889,6 +941,9 @@ static const struct memdev {
27512 #ifdef CONFIG_CRASH_DUMP
27513 [12] = { "oldmem", 0, &oldmem_fops, NULL },
27514 #endif
27515+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27516+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
27517+#endif
27518 };
27519
27520 static int memory_open(struct inode *inode, struct file *filp)
27521diff -urNp linux-2.6.32.43/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.32.43/drivers/char/pcmcia/ipwireless/tty.c
27522--- linux-2.6.32.43/drivers/char/pcmcia/ipwireless/tty.c 2011-03-27 14:31:47.000000000 -0400
27523+++ linux-2.6.32.43/drivers/char/pcmcia/ipwireless/tty.c 2011-04-17 15:56:46.000000000 -0400
27524@@ -29,6 +29,7 @@
27525 #include <linux/tty_driver.h>
27526 #include <linux/tty_flip.h>
27527 #include <linux/uaccess.h>
27528+#include <asm/local.h>
27529
27530 #include "tty.h"
27531 #include "network.h"
27532@@ -51,7 +52,7 @@ struct ipw_tty {
27533 int tty_type;
27534 struct ipw_network *network;
27535 struct tty_struct *linux_tty;
27536- int open_count;
27537+ local_t open_count;
27538 unsigned int control_lines;
27539 struct mutex ipw_tty_mutex;
27540 int tx_bytes_queued;
27541@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
27542 mutex_unlock(&tty->ipw_tty_mutex);
27543 return -ENODEV;
27544 }
27545- if (tty->open_count == 0)
27546+ if (local_read(&tty->open_count) == 0)
27547 tty->tx_bytes_queued = 0;
27548
27549- tty->open_count++;
27550+ local_inc(&tty->open_count);
27551
27552 tty->linux_tty = linux_tty;
27553 linux_tty->driver_data = tty;
27554@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
27555
27556 static void do_ipw_close(struct ipw_tty *tty)
27557 {
27558- tty->open_count--;
27559-
27560- if (tty->open_count == 0) {
27561+ if (local_dec_return(&tty->open_count) == 0) {
27562 struct tty_struct *linux_tty = tty->linux_tty;
27563
27564 if (linux_tty != NULL) {
27565@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
27566 return;
27567
27568 mutex_lock(&tty->ipw_tty_mutex);
27569- if (tty->open_count == 0) {
27570+ if (local_read(&tty->open_count) == 0) {
27571 mutex_unlock(&tty->ipw_tty_mutex);
27572 return;
27573 }
27574@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
27575 return;
27576 }
27577
27578- if (!tty->open_count) {
27579+ if (!local_read(&tty->open_count)) {
27580 mutex_unlock(&tty->ipw_tty_mutex);
27581 return;
27582 }
27583@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
27584 return -ENODEV;
27585
27586 mutex_lock(&tty->ipw_tty_mutex);
27587- if (!tty->open_count) {
27588+ if (!local_read(&tty->open_count)) {
27589 mutex_unlock(&tty->ipw_tty_mutex);
27590 return -EINVAL;
27591 }
27592@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
27593 if (!tty)
27594 return -ENODEV;
27595
27596- if (!tty->open_count)
27597+ if (!local_read(&tty->open_count))
27598 return -EINVAL;
27599
27600 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
27601@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
27602 if (!tty)
27603 return 0;
27604
27605- if (!tty->open_count)
27606+ if (!local_read(&tty->open_count))
27607 return 0;
27608
27609 return tty->tx_bytes_queued;
27610@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
27611 if (!tty)
27612 return -ENODEV;
27613
27614- if (!tty->open_count)
27615+ if (!local_read(&tty->open_count))
27616 return -EINVAL;
27617
27618 return get_control_lines(tty);
27619@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
27620 if (!tty)
27621 return -ENODEV;
27622
27623- if (!tty->open_count)
27624+ if (!local_read(&tty->open_count))
27625 return -EINVAL;
27626
27627 return set_control_lines(tty, set, clear);
27628@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
27629 if (!tty)
27630 return -ENODEV;
27631
27632- if (!tty->open_count)
27633+ if (!local_read(&tty->open_count))
27634 return -EINVAL;
27635
27636 /* FIXME: Exactly how is the tty object locked here .. */
27637@@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty
27638 against a parallel ioctl etc */
27639 mutex_lock(&ttyj->ipw_tty_mutex);
27640 }
27641- while (ttyj->open_count)
27642+ while (local_read(&ttyj->open_count))
27643 do_ipw_close(ttyj);
27644 ipwireless_disassociate_network_ttys(network,
27645 ttyj->channel_idx);
27646diff -urNp linux-2.6.32.43/drivers/char/pty.c linux-2.6.32.43/drivers/char/pty.c
27647--- linux-2.6.32.43/drivers/char/pty.c 2011-03-27 14:31:47.000000000 -0400
27648+++ linux-2.6.32.43/drivers/char/pty.c 2011-04-17 15:56:46.000000000 -0400
27649@@ -682,7 +682,18 @@ static int ptmx_open(struct inode *inode
27650 return ret;
27651 }
27652
27653-static struct file_operations ptmx_fops;
27654+static const struct file_operations ptmx_fops = {
27655+ .llseek = no_llseek,
27656+ .read = tty_read,
27657+ .write = tty_write,
27658+ .poll = tty_poll,
27659+ .unlocked_ioctl = tty_ioctl,
27660+ .compat_ioctl = tty_compat_ioctl,
27661+ .open = ptmx_open,
27662+ .release = tty_release,
27663+ .fasync = tty_fasync,
27664+};
27665+
27666
27667 static void __init unix98_pty_init(void)
27668 {
27669@@ -736,9 +747,6 @@ static void __init unix98_pty_init(void)
27670 register_sysctl_table(pty_root_table);
27671
27672 /* Now create the /dev/ptmx special device */
27673- tty_default_fops(&ptmx_fops);
27674- ptmx_fops.open = ptmx_open;
27675-
27676 cdev_init(&ptmx_cdev, &ptmx_fops);
27677 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
27678 register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0)
27679diff -urNp linux-2.6.32.43/drivers/char/random.c linux-2.6.32.43/drivers/char/random.c
27680--- linux-2.6.32.43/drivers/char/random.c 2011-03-27 14:31:47.000000000 -0400
27681+++ linux-2.6.32.43/drivers/char/random.c 2011-04-17 15:56:46.000000000 -0400
27682@@ -254,8 +254,13 @@
27683 /*
27684 * Configuration information
27685 */
27686+#ifdef CONFIG_GRKERNSEC_RANDNET
27687+#define INPUT_POOL_WORDS 512
27688+#define OUTPUT_POOL_WORDS 128
27689+#else
27690 #define INPUT_POOL_WORDS 128
27691 #define OUTPUT_POOL_WORDS 32
27692+#endif
27693 #define SEC_XFER_SIZE 512
27694
27695 /*
27696@@ -292,10 +297,17 @@ static struct poolinfo {
27697 int poolwords;
27698 int tap1, tap2, tap3, tap4, tap5;
27699 } poolinfo_table[] = {
27700+#ifdef CONFIG_GRKERNSEC_RANDNET
27701+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
27702+ { 512, 411, 308, 208, 104, 1 },
27703+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
27704+ { 128, 103, 76, 51, 25, 1 },
27705+#else
27706 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
27707 { 128, 103, 76, 51, 25, 1 },
27708 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
27709 { 32, 26, 20, 14, 7, 1 },
27710+#endif
27711 #if 0
27712 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
27713 { 2048, 1638, 1231, 819, 411, 1 },
27714@@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
27715 #include <linux/sysctl.h>
27716
27717 static int min_read_thresh = 8, min_write_thresh;
27718-static int max_read_thresh = INPUT_POOL_WORDS * 32;
27719+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
27720 static int max_write_thresh = INPUT_POOL_WORDS * 32;
27721 static char sysctl_bootid[16];
27722
27723diff -urNp linux-2.6.32.43/drivers/char/rocket.c linux-2.6.32.43/drivers/char/rocket.c
27724--- linux-2.6.32.43/drivers/char/rocket.c 2011-03-27 14:31:47.000000000 -0400
27725+++ linux-2.6.32.43/drivers/char/rocket.c 2011-05-16 21:46:57.000000000 -0400
27726@@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info
27727 struct rocket_ports tmp;
27728 int board;
27729
27730+ pax_track_stack();
27731+
27732 if (!retports)
27733 return -EFAULT;
27734 memset(&tmp, 0, sizeof (tmp));
27735diff -urNp linux-2.6.32.43/drivers/char/sonypi.c linux-2.6.32.43/drivers/char/sonypi.c
27736--- linux-2.6.32.43/drivers/char/sonypi.c 2011-03-27 14:31:47.000000000 -0400
27737+++ linux-2.6.32.43/drivers/char/sonypi.c 2011-04-17 15:56:46.000000000 -0400
27738@@ -55,6 +55,7 @@
27739 #include <asm/uaccess.h>
27740 #include <asm/io.h>
27741 #include <asm/system.h>
27742+#include <asm/local.h>
27743
27744 #include <linux/sonypi.h>
27745
27746@@ -491,7 +492,7 @@ static struct sonypi_device {
27747 spinlock_t fifo_lock;
27748 wait_queue_head_t fifo_proc_list;
27749 struct fasync_struct *fifo_async;
27750- int open_count;
27751+ local_t open_count;
27752 int model;
27753 struct input_dev *input_jog_dev;
27754 struct input_dev *input_key_dev;
27755@@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, st
27756 static int sonypi_misc_release(struct inode *inode, struct file *file)
27757 {
27758 mutex_lock(&sonypi_device.lock);
27759- sonypi_device.open_count--;
27760+ local_dec(&sonypi_device.open_count);
27761 mutex_unlock(&sonypi_device.lock);
27762 return 0;
27763 }
27764@@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode
27765 lock_kernel();
27766 mutex_lock(&sonypi_device.lock);
27767 /* Flush input queue on first open */
27768- if (!sonypi_device.open_count)
27769+ if (!local_read(&sonypi_device.open_count))
27770 kfifo_reset(sonypi_device.fifo);
27771- sonypi_device.open_count++;
27772+ local_inc(&sonypi_device.open_count);
27773 mutex_unlock(&sonypi_device.lock);
27774 unlock_kernel();
27775 return 0;
27776diff -urNp linux-2.6.32.43/drivers/char/stallion.c linux-2.6.32.43/drivers/char/stallion.c
27777--- linux-2.6.32.43/drivers/char/stallion.c 2011-03-27 14:31:47.000000000 -0400
27778+++ linux-2.6.32.43/drivers/char/stallion.c 2011-05-16 21:46:57.000000000 -0400
27779@@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlp
27780 struct stlport stl_dummyport;
27781 struct stlport *portp;
27782
27783+ pax_track_stack();
27784+
27785 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
27786 return -EFAULT;
27787 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
27788diff -urNp linux-2.6.32.43/drivers/char/tpm/tpm_bios.c linux-2.6.32.43/drivers/char/tpm/tpm_bios.c
27789--- linux-2.6.32.43/drivers/char/tpm/tpm_bios.c 2011-03-27 14:31:47.000000000 -0400
27790+++ linux-2.6.32.43/drivers/char/tpm/tpm_bios.c 2011-04-17 15:56:46.000000000 -0400
27791@@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
27792 event = addr;
27793
27794 if ((event->event_type == 0 && event->event_size == 0) ||
27795- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
27796+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
27797 return NULL;
27798
27799 return addr;
27800@@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
27801 return NULL;
27802
27803 if ((event->event_type == 0 && event->event_size == 0) ||
27804- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
27805+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
27806 return NULL;
27807
27808 (*pos)++;
27809@@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
27810 int i;
27811
27812 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
27813- seq_putc(m, data[i]);
27814+ if (!seq_putc(m, data[i]))
27815+ return -EFAULT;
27816
27817 return 0;
27818 }
27819@@ -409,6 +410,11 @@ static int read_log(struct tpm_bios_log
27820 log->bios_event_log_end = log->bios_event_log + len;
27821
27822 virt = acpi_os_map_memory(start, len);
27823+ if (!virt) {
27824+ kfree(log->bios_event_log);
27825+ log->bios_event_log = NULL;
27826+ return -EFAULT;
27827+ }
27828
27829 memcpy(log->bios_event_log, virt, len);
27830
27831diff -urNp linux-2.6.32.43/drivers/char/tpm/tpm.c linux-2.6.32.43/drivers/char/tpm/tpm.c
27832--- linux-2.6.32.43/drivers/char/tpm/tpm.c 2011-04-17 17:00:52.000000000 -0400
27833+++ linux-2.6.32.43/drivers/char/tpm/tpm.c 2011-05-16 21:46:57.000000000 -0400
27834@@ -402,7 +402,7 @@ static ssize_t tpm_transmit(struct tpm_c
27835 chip->vendor.req_complete_val)
27836 goto out_recv;
27837
27838- if ((status == chip->vendor.req_canceled)) {
27839+ if (status == chip->vendor.req_canceled) {
27840 dev_err(chip->dev, "Operation Canceled\n");
27841 rc = -ECANCELED;
27842 goto out;
27843@@ -821,6 +821,8 @@ ssize_t tpm_show_pubek(struct device *de
27844
27845 struct tpm_chip *chip = dev_get_drvdata(dev);
27846
27847+ pax_track_stack();
27848+
27849 tpm_cmd.header.in = tpm_readpubek_header;
27850 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
27851 "attempting to read the PUBEK");
27852diff -urNp linux-2.6.32.43/drivers/char/tty_io.c linux-2.6.32.43/drivers/char/tty_io.c
27853--- linux-2.6.32.43/drivers/char/tty_io.c 2011-03-27 14:31:47.000000000 -0400
27854+++ linux-2.6.32.43/drivers/char/tty_io.c 2011-04-17 15:56:46.000000000 -0400
27855@@ -136,21 +136,10 @@ LIST_HEAD(tty_drivers); /* linked list
27856 DEFINE_MUTEX(tty_mutex);
27857 EXPORT_SYMBOL(tty_mutex);
27858
27859-static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
27860-static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
27861 ssize_t redirected_tty_write(struct file *, const char __user *,
27862 size_t, loff_t *);
27863-static unsigned int tty_poll(struct file *, poll_table *);
27864 static int tty_open(struct inode *, struct file *);
27865-static int tty_release(struct inode *, struct file *);
27866 long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
27867-#ifdef CONFIG_COMPAT
27868-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
27869- unsigned long arg);
27870-#else
27871-#define tty_compat_ioctl NULL
27872-#endif
27873-static int tty_fasync(int fd, struct file *filp, int on);
27874 static void release_tty(struct tty_struct *tty, int idx);
27875 static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
27876 static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty);
27877@@ -870,7 +859,7 @@ EXPORT_SYMBOL(start_tty);
27878 * read calls may be outstanding in parallel.
27879 */
27880
27881-static ssize_t tty_read(struct file *file, char __user *buf, size_t count,
27882+ssize_t tty_read(struct file *file, char __user *buf, size_t count,
27883 loff_t *ppos)
27884 {
27885 int i;
27886@@ -898,6 +887,8 @@ static ssize_t tty_read(struct file *fil
27887 return i;
27888 }
27889
27890+EXPORT_SYMBOL(tty_read);
27891+
27892 void tty_write_unlock(struct tty_struct *tty)
27893 {
27894 mutex_unlock(&tty->atomic_write_lock);
27895@@ -1045,7 +1036,7 @@ void tty_write_message(struct tty_struct
27896 * write method will not be invoked in parallel for each device.
27897 */
27898
27899-static ssize_t tty_write(struct file *file, const char __user *buf,
27900+ssize_t tty_write(struct file *file, const char __user *buf,
27901 size_t count, loff_t *ppos)
27902 {
27903 struct tty_struct *tty;
27904@@ -1072,6 +1063,8 @@ static ssize_t tty_write(struct file *fi
27905 return ret;
27906 }
27907
27908+EXPORT_SYMBOL(tty_write);
27909+
27910 ssize_t redirected_tty_write(struct file *file, const char __user *buf,
27911 size_t count, loff_t *ppos)
27912 {
27913@@ -1867,7 +1860,7 @@ static int tty_open(struct inode *inode,
27914 * Takes bkl. See tty_release_dev
27915 */
27916
27917-static int tty_release(struct inode *inode, struct file *filp)
27918+int tty_release(struct inode *inode, struct file *filp)
27919 {
27920 lock_kernel();
27921 tty_release_dev(filp);
27922@@ -1875,6 +1868,8 @@ static int tty_release(struct inode *ino
27923 return 0;
27924 }
27925
27926+EXPORT_SYMBOL(tty_release);
27927+
27928 /**
27929 * tty_poll - check tty status
27930 * @filp: file being polled
27931@@ -1887,7 +1882,7 @@ static int tty_release(struct inode *ino
27932 * may be re-entered freely by other callers.
27933 */
27934
27935-static unsigned int tty_poll(struct file *filp, poll_table *wait)
27936+unsigned int tty_poll(struct file *filp, poll_table *wait)
27937 {
27938 struct tty_struct *tty;
27939 struct tty_ldisc *ld;
27940@@ -1904,7 +1899,9 @@ static unsigned int tty_poll(struct file
27941 return ret;
27942 }
27943
27944-static int tty_fasync(int fd, struct file *filp, int on)
27945+EXPORT_SYMBOL(tty_poll);
27946+
27947+int tty_fasync(int fd, struct file *filp, int on)
27948 {
27949 struct tty_struct *tty;
27950 unsigned long flags;
27951@@ -1948,6 +1945,8 @@ out:
27952 return retval;
27953 }
27954
27955+EXPORT_SYMBOL(tty_fasync);
27956+
27957 /**
27958 * tiocsti - fake input character
27959 * @tty: tty to fake input into
27960@@ -2582,8 +2581,10 @@ long tty_ioctl(struct file *file, unsign
27961 return retval;
27962 }
27963
27964+EXPORT_SYMBOL(tty_ioctl);
27965+
27966 #ifdef CONFIG_COMPAT
27967-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
27968+long tty_compat_ioctl(struct file *file, unsigned int cmd,
27969 unsigned long arg)
27970 {
27971 struct inode *inode = file->f_dentry->d_inode;
27972@@ -2607,6 +2608,8 @@ static long tty_compat_ioctl(struct file
27973
27974 return retval;
27975 }
27976+
27977+EXPORT_SYMBOL(tty_compat_ioctl);
27978 #endif
27979
27980 /*
27981@@ -3050,11 +3053,6 @@ struct tty_struct *get_current_tty(void)
27982 }
27983 EXPORT_SYMBOL_GPL(get_current_tty);
27984
27985-void tty_default_fops(struct file_operations *fops)
27986-{
27987- *fops = tty_fops;
27988-}
27989-
27990 /*
27991 * Initialize the console device. This is called *early*, so
27992 * we can't necessarily depend on lots of kernel help here.
27993diff -urNp linux-2.6.32.43/drivers/char/tty_ldisc.c linux-2.6.32.43/drivers/char/tty_ldisc.c
27994--- linux-2.6.32.43/drivers/char/tty_ldisc.c 2011-07-13 17:23:04.000000000 -0400
27995+++ linux-2.6.32.43/drivers/char/tty_ldisc.c 2011-07-13 17:23:18.000000000 -0400
27996@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
27997 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
27998 struct tty_ldisc_ops *ldo = ld->ops;
27999
28000- ldo->refcount--;
28001+ atomic_dec(&ldo->refcount);
28002 module_put(ldo->owner);
28003 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28004
28005@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
28006 spin_lock_irqsave(&tty_ldisc_lock, flags);
28007 tty_ldiscs[disc] = new_ldisc;
28008 new_ldisc->num = disc;
28009- new_ldisc->refcount = 0;
28010+ atomic_set(&new_ldisc->refcount, 0);
28011 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28012
28013 return ret;
28014@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
28015 return -EINVAL;
28016
28017 spin_lock_irqsave(&tty_ldisc_lock, flags);
28018- if (tty_ldiscs[disc]->refcount)
28019+ if (atomic_read(&tty_ldiscs[disc]->refcount))
28020 ret = -EBUSY;
28021 else
28022 tty_ldiscs[disc] = NULL;
28023@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
28024 if (ldops) {
28025 ret = ERR_PTR(-EAGAIN);
28026 if (try_module_get(ldops->owner)) {
28027- ldops->refcount++;
28028+ atomic_inc(&ldops->refcount);
28029 ret = ldops;
28030 }
28031 }
28032@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
28033 unsigned long flags;
28034
28035 spin_lock_irqsave(&tty_ldisc_lock, flags);
28036- ldops->refcount--;
28037+ atomic_dec(&ldops->refcount);
28038 module_put(ldops->owner);
28039 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28040 }
28041diff -urNp linux-2.6.32.43/drivers/char/virtio_console.c linux-2.6.32.43/drivers/char/virtio_console.c
28042--- linux-2.6.32.43/drivers/char/virtio_console.c 2011-03-27 14:31:47.000000000 -0400
28043+++ linux-2.6.32.43/drivers/char/virtio_console.c 2011-04-17 15:56:46.000000000 -0400
28044@@ -44,6 +44,7 @@ static unsigned int in_len;
28045 static char *in, *inbuf;
28046
28047 /* The operations for our console. */
28048+/* cannot be const */
28049 static struct hv_ops virtio_cons;
28050
28051 /* The hvc device */
28052diff -urNp linux-2.6.32.43/drivers/char/vt.c linux-2.6.32.43/drivers/char/vt.c
28053--- linux-2.6.32.43/drivers/char/vt.c 2011-03-27 14:31:47.000000000 -0400
28054+++ linux-2.6.32.43/drivers/char/vt.c 2011-04-17 15:56:46.000000000 -0400
28055@@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
28056
28057 static void notify_write(struct vc_data *vc, unsigned int unicode)
28058 {
28059- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
28060+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
28061 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
28062 }
28063
28064diff -urNp linux-2.6.32.43/drivers/char/vt_ioctl.c linux-2.6.32.43/drivers/char/vt_ioctl.c
28065--- linux-2.6.32.43/drivers/char/vt_ioctl.c 2011-03-27 14:31:47.000000000 -0400
28066+++ linux-2.6.32.43/drivers/char/vt_ioctl.c 2011-04-17 15:56:46.000000000 -0400
28067@@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28068 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
28069 return -EFAULT;
28070
28071- if (!capable(CAP_SYS_TTY_CONFIG))
28072- perm = 0;
28073-
28074 switch (cmd) {
28075 case KDGKBENT:
28076 key_map = key_maps[s];
28077@@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28078 val = (i ? K_HOLE : K_NOSUCHMAP);
28079 return put_user(val, &user_kbe->kb_value);
28080 case KDSKBENT:
28081+ if (!capable(CAP_SYS_TTY_CONFIG))
28082+ perm = 0;
28083+
28084 if (!perm)
28085 return -EPERM;
28086+
28087 if (!i && v == K_NOSUCHMAP) {
28088 /* deallocate map */
28089 key_map = key_maps[s];
28090@@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28091 int i, j, k;
28092 int ret;
28093
28094- if (!capable(CAP_SYS_TTY_CONFIG))
28095- perm = 0;
28096-
28097 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
28098 if (!kbs) {
28099 ret = -ENOMEM;
28100@@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28101 kfree(kbs);
28102 return ((p && *p) ? -EOVERFLOW : 0);
28103 case KDSKBSENT:
28104+ if (!capable(CAP_SYS_TTY_CONFIG))
28105+ perm = 0;
28106+
28107 if (!perm) {
28108 ret = -EPERM;
28109 goto reterr;
28110diff -urNp linux-2.6.32.43/drivers/cpufreq/cpufreq.c linux-2.6.32.43/drivers/cpufreq/cpufreq.c
28111--- linux-2.6.32.43/drivers/cpufreq/cpufreq.c 2011-06-25 12:55:34.000000000 -0400
28112+++ linux-2.6.32.43/drivers/cpufreq/cpufreq.c 2011-06-25 12:56:37.000000000 -0400
28113@@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct
28114 complete(&policy->kobj_unregister);
28115 }
28116
28117-static struct sysfs_ops sysfs_ops = {
28118+static const struct sysfs_ops sysfs_ops = {
28119 .show = show,
28120 .store = store,
28121 };
28122diff -urNp linux-2.6.32.43/drivers/cpuidle/sysfs.c linux-2.6.32.43/drivers/cpuidle/sysfs.c
28123--- linux-2.6.32.43/drivers/cpuidle/sysfs.c 2011-03-27 14:31:47.000000000 -0400
28124+++ linux-2.6.32.43/drivers/cpuidle/sysfs.c 2011-04-17 15:56:46.000000000 -0400
28125@@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
28126 return ret;
28127 }
28128
28129-static struct sysfs_ops cpuidle_sysfs_ops = {
28130+static const struct sysfs_ops cpuidle_sysfs_ops = {
28131 .show = cpuidle_show,
28132 .store = cpuidle_store,
28133 };
28134@@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
28135 return ret;
28136 }
28137
28138-static struct sysfs_ops cpuidle_state_sysfs_ops = {
28139+static const struct sysfs_ops cpuidle_state_sysfs_ops = {
28140 .show = cpuidle_state_show,
28141 };
28142
28143@@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpui
28144 .release = cpuidle_state_sysfs_release,
28145 };
28146
28147-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28148+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28149 {
28150 kobject_put(&device->kobjs[i]->kobj);
28151 wait_for_completion(&device->kobjs[i]->kobj_unregister);
28152diff -urNp linux-2.6.32.43/drivers/crypto/hifn_795x.c linux-2.6.32.43/drivers/crypto/hifn_795x.c
28153--- linux-2.6.32.43/drivers/crypto/hifn_795x.c 2011-03-27 14:31:47.000000000 -0400
28154+++ linux-2.6.32.43/drivers/crypto/hifn_795x.c 2011-05-16 21:46:57.000000000 -0400
28155@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
28156 0xCA, 0x34, 0x2B, 0x2E};
28157 struct scatterlist sg;
28158
28159+ pax_track_stack();
28160+
28161 memset(src, 0, sizeof(src));
28162 memset(ctx.key, 0, sizeof(ctx.key));
28163
28164diff -urNp linux-2.6.32.43/drivers/crypto/padlock-aes.c linux-2.6.32.43/drivers/crypto/padlock-aes.c
28165--- linux-2.6.32.43/drivers/crypto/padlock-aes.c 2011-03-27 14:31:47.000000000 -0400
28166+++ linux-2.6.32.43/drivers/crypto/padlock-aes.c 2011-05-16 21:46:57.000000000 -0400
28167@@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm
28168 struct crypto_aes_ctx gen_aes;
28169 int cpu;
28170
28171+ pax_track_stack();
28172+
28173 if (key_len % 8) {
28174 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
28175 return -EINVAL;
28176diff -urNp linux-2.6.32.43/drivers/dma/ioat/dma.c linux-2.6.32.43/drivers/dma/ioat/dma.c
28177--- linux-2.6.32.43/drivers/dma/ioat/dma.c 2011-03-27 14:31:47.000000000 -0400
28178+++ linux-2.6.32.43/drivers/dma/ioat/dma.c 2011-04-17 15:56:46.000000000 -0400
28179@@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, str
28180 return entry->show(&chan->common, page);
28181 }
28182
28183-struct sysfs_ops ioat_sysfs_ops = {
28184+const struct sysfs_ops ioat_sysfs_ops = {
28185 .show = ioat_attr_show,
28186 };
28187
28188diff -urNp linux-2.6.32.43/drivers/dma/ioat/dma.h linux-2.6.32.43/drivers/dma/ioat/dma.h
28189--- linux-2.6.32.43/drivers/dma/ioat/dma.h 2011-03-27 14:31:47.000000000 -0400
28190+++ linux-2.6.32.43/drivers/dma/ioat/dma.h 2011-04-17 15:56:46.000000000 -0400
28191@@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_c
28192 unsigned long *phys_complete);
28193 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
28194 void ioat_kobject_del(struct ioatdma_device *device);
28195-extern struct sysfs_ops ioat_sysfs_ops;
28196+extern const struct sysfs_ops ioat_sysfs_ops;
28197 extern struct ioat_sysfs_entry ioat_version_attr;
28198 extern struct ioat_sysfs_entry ioat_cap_attr;
28199 #endif /* IOATDMA_H */
28200diff -urNp linux-2.6.32.43/drivers/edac/edac_device_sysfs.c linux-2.6.32.43/drivers/edac/edac_device_sysfs.c
28201--- linux-2.6.32.43/drivers/edac/edac_device_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28202+++ linux-2.6.32.43/drivers/edac/edac_device_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28203@@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
28204 }
28205
28206 /* edac_dev file operations for an 'ctl_info' */
28207-static struct sysfs_ops device_ctl_info_ops = {
28208+static const struct sysfs_ops device_ctl_info_ops = {
28209 .show = edac_dev_ctl_info_show,
28210 .store = edac_dev_ctl_info_store
28211 };
28212@@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
28213 }
28214
28215 /* edac_dev file operations for an 'instance' */
28216-static struct sysfs_ops device_instance_ops = {
28217+static const struct sysfs_ops device_instance_ops = {
28218 .show = edac_dev_instance_show,
28219 .store = edac_dev_instance_store
28220 };
28221@@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
28222 }
28223
28224 /* edac_dev file operations for a 'block' */
28225-static struct sysfs_ops device_block_ops = {
28226+static const struct sysfs_ops device_block_ops = {
28227 .show = edac_dev_block_show,
28228 .store = edac_dev_block_store
28229 };
28230diff -urNp linux-2.6.32.43/drivers/edac/edac_mc_sysfs.c linux-2.6.32.43/drivers/edac/edac_mc_sysfs.c
28231--- linux-2.6.32.43/drivers/edac/edac_mc_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28232+++ linux-2.6.32.43/drivers/edac/edac_mc_sysfs.c 2011-04-17 15:56:46.000000000 -0400
28233@@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
28234 return -EIO;
28235 }
28236
28237-static struct sysfs_ops csrowfs_ops = {
28238+static const struct sysfs_ops csrowfs_ops = {
28239 .show = csrowdev_show,
28240 .store = csrowdev_store
28241 };
28242@@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
28243 }
28244
28245 /* Intermediate show/store table */
28246-static struct sysfs_ops mci_ops = {
28247+static const struct sysfs_ops mci_ops = {
28248 .show = mcidev_show,
28249 .store = mcidev_store
28250 };
28251diff -urNp linux-2.6.32.43/drivers/edac/edac_pci_sysfs.c linux-2.6.32.43/drivers/edac/edac_pci_sysfs.c
28252--- linux-2.6.32.43/drivers/edac/edac_pci_sysfs.c 2011-03-27 14:31:47.000000000 -0400
28253+++ linux-2.6.32.43/drivers/edac/edac_pci_sysfs.c 2011-05-04 17:56:20.000000000 -0400
28254@@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log
28255 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
28256 static int edac_pci_poll_msec = 1000; /* one second workq period */
28257
28258-static atomic_t pci_parity_count = ATOMIC_INIT(0);
28259-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
28260+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
28261+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
28262
28263 static struct kobject *edac_pci_top_main_kobj;
28264 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
28265@@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
28266 }
28267
28268 /* fs_ops table */
28269-static struct sysfs_ops pci_instance_ops = {
28270+static const struct sysfs_ops pci_instance_ops = {
28271 .show = edac_pci_instance_show,
28272 .store = edac_pci_instance_store
28273 };
28274@@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
28275 return -EIO;
28276 }
28277
28278-static struct sysfs_ops edac_pci_sysfs_ops = {
28279+static const struct sysfs_ops edac_pci_sysfs_ops = {
28280 .show = edac_pci_dev_show,
28281 .store = edac_pci_dev_store
28282 };
28283@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
28284 edac_printk(KERN_CRIT, EDAC_PCI,
28285 "Signaled System Error on %s\n",
28286 pci_name(dev));
28287- atomic_inc(&pci_nonparity_count);
28288+ atomic_inc_unchecked(&pci_nonparity_count);
28289 }
28290
28291 if (status & (PCI_STATUS_PARITY)) {
28292@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
28293 "Master Data Parity Error on %s\n",
28294 pci_name(dev));
28295
28296- atomic_inc(&pci_parity_count);
28297+ atomic_inc_unchecked(&pci_parity_count);
28298 }
28299
28300 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28301@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
28302 "Detected Parity Error on %s\n",
28303 pci_name(dev));
28304
28305- atomic_inc(&pci_parity_count);
28306+ atomic_inc_unchecked(&pci_parity_count);
28307 }
28308 }
28309
28310@@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str
28311 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
28312 "Signaled System Error on %s\n",
28313 pci_name(dev));
28314- atomic_inc(&pci_nonparity_count);
28315+ atomic_inc_unchecked(&pci_nonparity_count);
28316 }
28317
28318 if (status & (PCI_STATUS_PARITY)) {
28319@@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str
28320 "Master Data Parity Error on "
28321 "%s\n", pci_name(dev));
28322
28323- atomic_inc(&pci_parity_count);
28324+ atomic_inc_unchecked(&pci_parity_count);
28325 }
28326
28327 if (status & (PCI_STATUS_DETECTED_PARITY)) {
28328@@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str
28329 "Detected Parity Error on %s\n",
28330 pci_name(dev));
28331
28332- atomic_inc(&pci_parity_count);
28333+ atomic_inc_unchecked(&pci_parity_count);
28334 }
28335 }
28336 }
28337@@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
28338 if (!check_pci_errors)
28339 return;
28340
28341- before_count = atomic_read(&pci_parity_count);
28342+ before_count = atomic_read_unchecked(&pci_parity_count);
28343
28344 /* scan all PCI devices looking for a Parity Error on devices and
28345 * bridges.
28346@@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
28347 /* Only if operator has selected panic on PCI Error */
28348 if (edac_pci_get_panic_on_pe()) {
28349 /* If the count is different 'after' from 'before' */
28350- if (before_count != atomic_read(&pci_parity_count))
28351+ if (before_count != atomic_read_unchecked(&pci_parity_count))
28352 panic("EDAC: PCI Parity Error");
28353 }
28354 }
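
Throughout the patch, counters that only accumulate statistics, such as the EDAC parity/non-parity counters above, are switched from atomic_t to atomic_unchecked_t and their accessors to the *_unchecked variants. The apparent intent is that ordinary atomic_t operations gain overflow (reference-count) checking elsewhere in this patch, and counters that may legitimately wrap opt out of that check. The sketch below is a userspace model of that split with made-up names; it is not the kernel definition.

/*
 * Illustrative userspace model only: a "checked" counter that traps on
 * overflow versus an "unchecked" one that is free to wrap, mirroring
 * the atomic_t / atomic_unchecked_t split used above.
 */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int counter; } checked_counter_t;    /* models atomic_t           */
typedef struct { int counter; } unchecked_counter_t;  /* models atomic_unchecked_t */

static void checked_inc(checked_counter_t *v)
{
        if (v->counter == INT_MAX) {    /* model of the overflow trap */
                fprintf(stderr, "refcount overflow detected\n");
                abort();
        }
        v->counter++;
}

static void unchecked_inc(unchecked_counter_t *v)
{
        v->counter++;                   /* free to wrap, no policy applied */
}

int main(void)
{
        checked_counter_t refs = { 0 };
        unchecked_counter_t stats = { 0 };

        checked_inc(&refs);             /* reference counts want the check      */
        unchecked_inc(&stats);          /* event statistics are allowed to wrap */
        printf("refs=%d stats=%d\n", refs.counter, stats.counter);
        return 0;
}
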
28355diff -urNp linux-2.6.32.43/drivers/firewire/core-cdev.c linux-2.6.32.43/drivers/firewire/core-cdev.c
28356--- linux-2.6.32.43/drivers/firewire/core-cdev.c 2011-03-27 14:31:47.000000000 -0400
28357+++ linux-2.6.32.43/drivers/firewire/core-cdev.c 2011-04-17 15:56:46.000000000 -0400
28358@@ -1141,8 +1141,7 @@ static int init_iso_resource(struct clie
28359 int ret;
28360
28361 if ((request->channels == 0 && request->bandwidth == 0) ||
28362- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
28363- request->bandwidth < 0)
28364+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
28365 return -EINVAL;
28366
28367 r = kmalloc(sizeof(*r), GFP_KERNEL);
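
The core-cdev.c hunk above drops the "request->bandwidth < 0" test. Assuming bandwidth is the __u32 field from the firewire cdev ABI, that comparison could never be true, so only the upper-bound check is kept. A two-line reminder of why such tests are dead code (gcc's -Wtype-limits flags them):

#include <stdio.h>

int main(void)
{
        unsigned int bandwidth = 0;

        /* "comparison of unsigned expression < 0 is always false" */
        if (bandwidth < 0)
                printf("never reached\n");
        else
                printf("always taken: unsigned values cannot be negative\n");
        return 0;
}
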
28368diff -urNp linux-2.6.32.43/drivers/firewire/core-transaction.c linux-2.6.32.43/drivers/firewire/core-transaction.c
28369--- linux-2.6.32.43/drivers/firewire/core-transaction.c 2011-03-27 14:31:47.000000000 -0400
28370+++ linux-2.6.32.43/drivers/firewire/core-transaction.c 2011-05-16 21:46:57.000000000 -0400
28371@@ -36,6 +36,7 @@
28372 #include <linux/string.h>
28373 #include <linux/timer.h>
28374 #include <linux/types.h>
28375+#include <linux/sched.h>
28376
28377 #include <asm/byteorder.h>
28378
28379@@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *c
28380 struct transaction_callback_data d;
28381 struct fw_transaction t;
28382
28383+ pax_track_stack();
28384+
28385 init_completion(&d.done);
28386 d.payload = payload;
28387 fw_send_request(card, &t, tcode, destination_id, generation, speed,
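
fw_run_transaction() gains a pax_track_stack() call (and a <linux/sched.h> include to provide it). Such calls appear throughout the patch at the top of functions with unusually large stack frames, here the on-stack struct fw_transaction, and seem to belong to PaX's kernel-stack tracking and sanitisation. The sketch below only illustrates why such call sites are singled out; the threshold, macro and structure are invented.

/* Hypothetical userspace sketch: flag functions that put large objects
 * on the stack, the kind of call site pax_track_stack() is added to. */
#include <stdio.h>

struct big_transaction {                /* stand-in for a large on-stack struct */
        unsigned char payload[512];
        long header[16];
};

#define STACK_WARN_BYTES 256

#define track_stack(obj)                                              \
        do {                                                          \
                if (sizeof(obj) > STACK_WARN_BYTES)                   \
                        printf("%s: %zu bytes of stack for %s\n",     \
                               __func__, sizeof(obj), #obj);          \
        } while (0)

static void run_transaction(void)
{
        struct big_transaction t;       /* large local, as in the hunk above */

        track_stack(t);
        t.payload[0] = 0;               /* touch it so it is not optimised away */
        (void)t;
}

int main(void)
{
        run_transaction();
        return 0;
}
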
28388diff -urNp linux-2.6.32.43/drivers/firmware/dmi_scan.c linux-2.6.32.43/drivers/firmware/dmi_scan.c
28389--- linux-2.6.32.43/drivers/firmware/dmi_scan.c 2011-03-27 14:31:47.000000000 -0400
28390+++ linux-2.6.32.43/drivers/firmware/dmi_scan.c 2011-04-17 15:56:46.000000000 -0400
28391@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
28392 }
28393 }
28394 else {
28395- /*
28396- * no iounmap() for that ioremap(); it would be a no-op, but
28397- * it's so early in setup that sucker gets confused into doing
28398- * what it shouldn't if we actually call it.
28399- */
28400 p = dmi_ioremap(0xF0000, 0x10000);
28401 if (p == NULL)
28402 goto error;
28403diff -urNp linux-2.6.32.43/drivers/firmware/edd.c linux-2.6.32.43/drivers/firmware/edd.c
28404--- linux-2.6.32.43/drivers/firmware/edd.c 2011-03-27 14:31:47.000000000 -0400
28405+++ linux-2.6.32.43/drivers/firmware/edd.c 2011-04-17 15:56:46.000000000 -0400
28406@@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
28407 return ret;
28408 }
28409
28410-static struct sysfs_ops edd_attr_ops = {
28411+static const struct sysfs_ops edd_attr_ops = {
28412 .show = edd_attr_show,
28413 };
28414
28415diff -urNp linux-2.6.32.43/drivers/firmware/efivars.c linux-2.6.32.43/drivers/firmware/efivars.c
28416--- linux-2.6.32.43/drivers/firmware/efivars.c 2011-03-27 14:31:47.000000000 -0400
28417+++ linux-2.6.32.43/drivers/firmware/efivars.c 2011-04-17 15:56:46.000000000 -0400
28418@@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
28419 return ret;
28420 }
28421
28422-static struct sysfs_ops efivar_attr_ops = {
28423+static const struct sysfs_ops efivar_attr_ops = {
28424 .show = efivar_attr_show,
28425 .store = efivar_attr_store,
28426 };
28427diff -urNp linux-2.6.32.43/drivers/firmware/iscsi_ibft.c linux-2.6.32.43/drivers/firmware/iscsi_ibft.c
28428--- linux-2.6.32.43/drivers/firmware/iscsi_ibft.c 2011-03-27 14:31:47.000000000 -0400
28429+++ linux-2.6.32.43/drivers/firmware/iscsi_ibft.c 2011-04-17 15:56:46.000000000 -0400
28430@@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
28431 return ret;
28432 }
28433
28434-static struct sysfs_ops ibft_attr_ops = {
28435+static const struct sysfs_ops ibft_attr_ops = {
28436 .show = ibft_show_attribute,
28437 };
28438
28439diff -urNp linux-2.6.32.43/drivers/firmware/memmap.c linux-2.6.32.43/drivers/firmware/memmap.c
28440--- linux-2.6.32.43/drivers/firmware/memmap.c 2011-03-27 14:31:47.000000000 -0400
28441+++ linux-2.6.32.43/drivers/firmware/memmap.c 2011-04-17 15:56:46.000000000 -0400
28442@@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
28443 NULL
28444 };
28445
28446-static struct sysfs_ops memmap_attr_ops = {
28447+static const struct sysfs_ops memmap_attr_ops = {
28448 .show = memmap_attr_show,
28449 };
28450
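
The edd, efivars, iscsi_ibft and memmap hunks above (like the earlier EDAC ones) all apply the same one-word change: sysfs_ops tables that are never written are declared const, so their function pointers live in read-only data and cannot be retargeted by a stray or malicious runtime write. A small userspace model of the pattern:

#include <stdio.h>

struct show_ops {
        void (*show)(const char *name);
};

static void my_show(const char *name)
{
        printf("show(%s)\n", name);
}

/* lives in .rodata; writing to it at runtime would fault */
static const struct show_ops my_ops = {
        .show = my_show,
};

int main(void)
{
        my_ops.show("example");
        return 0;
}
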
28451diff -urNp linux-2.6.32.43/drivers/gpio/vr41xx_giu.c linux-2.6.32.43/drivers/gpio/vr41xx_giu.c
28452--- linux-2.6.32.43/drivers/gpio/vr41xx_giu.c 2011-03-27 14:31:47.000000000 -0400
28453+++ linux-2.6.32.43/drivers/gpio/vr41xx_giu.c 2011-05-04 17:56:28.000000000 -0400
28454@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
28455 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
28456 maskl, pendl, maskh, pendh);
28457
28458- atomic_inc(&irq_err_count);
28459+ atomic_inc_unchecked(&irq_err_count);
28460
28461 return -EINVAL;
28462 }
28463diff -urNp linux-2.6.32.43/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.32.43/drivers/gpu/drm/drm_crtc_helper.c
28464--- linux-2.6.32.43/drivers/gpu/drm/drm_crtc_helper.c 2011-03-27 14:31:47.000000000 -0400
28465+++ linux-2.6.32.43/drivers/gpu/drm/drm_crtc_helper.c 2011-05-16 21:46:57.000000000 -0400
28466@@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct d
28467 struct drm_crtc *tmp;
28468 int crtc_mask = 1;
28469
28470- WARN(!crtc, "checking null crtc?");
28471+ BUG_ON(!crtc);
28472
28473 dev = crtc->dev;
28474
28475@@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm
28476
28477 adjusted_mode = drm_mode_duplicate(dev, mode);
28478
28479+ pax_track_stack();
28480+
28481 crtc->enabled = drm_helper_crtc_in_use(crtc);
28482
28483 if (!crtc->enabled)
28484diff -urNp linux-2.6.32.43/drivers/gpu/drm/drm_drv.c linux-2.6.32.43/drivers/gpu/drm/drm_drv.c
28485--- linux-2.6.32.43/drivers/gpu/drm/drm_drv.c 2011-03-27 14:31:47.000000000 -0400
28486+++ linux-2.6.32.43/drivers/gpu/drm/drm_drv.c 2011-04-17 15:56:46.000000000 -0400
28487@@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
28488 char *kdata = NULL;
28489
28490 atomic_inc(&dev->ioctl_count);
28491- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
28492+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
28493 ++file_priv->ioctl_count;
28494
28495 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
28496diff -urNp linux-2.6.32.43/drivers/gpu/drm/drm_fops.c linux-2.6.32.43/drivers/gpu/drm/drm_fops.c
28497--- linux-2.6.32.43/drivers/gpu/drm/drm_fops.c 2011-03-27 14:31:47.000000000 -0400
28498+++ linux-2.6.32.43/drivers/gpu/drm/drm_fops.c 2011-04-17 15:56:46.000000000 -0400
28499@@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
28500 }
28501
28502 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
28503- atomic_set(&dev->counts[i], 0);
28504+ atomic_set_unchecked(&dev->counts[i], 0);
28505
28506 dev->sigdata.lock = NULL;
28507
28508@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
28509
28510 retcode = drm_open_helper(inode, filp, dev);
28511 if (!retcode) {
28512- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
28513+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
28514 spin_lock(&dev->count_lock);
28515- if (!dev->open_count++) {
28516+ if (local_inc_return(&dev->open_count) == 1) {
28517 spin_unlock(&dev->count_lock);
28518 retcode = drm_setup(dev);
28519 goto out;
28520@@ -435,7 +435,7 @@ int drm_release(struct inode *inode, str
28521
28522 lock_kernel();
28523
28524- DRM_DEBUG("open_count = %d\n", dev->open_count);
28525+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
28526
28527 if (dev->driver->preclose)
28528 dev->driver->preclose(dev, file_priv);
28529@@ -447,7 +447,7 @@ int drm_release(struct inode *inode, str
28530 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
28531 task_pid_nr(current),
28532 (long)old_encode_dev(file_priv->minor->device),
28533- dev->open_count);
28534+ local_read(&dev->open_count));
28535
28536 /* if the master has gone away we can't do anything with the lock */
28537 if (file_priv->minor->master)
28538@@ -524,9 +524,9 @@ int drm_release(struct inode *inode, str
28539 * End inline drm_release
28540 */
28541
28542- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
28543+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
28544 spin_lock(&dev->count_lock);
28545- if (!--dev->open_count) {
28546+ if (local_dec_and_test(&dev->open_count)) {
28547 if (atomic_read(&dev->ioctl_count)) {
28548 DRM_ERROR("Device busy: %d\n",
28549 atomic_read(&dev->ioctl_count));
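
In drm_fops.c above, dev->open_count is switched from a plain integer tested with post-increment and pre-decrement to local_t helpers: local_inc_return(...) == 1 detects the first opener and local_dec_and_test(...) the last closer, keeping each read-modify-write in a single atomic operation. The userspace sketch below shows why the two idioms are equivalent, with C11 atomics standing in for local_t.

#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count = 0;

static int device_open(void)
{
        /* first opener sees the count go 0 -> 1, mirrors local_inc_return(..) == 1 */
        return atomic_fetch_add(&open_count, 1) + 1 == 1;
}

static int device_release(void)
{
        /* last closer sees the count hit 0, mirrors local_dec_and_test(..) */
        return atomic_fetch_sub(&open_count, 1) - 1 == 0;
}

int main(void)
{
        printf("first open?  %d\n", device_open());     /* 1 */
        printf("second open? %d\n", device_open());     /* 0 */
        printf("last close?  %d\n", device_release());  /* 0 */
        printf("last close?  %d\n", device_release());  /* 1 */
        return 0;
}
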
28550diff -urNp linux-2.6.32.43/drivers/gpu/drm/drm_gem.c linux-2.6.32.43/drivers/gpu/drm/drm_gem.c
28551--- linux-2.6.32.43/drivers/gpu/drm/drm_gem.c 2011-03-27 14:31:47.000000000 -0400
28552+++ linux-2.6.32.43/drivers/gpu/drm/drm_gem.c 2011-04-17 15:56:46.000000000 -0400
28553@@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
28554 spin_lock_init(&dev->object_name_lock);
28555 idr_init(&dev->object_name_idr);
28556 atomic_set(&dev->object_count, 0);
28557- atomic_set(&dev->object_memory, 0);
28558+ atomic_set_unchecked(&dev->object_memory, 0);
28559 atomic_set(&dev->pin_count, 0);
28560- atomic_set(&dev->pin_memory, 0);
28561+ atomic_set_unchecked(&dev->pin_memory, 0);
28562 atomic_set(&dev->gtt_count, 0);
28563- atomic_set(&dev->gtt_memory, 0);
28564+ atomic_set_unchecked(&dev->gtt_memory, 0);
28565
28566 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
28567 if (!mm) {
28568@@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *
28569 goto fput;
28570 }
28571 atomic_inc(&dev->object_count);
28572- atomic_add(obj->size, &dev->object_memory);
28573+ atomic_add_unchecked(obj->size, &dev->object_memory);
28574 return obj;
28575 fput:
28576 fput(obj->filp);
28577@@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
28578
28579 fput(obj->filp);
28580 atomic_dec(&dev->object_count);
28581- atomic_sub(obj->size, &dev->object_memory);
28582+ atomic_sub_unchecked(obj->size, &dev->object_memory);
28583 kfree(obj);
28584 }
28585 EXPORT_SYMBOL(drm_gem_object_free);
28586diff -urNp linux-2.6.32.43/drivers/gpu/drm/drm_info.c linux-2.6.32.43/drivers/gpu/drm/drm_info.c
28587--- linux-2.6.32.43/drivers/gpu/drm/drm_info.c 2011-03-27 14:31:47.000000000 -0400
28588+++ linux-2.6.32.43/drivers/gpu/drm/drm_info.c 2011-04-17 15:56:46.000000000 -0400
28589@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
28590 struct drm_local_map *map;
28591 struct drm_map_list *r_list;
28592
28593- /* Hardcoded from _DRM_FRAME_BUFFER,
28594- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
28595- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
28596- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
28597+ static const char * const types[] = {
28598+ [_DRM_FRAME_BUFFER] = "FB",
28599+ [_DRM_REGISTERS] = "REG",
28600+ [_DRM_SHM] = "SHM",
28601+ [_DRM_AGP] = "AGP",
28602+ [_DRM_SCATTER_GATHER] = "SG",
28603+ [_DRM_CONSISTENT] = "PCI",
28604+ [_DRM_GEM] = "GEM" };
28605 const char *type;
28606 int i;
28607
28608@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
28609 map = r_list->map;
28610 if (!map)
28611 continue;
28612- if (map->type < 0 || map->type > 5)
28613+ if (map->type >= ARRAY_SIZE(types))
28614 type = "??";
28615 else
28616 type = types[map->type];
28617@@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file
28618 struct drm_device *dev = node->minor->dev;
28619
28620 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
28621- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
28622+ seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
28623 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
28624- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
28625- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
28626+ seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
28627+ seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
28628 seq_printf(m, "%d gtt total\n", dev->gtt_total);
28629 return 0;
28630 }
28631@@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, voi
28632 mutex_lock(&dev->struct_mutex);
28633 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
28634 atomic_read(&dev->vma_count),
28635+#ifdef CONFIG_GRKERNSEC_HIDESYM
28636+ NULL, 0);
28637+#else
28638 high_memory, (u64)virt_to_phys(high_memory));
28639+#endif
28640
28641 list_for_each_entry(pt, &dev->vmalist, head) {
28642 vma = pt->vma;
28643@@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, voi
28644 continue;
28645 seq_printf(m,
28646 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
28647- pt->pid, vma->vm_start, vma->vm_end,
28648+ pt->pid,
28649+#ifdef CONFIG_GRKERNSEC_HIDESYM
28650+ 0, 0,
28651+#else
28652+ vma->vm_start, vma->vm_end,
28653+#endif
28654 vma->vm_flags & VM_READ ? 'r' : '-',
28655 vma->vm_flags & VM_WRITE ? 'w' : '-',
28656 vma->vm_flags & VM_EXEC ? 'x' : '-',
28657 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
28658 vma->vm_flags & VM_LOCKED ? 'l' : '-',
28659 vma->vm_flags & VM_IO ? 'i' : '-',
28660+#ifdef CONFIG_GRKERNSEC_HIDESYM
28661+ 0);
28662+#else
28663 vma->vm_pgoff);
28664+#endif
28665
28666 #if defined(__i386__)
28667 pgprot = pgprot_val(vma->vm_page_prot);
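
The drm_vm_info() hunk above replaces a positionally ordered string table and a hardcoded "> 5" bound with designated initializers indexed by the map-type enumerators plus an ARRAY_SIZE() check, and adds the missing "GEM" entry. A standalone version of the same idiom (the enumerator names here are invented stand-ins for the _DRM_* constants):

#include <stdio.h>

enum map_type { MAP_FB, MAP_REG, MAP_SHM, MAP_AGP, MAP_SG, MAP_PCI, MAP_GEM };

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char * const types[] = {
        [MAP_FB]  = "FB",
        [MAP_REG] = "REG",
        [MAP_SHM] = "SHM",
        [MAP_AGP] = "AGP",
        [MAP_SG]  = "SG",
        [MAP_PCI] = "PCI",
        [MAP_GEM] = "GEM",
};

static const char *type_name(unsigned int t)
{
        /* bound check derived from the table itself, as in the hunk above */
        return t < ARRAY_SIZE(types) ? types[t] : "??";
}

int main(void)
{
        printf("%s %s %s\n", type_name(MAP_GEM), type_name(MAP_SHM), type_name(42));
        return 0;
}

Because the bound comes from ARRAY_SIZE(), adding a new enumerator and table entry keeps the check correct without touching it.
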
28668diff -urNp linux-2.6.32.43/drivers/gpu/drm/drm_ioctl.c linux-2.6.32.43/drivers/gpu/drm/drm_ioctl.c
28669--- linux-2.6.32.43/drivers/gpu/drm/drm_ioctl.c 2011-03-27 14:31:47.000000000 -0400
28670+++ linux-2.6.32.43/drivers/gpu/drm/drm_ioctl.c 2011-04-17 15:56:46.000000000 -0400
28671@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
28672 stats->data[i].value =
28673 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
28674 else
28675- stats->data[i].value = atomic_read(&dev->counts[i]);
28676+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
28677 stats->data[i].type = dev->types[i];
28678 }
28679
28680diff -urNp linux-2.6.32.43/drivers/gpu/drm/drm_lock.c linux-2.6.32.43/drivers/gpu/drm/drm_lock.c
28681--- linux-2.6.32.43/drivers/gpu/drm/drm_lock.c 2011-03-27 14:31:47.000000000 -0400
28682+++ linux-2.6.32.43/drivers/gpu/drm/drm_lock.c 2011-04-17 15:56:46.000000000 -0400
28683@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
28684 if (drm_lock_take(&master->lock, lock->context)) {
28685 master->lock.file_priv = file_priv;
28686 master->lock.lock_time = jiffies;
28687- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
28688+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
28689 break; /* Got lock */
28690 }
28691
28692@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
28693 return -EINVAL;
28694 }
28695
28696- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
28697+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
28698
28699 /* kernel_context_switch isn't used by any of the x86 drm
28700 * modules but is required by the Sparc driver.
28701diff -urNp linux-2.6.32.43/drivers/gpu/drm/i810/i810_dma.c linux-2.6.32.43/drivers/gpu/drm/i810/i810_dma.c
28702--- linux-2.6.32.43/drivers/gpu/drm/i810/i810_dma.c 2011-03-27 14:31:47.000000000 -0400
28703+++ linux-2.6.32.43/drivers/gpu/drm/i810/i810_dma.c 2011-04-17 15:56:46.000000000 -0400
28704@@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
28705 dma->buflist[vertex->idx],
28706 vertex->discard, vertex->used);
28707
28708- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28709- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28710+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
28711+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28712 sarea_priv->last_enqueue = dev_priv->counter - 1;
28713 sarea_priv->last_dispatch = (int)hw_status[5];
28714
28715@@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
28716 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
28717 mc->last_render);
28718
28719- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28720- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
28721+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
28722+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
28723 sarea_priv->last_enqueue = dev_priv->counter - 1;
28724 sarea_priv->last_dispatch = (int)hw_status[5];
28725
28726diff -urNp linux-2.6.32.43/drivers/gpu/drm/i810/i810_drv.h linux-2.6.32.43/drivers/gpu/drm/i810/i810_drv.h
28727--- linux-2.6.32.43/drivers/gpu/drm/i810/i810_drv.h 2011-03-27 14:31:47.000000000 -0400
28728+++ linux-2.6.32.43/drivers/gpu/drm/i810/i810_drv.h 2011-05-04 17:56:28.000000000 -0400
28729@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
28730 int page_flipping;
28731
28732 wait_queue_head_t irq_queue;
28733- atomic_t irq_received;
28734- atomic_t irq_emitted;
28735+ atomic_unchecked_t irq_received;
28736+ atomic_unchecked_t irq_emitted;
28737
28738 int front_offset;
28739 } drm_i810_private_t;
28740diff -urNp linux-2.6.32.43/drivers/gpu/drm/i830/i830_drv.h linux-2.6.32.43/drivers/gpu/drm/i830/i830_drv.h
28741--- linux-2.6.32.43/drivers/gpu/drm/i830/i830_drv.h 2011-03-27 14:31:47.000000000 -0400
28742+++ linux-2.6.32.43/drivers/gpu/drm/i830/i830_drv.h 2011-05-04 17:56:28.000000000 -0400
28743@@ -115,8 +115,8 @@ typedef struct drm_i830_private {
28744 int page_flipping;
28745
28746 wait_queue_head_t irq_queue;
28747- atomic_t irq_received;
28748- atomic_t irq_emitted;
28749+ atomic_unchecked_t irq_received;
28750+ atomic_unchecked_t irq_emitted;
28751
28752 int use_mi_batchbuffer_start;
28753
28754diff -urNp linux-2.6.32.43/drivers/gpu/drm/i830/i830_irq.c linux-2.6.32.43/drivers/gpu/drm/i830/i830_irq.c
28755--- linux-2.6.32.43/drivers/gpu/drm/i830/i830_irq.c 2011-03-27 14:31:47.000000000 -0400
28756+++ linux-2.6.32.43/drivers/gpu/drm/i830/i830_irq.c 2011-05-04 17:56:28.000000000 -0400
28757@@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_
28758
28759 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
28760
28761- atomic_inc(&dev_priv->irq_received);
28762+ atomic_inc_unchecked(&dev_priv->irq_received);
28763 wake_up_interruptible(&dev_priv->irq_queue);
28764
28765 return IRQ_HANDLED;
28766@@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi
28767
28768 DRM_DEBUG("%s\n", __func__);
28769
28770- atomic_inc(&dev_priv->irq_emitted);
28771+ atomic_inc_unchecked(&dev_priv->irq_emitted);
28772
28773 BEGIN_LP_RING(2);
28774 OUT_RING(0);
28775 OUT_RING(GFX_OP_USER_INTERRUPT);
28776 ADVANCE_LP_RING();
28777
28778- return atomic_read(&dev_priv->irq_emitted);
28779+ return atomic_read_unchecked(&dev_priv->irq_emitted);
28780 }
28781
28782 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
28783@@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi
28784
28785 DRM_DEBUG("%s\n", __func__);
28786
28787- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
28788+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
28789 return 0;
28790
28791 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
28792@@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi
28793
28794 for (;;) {
28795 __set_current_state(TASK_INTERRUPTIBLE);
28796- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
28797+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
28798 break;
28799 if ((signed)(end - jiffies) <= 0) {
28800 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
28801@@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d
28802 I830_WRITE16(I830REG_HWSTAM, 0xffff);
28803 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
28804 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
28805- atomic_set(&dev_priv->irq_received, 0);
28806- atomic_set(&dev_priv->irq_emitted, 0);
28807+ atomic_set_unchecked(&dev_priv->irq_received, 0);
28808+ atomic_set_unchecked(&dev_priv->irq_emitted, 0);
28809 init_waitqueue_head(&dev_priv->irq_queue);
28810 }
28811
28812diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ch7017.c
28813--- linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ch7017.c 2011-03-27 14:31:47.000000000 -0400
28814+++ linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ch7017.c 2011-04-17 15:56:46.000000000 -0400
28815@@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
28816 }
28817 }
28818
28819-struct intel_dvo_dev_ops ch7017_ops = {
28820+const struct intel_dvo_dev_ops ch7017_ops = {
28821 .init = ch7017_init,
28822 .detect = ch7017_detect,
28823 .mode_valid = ch7017_mode_valid,
28824diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ch7xxx.c
28825--- linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-03-27 14:31:47.000000000 -0400
28826+++ linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-04-17 15:56:46.000000000 -0400
28827@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
28828 }
28829 }
28830
28831-struct intel_dvo_dev_ops ch7xxx_ops = {
28832+const struct intel_dvo_dev_ops ch7xxx_ops = {
28833 .init = ch7xxx_init,
28834 .detect = ch7xxx_detect,
28835 .mode_valid = ch7xxx_mode_valid,
28836diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/dvo.h linux-2.6.32.43/drivers/gpu/drm/i915/dvo.h
28837--- linux-2.6.32.43/drivers/gpu/drm/i915/dvo.h 2011-03-27 14:31:47.000000000 -0400
28838+++ linux-2.6.32.43/drivers/gpu/drm/i915/dvo.h 2011-04-17 15:56:46.000000000 -0400
28839@@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
28840 *
28841 * \return singly-linked list of modes or NULL if no modes found.
28842 */
28843- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
28844+ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
28845
28846 /**
28847 * Clean up driver-specific bits of the output
28848 */
28849- void (*destroy) (struct intel_dvo_device *dvo);
28850+ void (* const destroy) (struct intel_dvo_device *dvo);
28851
28852 /**
28853 * Debugging hook to dump device registers to log file
28854 */
28855- void (*dump_regs)(struct intel_dvo_device *dvo);
28856+ void (* const dump_regs)(struct intel_dvo_device *dvo);
28857 };
28858
28859-extern struct intel_dvo_dev_ops sil164_ops;
28860-extern struct intel_dvo_dev_ops ch7xxx_ops;
28861-extern struct intel_dvo_dev_ops ivch_ops;
28862-extern struct intel_dvo_dev_ops tfp410_ops;
28863-extern struct intel_dvo_dev_ops ch7017_ops;
28864+extern const struct intel_dvo_dev_ops sil164_ops;
28865+extern const struct intel_dvo_dev_ops ch7xxx_ops;
28866+extern const struct intel_dvo_dev_ops ivch_ops;
28867+extern const struct intel_dvo_dev_ops tfp410_ops;
28868+extern const struct intel_dvo_dev_ops ch7017_ops;
28869
28870 #endif /* _INTEL_DVO_H */
28871diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ivch.c
28872--- linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ivch.c 2011-03-27 14:31:47.000000000 -0400
28873+++ linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ivch.c 2011-04-17 15:56:46.000000000 -0400
28874@@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
28875 }
28876 }
28877
28878-struct intel_dvo_dev_ops ivch_ops= {
28879+const struct intel_dvo_dev_ops ivch_ops= {
28880 .init = ivch_init,
28881 .dpms = ivch_dpms,
28882 .save = ivch_save,
28883diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.32.43/drivers/gpu/drm/i915/dvo_sil164.c
28884--- linux-2.6.32.43/drivers/gpu/drm/i915/dvo_sil164.c 2011-03-27 14:31:47.000000000 -0400
28885+++ linux-2.6.32.43/drivers/gpu/drm/i915/dvo_sil164.c 2011-04-17 15:56:46.000000000 -0400
28886@@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
28887 }
28888 }
28889
28890-struct intel_dvo_dev_ops sil164_ops = {
28891+const struct intel_dvo_dev_ops sil164_ops = {
28892 .init = sil164_init,
28893 .detect = sil164_detect,
28894 .mode_valid = sil164_mode_valid,
28895diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.32.43/drivers/gpu/drm/i915/dvo_tfp410.c
28896--- linux-2.6.32.43/drivers/gpu/drm/i915/dvo_tfp410.c 2011-03-27 14:31:47.000000000 -0400
28897+++ linux-2.6.32.43/drivers/gpu/drm/i915/dvo_tfp410.c 2011-04-17 15:56:46.000000000 -0400
28898@@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
28899 }
28900 }
28901
28902-struct intel_dvo_dev_ops tfp410_ops = {
28903+const struct intel_dvo_dev_ops tfp410_ops = {
28904 .init = tfp410_init,
28905 .detect = tfp410_detect,
28906 .mode_valid = tfp410_mode_valid,
28907diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.32.43/drivers/gpu/drm/i915/i915_debugfs.c
28908--- linux-2.6.32.43/drivers/gpu/drm/i915/i915_debugfs.c 2011-03-27 14:31:47.000000000 -0400
28909+++ linux-2.6.32.43/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-04 17:56:28.000000000 -0400
28910@@ -192,7 +192,7 @@ static int i915_interrupt_info(struct se
28911 I915_READ(GTIMR));
28912 }
28913 seq_printf(m, "Interrupts received: %d\n",
28914- atomic_read(&dev_priv->irq_received));
28915+ atomic_read_unchecked(&dev_priv->irq_received));
28916 if (dev_priv->hw_status_page != NULL) {
28917 seq_printf(m, "Current sequence: %d\n",
28918 i915_get_gem_seqno(dev));
28919diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/i915_drv.c linux-2.6.32.43/drivers/gpu/drm/i915/i915_drv.c
28920--- linux-2.6.32.43/drivers/gpu/drm/i915/i915_drv.c 2011-03-27 14:31:47.000000000 -0400
28921+++ linux-2.6.32.43/drivers/gpu/drm/i915/i915_drv.c 2011-04-17 15:56:46.000000000 -0400
28922@@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
28923 return i915_resume(dev);
28924 }
28925
28926-static struct vm_operations_struct i915_gem_vm_ops = {
28927+static const struct vm_operations_struct i915_gem_vm_ops = {
28928 .fault = i915_gem_fault,
28929 .open = drm_gem_vm_open,
28930 .close = drm_gem_vm_close,
28931diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/i915_drv.h linux-2.6.32.43/drivers/gpu/drm/i915/i915_drv.h
28932--- linux-2.6.32.43/drivers/gpu/drm/i915/i915_drv.h 2011-03-27 14:31:47.000000000 -0400
28933+++ linux-2.6.32.43/drivers/gpu/drm/i915/i915_drv.h 2011-05-04 17:56:28.000000000 -0400
28934@@ -197,7 +197,7 @@ typedef struct drm_i915_private {
28935 int page_flipping;
28936
28937 wait_queue_head_t irq_queue;
28938- atomic_t irq_received;
28939+ atomic_unchecked_t irq_received;
28940 /** Protects user_irq_refcount and irq_mask_reg */
28941 spinlock_t user_irq_lock;
28942 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
28943diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/i915_gem.c linux-2.6.32.43/drivers/gpu/drm/i915/i915_gem.c
28944--- linux-2.6.32.43/drivers/gpu/drm/i915/i915_gem.c 2011-03-27 14:31:47.000000000 -0400
28945+++ linux-2.6.32.43/drivers/gpu/drm/i915/i915_gem.c 2011-04-17 15:56:46.000000000 -0400
28946@@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_d
28947
28948 args->aper_size = dev->gtt_total;
28949 args->aper_available_size = (args->aper_size -
28950- atomic_read(&dev->pin_memory));
28951+ atomic_read_unchecked(&dev->pin_memory));
28952
28953 return 0;
28954 }
28955@@ -492,6 +492,11 @@ i915_gem_pread_ioctl(struct drm_device *
28956 return -EINVAL;
28957 }
28958
28959+ if (!access_ok(VERIFY_WRITE, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
28960+ drm_gem_object_unreference(obj);
28961+ return -EFAULT;
28962+ }
28963+
28964 if (i915_gem_object_needs_bit17_swizzle(obj)) {
28965 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
28966 } else {
28967@@ -965,6 +970,11 @@ i915_gem_pwrite_ioctl(struct drm_device
28968 return -EINVAL;
28969 }
28970
28971+ if (!access_ok(VERIFY_READ, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
28972+ drm_gem_object_unreference(obj);
28973+ return -EFAULT;
28974+ }
28975+
28976 /* We can only do the GTT pwrite on untiled buffers, as otherwise
28977 * it would end up going through the fenced access, and we'll get
28978 * different detiling behavior between reading and writing.
28979@@ -2054,7 +2064,7 @@ i915_gem_object_unbind(struct drm_gem_ob
28980
28981 if (obj_priv->gtt_space) {
28982 atomic_dec(&dev->gtt_count);
28983- atomic_sub(obj->size, &dev->gtt_memory);
28984+ atomic_sub_unchecked(obj->size, &dev->gtt_memory);
28985
28986 drm_mm_put_block(obj_priv->gtt_space);
28987 obj_priv->gtt_space = NULL;
28988@@ -2697,7 +2707,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
28989 goto search_free;
28990 }
28991 atomic_inc(&dev->gtt_count);
28992- atomic_add(obj->size, &dev->gtt_memory);
28993+ atomic_add_unchecked(obj->size, &dev->gtt_memory);
28994
28995 /* Assert that the object is not currently in any GPU domain. As it
28996 * wasn't in the GTT, there shouldn't be any way it could have been in
28997@@ -3751,9 +3761,9 @@ i915_gem_execbuffer(struct drm_device *d
28998 "%d/%d gtt bytes\n",
28999 atomic_read(&dev->object_count),
29000 atomic_read(&dev->pin_count),
29001- atomic_read(&dev->object_memory),
29002- atomic_read(&dev->pin_memory),
29003- atomic_read(&dev->gtt_memory),
29004+ atomic_read_unchecked(&dev->object_memory),
29005+ atomic_read_unchecked(&dev->pin_memory),
29006+ atomic_read_unchecked(&dev->gtt_memory),
29007 dev->gtt_total);
29008 }
29009 goto err;
29010@@ -3985,7 +3995,7 @@ i915_gem_object_pin(struct drm_gem_objec
29011 */
29012 if (obj_priv->pin_count == 1) {
29013 atomic_inc(&dev->pin_count);
29014- atomic_add(obj->size, &dev->pin_memory);
29015+ atomic_add_unchecked(obj->size, &dev->pin_memory);
29016 if (!obj_priv->active &&
29017 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
29018 !list_empty(&obj_priv->list))
29019@@ -4018,7 +4028,7 @@ i915_gem_object_unpin(struct drm_gem_obj
29020 list_move_tail(&obj_priv->list,
29021 &dev_priv->mm.inactive_list);
29022 atomic_dec(&dev->pin_count);
29023- atomic_sub(obj->size, &dev->pin_memory);
29024+ atomic_sub_unchecked(obj->size, &dev->pin_memory);
29025 }
29026 i915_verify_inactive(dev, __FILE__, __LINE__);
29027 }
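
The two i915_gem hunks above add access_ok() checks on the user-supplied data_ptr/size pair before the pread and pwrite paths consume it, returning -EFAULT early instead of trusting the range. The userspace model below mirrors the shape of that guard; range_ok() is only a stand-in for the kernel's access_ok(), not its implementation.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int range_ok(const void *ptr, size_t size)
{
        uintptr_t start = (uintptr_t)ptr;

        if (ptr == NULL)
                return 0;
        if (start + size < start)       /* reject ranges that wrap around */
                return 0;
        return 1;
}

static int fake_pread(void *user_buf, size_t size)
{
        if (!range_ok(user_buf, size))
                return -1;              /* the kernel paths return -EFAULT here */
        memset(user_buf, 0, size);      /* stand-in for the copy-out */
        return 0;
}

int main(void)
{
        char buf[16];

        printf("valid buffer: %d\n", fake_pread(buf, sizeof(buf)));
        printf("NULL buffer:  %d\n", fake_pread(NULL, 16));
        return 0;
}
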
29028diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/i915_irq.c linux-2.6.32.43/drivers/gpu/drm/i915/i915_irq.c
29029--- linux-2.6.32.43/drivers/gpu/drm/i915/i915_irq.c 2011-03-27 14:31:47.000000000 -0400
29030+++ linux-2.6.32.43/drivers/gpu/drm/i915/i915_irq.c 2011-05-04 17:56:28.000000000 -0400
29031@@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
29032 int irq_received;
29033 int ret = IRQ_NONE;
29034
29035- atomic_inc(&dev_priv->irq_received);
29036+ atomic_inc_unchecked(&dev_priv->irq_received);
29037
29038 if (IS_IGDNG(dev))
29039 return igdng_irq_handler(dev);
29040@@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct d
29041 {
29042 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
29043
29044- atomic_set(&dev_priv->irq_received, 0);
29045+ atomic_set_unchecked(&dev_priv->irq_received, 0);
29046
29047 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
29048 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
29049diff -urNp linux-2.6.32.43/drivers/gpu/drm/mga/mga_drv.h linux-2.6.32.43/drivers/gpu/drm/mga/mga_drv.h
29050--- linux-2.6.32.43/drivers/gpu/drm/mga/mga_drv.h 2011-03-27 14:31:47.000000000 -0400
29051+++ linux-2.6.32.43/drivers/gpu/drm/mga/mga_drv.h 2011-05-04 17:56:28.000000000 -0400
29052@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
29053 u32 clear_cmd;
29054 u32 maccess;
29055
29056- atomic_t vbl_received; /**< Number of vblanks received. */
29057+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
29058 wait_queue_head_t fence_queue;
29059- atomic_t last_fence_retired;
29060+ atomic_unchecked_t last_fence_retired;
29061 u32 next_fence_to_post;
29062
29063 unsigned int fb_cpp;
29064diff -urNp linux-2.6.32.43/drivers/gpu/drm/mga/mga_irq.c linux-2.6.32.43/drivers/gpu/drm/mga/mga_irq.c
29065--- linux-2.6.32.43/drivers/gpu/drm/mga/mga_irq.c 2011-03-27 14:31:47.000000000 -0400
29066+++ linux-2.6.32.43/drivers/gpu/drm/mga/mga_irq.c 2011-05-04 17:56:28.000000000 -0400
29067@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
29068 if (crtc != 0)
29069 return 0;
29070
29071- return atomic_read(&dev_priv->vbl_received);
29072+ return atomic_read_unchecked(&dev_priv->vbl_received);
29073 }
29074
29075
29076@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29077 /* VBLANK interrupt */
29078 if (status & MGA_VLINEPEN) {
29079 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
29080- atomic_inc(&dev_priv->vbl_received);
29081+ atomic_inc_unchecked(&dev_priv->vbl_received);
29082 drm_handle_vblank(dev, 0);
29083 handled = 1;
29084 }
29085@@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29086 MGA_WRITE(MGA_PRIMEND, prim_end);
29087 }
29088
29089- atomic_inc(&dev_priv->last_fence_retired);
29090+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
29091 DRM_WAKEUP(&dev_priv->fence_queue);
29092 handled = 1;
29093 }
29094@@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev
29095 * using fences.
29096 */
29097 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
29098- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
29099+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
29100 - *sequence) <= (1 << 23)));
29101
29102 *sequence = cur_fence;
29103diff -urNp linux-2.6.32.43/drivers/gpu/drm/r128/r128_cce.c linux-2.6.32.43/drivers/gpu/drm/r128/r128_cce.c
29104--- linux-2.6.32.43/drivers/gpu/drm/r128/r128_cce.c 2011-03-27 14:31:47.000000000 -0400
29105+++ linux-2.6.32.43/drivers/gpu/drm/r128/r128_cce.c 2011-05-04 17:56:28.000000000 -0400
29106@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
29107
29108 /* GH: Simple idle check.
29109 */
29110- atomic_set(&dev_priv->idle_count, 0);
29111+ atomic_set_unchecked(&dev_priv->idle_count, 0);
29112
29113 /* We don't support anything other than bus-mastering ring mode,
29114 * but the ring can be in either AGP or PCI space for the ring
29115diff -urNp linux-2.6.32.43/drivers/gpu/drm/r128/r128_drv.h linux-2.6.32.43/drivers/gpu/drm/r128/r128_drv.h
29116--- linux-2.6.32.43/drivers/gpu/drm/r128/r128_drv.h 2011-03-27 14:31:47.000000000 -0400
29117+++ linux-2.6.32.43/drivers/gpu/drm/r128/r128_drv.h 2011-05-04 17:56:28.000000000 -0400
29118@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
29119 int is_pci;
29120 unsigned long cce_buffers_offset;
29121
29122- atomic_t idle_count;
29123+ atomic_unchecked_t idle_count;
29124
29125 int page_flipping;
29126 int current_page;
29127 u32 crtc_offset;
29128 u32 crtc_offset_cntl;
29129
29130- atomic_t vbl_received;
29131+ atomic_unchecked_t vbl_received;
29132
29133 u32 color_fmt;
29134 unsigned int front_offset;
29135diff -urNp linux-2.6.32.43/drivers/gpu/drm/r128/r128_irq.c linux-2.6.32.43/drivers/gpu/drm/r128/r128_irq.c
29136--- linux-2.6.32.43/drivers/gpu/drm/r128/r128_irq.c 2011-03-27 14:31:47.000000000 -0400
29137+++ linux-2.6.32.43/drivers/gpu/drm/r128/r128_irq.c 2011-05-04 17:56:28.000000000 -0400
29138@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
29139 if (crtc != 0)
29140 return 0;
29141
29142- return atomic_read(&dev_priv->vbl_received);
29143+ return atomic_read_unchecked(&dev_priv->vbl_received);
29144 }
29145
29146 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
29147@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
29148 /* VBLANK interrupt */
29149 if (status & R128_CRTC_VBLANK_INT) {
29150 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
29151- atomic_inc(&dev_priv->vbl_received);
29152+ atomic_inc_unchecked(&dev_priv->vbl_received);
29153 drm_handle_vblank(dev, 0);
29154 return IRQ_HANDLED;
29155 }
29156diff -urNp linux-2.6.32.43/drivers/gpu/drm/r128/r128_state.c linux-2.6.32.43/drivers/gpu/drm/r128/r128_state.c
29157--- linux-2.6.32.43/drivers/gpu/drm/r128/r128_state.c 2011-03-27 14:31:47.000000000 -0400
29158+++ linux-2.6.32.43/drivers/gpu/drm/r128/r128_state.c 2011-05-04 17:56:28.000000000 -0400
29159@@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv
29160
29161 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
29162 {
29163- if (atomic_read(&dev_priv->idle_count) == 0) {
29164+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
29165 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
29166 } else {
29167- atomic_set(&dev_priv->idle_count, 0);
29168+ atomic_set_unchecked(&dev_priv->idle_count, 0);
29169 }
29170 }
29171
29172diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/atom.c linux-2.6.32.43/drivers/gpu/drm/radeon/atom.c
29173--- linux-2.6.32.43/drivers/gpu/drm/radeon/atom.c 2011-05-10 22:12:01.000000000 -0400
29174+++ linux-2.6.32.43/drivers/gpu/drm/radeon/atom.c 2011-05-16 21:46:57.000000000 -0400
29175@@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct c
29176 char name[512];
29177 int i;
29178
29179+ pax_track_stack();
29180+
29181 ctx->card = card;
29182 ctx->bios = bios;
29183
29184diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.32.43/drivers/gpu/drm/radeon/mkregtable.c
29185--- linux-2.6.32.43/drivers/gpu/drm/radeon/mkregtable.c 2011-03-27 14:31:47.000000000 -0400
29186+++ linux-2.6.32.43/drivers/gpu/drm/radeon/mkregtable.c 2011-04-17 15:56:46.000000000 -0400
29187@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
29188 regex_t mask_rex;
29189 regmatch_t match[4];
29190 char buf[1024];
29191- size_t end;
29192+ long end;
29193 int len;
29194 int done = 0;
29195 int r;
29196 unsigned o;
29197 struct offset *offset;
29198 char last_reg_s[10];
29199- int last_reg;
29200+ unsigned long last_reg;
29201
29202 if (regcomp
29203 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
29204diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_atombios.c
29205--- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_atombios.c 2011-03-27 14:31:47.000000000 -0400
29206+++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-16 21:46:57.000000000 -0400
29207@@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from
29208 bool linkb;
29209 struct radeon_i2c_bus_rec ddc_bus;
29210
29211+ pax_track_stack();
29212+
29213 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29214
29215 if (data_offset == 0)
29216@@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_o
29217 }
29218 }
29219
29220-struct bios_connector {
29221+static struct bios_connector {
29222 bool valid;
29223 uint16_t line_mux;
29224 uint16_t devices;
29225 int connector_type;
29226 struct radeon_i2c_bus_rec ddc_bus;
29227-};
29228+} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29229
29230 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
29231 drm_device
29232@@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from
29233 uint8_t dac;
29234 union atom_supported_devices *supported_devices;
29235 int i, j;
29236- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
29237
29238 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
29239
29240diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_display.c
29241--- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_display.c 2011-03-27 14:31:47.000000000 -0400
29242+++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_display.c 2011-04-17 15:56:46.000000000 -0400
29243@@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pl
29244
29245 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
29246 error = freq - current_freq;
29247- error = error < 0 ? 0xffffffff : error;
29248+ error = (int32_t)error < 0 ? 0xffffffff : error;
29249 } else
29250 error = abs(current_freq - freq);
29251 vco_diff = abs(vco - best_vco);
29252diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_drv.h
29253--- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_drv.h 2011-03-27 14:31:47.000000000 -0400
29254+++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-04 17:56:28.000000000 -0400
29255@@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
29256
29257 /* SW interrupt */
29258 wait_queue_head_t swi_queue;
29259- atomic_t swi_emitted;
29260+ atomic_unchecked_t swi_emitted;
29261 int vblank_crtc;
29262 uint32_t irq_enable_reg;
29263 uint32_t r500_disp_irq_reg;
29264diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_fence.c
29265--- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_fence.c 2011-03-27 14:31:47.000000000 -0400
29266+++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-04 17:56:28.000000000 -0400
29267@@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi
29268 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
29269 return 0;
29270 }
29271- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
29272+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
29273 if (!rdev->cp.ready) {
29274 /* FIXME: cp is not running assume everythings is done right
29275 * away
29276@@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct rade
29277 return r;
29278 }
29279 WREG32(rdev->fence_drv.scratch_reg, 0);
29280- atomic_set(&rdev->fence_drv.seq, 0);
29281+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
29282 INIT_LIST_HEAD(&rdev->fence_drv.created);
29283 INIT_LIST_HEAD(&rdev->fence_drv.emited);
29284 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
29285diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon.h linux-2.6.32.43/drivers/gpu/drm/radeon/radeon.h
29286--- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon.h 2011-03-27 14:31:47.000000000 -0400
29287+++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon.h 2011-05-04 17:56:28.000000000 -0400
29288@@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device
29289 */
29290 struct radeon_fence_driver {
29291 uint32_t scratch_reg;
29292- atomic_t seq;
29293+ atomic_unchecked_t seq;
29294 uint32_t last_seq;
29295 unsigned long count_timeout;
29296 wait_queue_head_t queue;
29297diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_ioc32.c
29298--- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-03-27 14:31:47.000000000 -0400
29299+++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-04-23 13:57:24.000000000 -0400
29300@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
29301 request = compat_alloc_user_space(sizeof(*request));
29302 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
29303 || __put_user(req32.param, &request->param)
29304- || __put_user((void __user *)(unsigned long)req32.value,
29305+ || __put_user((unsigned long)req32.value,
29306 &request->value))
29307 return -EFAULT;
29308
29309diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_irq.c
29310--- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_irq.c 2011-03-27 14:31:47.000000000 -0400
29311+++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-04 17:56:28.000000000 -0400
29312@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
29313 unsigned int ret;
29314 RING_LOCALS;
29315
29316- atomic_inc(&dev_priv->swi_emitted);
29317- ret = atomic_read(&dev_priv->swi_emitted);
29318+ atomic_inc_unchecked(&dev_priv->swi_emitted);
29319+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
29320
29321 BEGIN_RING(4);
29322 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
29323@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
29324 drm_radeon_private_t *dev_priv =
29325 (drm_radeon_private_t *) dev->dev_private;
29326
29327- atomic_set(&dev_priv->swi_emitted, 0);
29328+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
29329 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
29330
29331 dev->max_vblank_count = 0x001fffff;
29332diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_state.c
29333--- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_state.c 2011-03-27 14:31:47.000000000 -0400
29334+++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_state.c 2011-04-17 15:56:46.000000000 -0400
29335@@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm
29336 {
29337 drm_radeon_private_t *dev_priv = dev->dev_private;
29338 drm_radeon_getparam_t *param = data;
29339- int value;
29340+ int value = 0;
29341
29342 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
29343
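
The one-character change above initialises value in radeon_cp_getparam() before the switch that may or may not set it, presumably so that uninitialised stack bytes cannot reach user space through the later copy-out if a parameter path leaves value untouched. The userspace model below shows the shape of that fix; the parameter numbers and helper are invented.

#include <stdio.h>
#include <string.h>

static int getparam(int param, int *out)
{
        int value = 0;          /* without "= 0", untouched paths leak stack data */

        switch (param) {
        case 1:
                value = 42;
                break;
        default:
                break;          /* value left untouched */
        }
        memcpy(out, &value, sizeof(value));     /* stand-in for the copy to user space */
        return 0;
}

int main(void)
{
        int out;

        getparam(7, &out);
        printf("unknown param returns %d instead of stack garbage\n", out);
        return 0;
}
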
29344diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_ttm.c
29345--- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_ttm.c 2011-03-27 14:31:47.000000000 -0400
29346+++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_ttm.c 2011-04-17 15:56:46.000000000 -0400
29347@@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_devic
29348 DRM_INFO("radeon: ttm finalized\n");
29349 }
29350
29351-static struct vm_operations_struct radeon_ttm_vm_ops;
29352-static const struct vm_operations_struct *ttm_vm_ops = NULL;
29353-
29354-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
29355-{
29356- struct ttm_buffer_object *bo;
29357- int r;
29358-
29359- bo = (struct ttm_buffer_object *)vma->vm_private_data;
29360- if (bo == NULL) {
29361- return VM_FAULT_NOPAGE;
29362- }
29363- r = ttm_vm_ops->fault(vma, vmf);
29364- return r;
29365-}
29366-
29367 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
29368 {
29369 struct drm_file *file_priv;
29370 struct radeon_device *rdev;
29371- int r;
29372
29373 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
29374 return drm_mmap(filp, vma);
29375@@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struc
29376
29377 file_priv = (struct drm_file *)filp->private_data;
29378 rdev = file_priv->minor->dev->dev_private;
29379- if (rdev == NULL) {
29380+ if (!rdev)
29381 return -EINVAL;
29382- }
29383- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
29384- if (unlikely(r != 0)) {
29385- return r;
29386- }
29387- if (unlikely(ttm_vm_ops == NULL)) {
29388- ttm_vm_ops = vma->vm_ops;
29389- radeon_ttm_vm_ops = *ttm_vm_ops;
29390- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
29391- }
29392- vma->vm_ops = &radeon_ttm_vm_ops;
29393- return 0;
29394+ return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
29395 }
29396
29397
29398diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/rs690.c linux-2.6.32.43/drivers/gpu/drm/radeon/rs690.c
29399--- linux-2.6.32.43/drivers/gpu/drm/radeon/rs690.c 2011-03-27 14:31:47.000000000 -0400
29400+++ linux-2.6.32.43/drivers/gpu/drm/radeon/rs690.c 2011-04-17 15:56:46.000000000 -0400
29401@@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct
29402 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
29403 rdev->pm.sideport_bandwidth.full)
29404 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
29405- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
29406+ read_delay_latency.full = rfixed_const(800 * 1000);
29407 read_delay_latency.full = rfixed_div(read_delay_latency,
29408 rdev->pm.igp_sideport_mclk);
29409+ a.full = rfixed_const(370);
29410+ read_delay_latency.full = rfixed_mul(read_delay_latency, a);
29411 } else {
29412 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
29413 rdev->pm.k8_bandwidth.full)
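
The rs690 hunk above splits rfixed_const(370 * 800 * 1000) into a division by the sideport memory clock first and a multiplication by 370 afterwards. Assuming rfixed_const() is the radeon 20.12 fixed-point constructor (a 12-bit left shift into a 32-bit word), the original constant cannot be represented: 370 * 800 * 1000 = 296,000,000, and shifting that by 12 bits needs roughly 1.2e12, far beyond 32 bits. The standalone arithmetic below shows the overflow; fixed_const() is a stand-in for the driver helper.

#include <stdint.h>
#include <stdio.h>

static uint32_t fixed_const(uint32_t v)
{
        return v << 12;                 /* assumed 20.12 representation */
}

int main(void)
{
        uint64_t needed = (uint64_t)(370 * 800 * 1000) << 12;

        printf("needed:     %llu\n", (unsigned long long)needed);
        printf("32-bit max: %lu\n", (unsigned long)UINT32_MAX);
        printf("truncated:  %lu\n", (unsigned long)fixed_const(370u * 800u * 1000u));
        return 0;
}

Keeping every intermediate value in range (divide first, then scale by 370) avoids the silent truncation.
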
29414diff -urNp linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_bo.c
29415--- linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_bo.c 2011-03-27 14:31:47.000000000 -0400
29416+++ linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_bo.c 2011-04-23 12:56:11.000000000 -0400
29417@@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_a
29418 NULL
29419 };
29420
29421-static struct sysfs_ops ttm_bo_global_ops = {
29422+static const struct sysfs_ops ttm_bo_global_ops = {
29423 .show = &ttm_bo_global_show
29424 };
29425
29426diff -urNp linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_bo_vm.c
29427--- linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-27 14:31:47.000000000 -0400
29428+++ linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-04-17 15:56:46.000000000 -0400
29429@@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
29430 {
29431 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
29432 vma->vm_private_data;
29433- struct ttm_bo_device *bdev = bo->bdev;
29434+ struct ttm_bo_device *bdev;
29435 unsigned long bus_base;
29436 unsigned long bus_offset;
29437 unsigned long bus_size;
29438@@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
29439 unsigned long address = (unsigned long)vmf->virtual_address;
29440 int retval = VM_FAULT_NOPAGE;
29441
29442+ if (!bo)
29443+ return VM_FAULT_NOPAGE;
29444+ bdev = bo->bdev;
29445+
29446 /*
29447 * Work around locking order reversal in fault / nopfn
29448 * between mmap_sem and bo_reserve: Perform a trylock operation
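
The radeon_ttm.c hunk earlier removes the radeon_ttm_fault() trampoline whose only job was a NULL check on vma->vm_private_data, and the ttm_bo_vm.c hunk above moves that check into ttm_bo_vm_fault() itself, taking care to read bo->bdev only after the check rather than in the declaration. A minimal illustration of the ordering issue:

#include <stdio.h>

struct device { int id; };
struct buffer_object { struct device *bdev; };

static int handle_fault(struct buffer_object *bo)
{
        struct device *bdev;            /* not "= bo->bdev": bo may be NULL */

        if (bo == NULL)
                return -1;              /* models returning VM_FAULT_NOPAGE */
        bdev = bo->bdev;                /* safe to dereference now */
        return bdev ? bdev->id : 0;
}

int main(void)
{
        struct device dev = { .id = 7 };
        struct buffer_object bo = { .bdev = &dev };

        printf("fault on real object: %d\n", handle_fault(&bo));
        printf("fault on NULL object: %d\n", handle_fault(NULL));
        return 0;
}
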
29449diff -urNp linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_global.c
29450--- linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_global.c 2011-03-27 14:31:47.000000000 -0400
29451+++ linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_global.c 2011-04-17 15:56:46.000000000 -0400
29452@@ -36,7 +36,7 @@
29453 struct ttm_global_item {
29454 struct mutex mutex;
29455 void *object;
29456- int refcount;
29457+ atomic_t refcount;
29458 };
29459
29460 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
29461@@ -49,7 +49,7 @@ void ttm_global_init(void)
29462 struct ttm_global_item *item = &glob[i];
29463 mutex_init(&item->mutex);
29464 item->object = NULL;
29465- item->refcount = 0;
29466+ atomic_set(&item->refcount, 0);
29467 }
29468 }
29469
29470@@ -59,7 +59,7 @@ void ttm_global_release(void)
29471 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
29472 struct ttm_global_item *item = &glob[i];
29473 BUG_ON(item->object != NULL);
29474- BUG_ON(item->refcount != 0);
29475+ BUG_ON(atomic_read(&item->refcount) != 0);
29476 }
29477 }
29478
29479@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
29480 void *object;
29481
29482 mutex_lock(&item->mutex);
29483- if (item->refcount == 0) {
29484+ if (atomic_read(&item->refcount) == 0) {
29485 item->object = kzalloc(ref->size, GFP_KERNEL);
29486 if (unlikely(item->object == NULL)) {
29487 ret = -ENOMEM;
29488@@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
29489 goto out_err;
29490
29491 }
29492- ++item->refcount;
29493+ atomic_inc(&item->refcount);
29494 ref->object = item->object;
29495 object = item->object;
29496 mutex_unlock(&item->mutex);
29497@@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
29498 struct ttm_global_item *item = &glob[ref->global_type];
29499
29500 mutex_lock(&item->mutex);
29501- BUG_ON(item->refcount == 0);
29502+ BUG_ON(atomic_read(&item->refcount) == 0);
29503 BUG_ON(ref->object != item->object);
29504- if (--item->refcount == 0) {
29505+ if (atomic_dec_and_test(&item->refcount)) {
29506 ref->release(ref);
29507 item->object = NULL;
29508 }
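
The ttm_global hunks above convert the plain int refcount into an atomic_t, replacing ++/-- with atomic_inc() and atomic_dec_and_test(). The mutex is still held around these paths, so the change looks aimed at the reference-count hardening this patch applies to atomic_t rather than at locking. A userspace sketch of the resulting get/put idiom, with C11 atomics standing in for atomic_t:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct item {
        atomic_int refcount;
        void *object;
};

static void item_ref(struct item *it)
{
        if (atomic_fetch_add(&it->refcount, 1) == 0)
                it->object = malloc(64);                /* first user allocates */
}

static void item_unref(struct item *it)
{
        if (atomic_fetch_sub(&it->refcount, 1) == 1) {  /* models atomic_dec_and_test() */
                free(it->object);
                it->object = NULL;
        }
}

int main(void)
{
        struct item it = { .refcount = 0, .object = NULL };

        item_ref(&it);
        item_ref(&it);
        item_unref(&it);
        item_unref(&it);                /* last put releases the object */
        printf("object released: %s\n", it.object == NULL ? "yes" : "no");
        return 0;
}
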
29509diff -urNp linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_memory.c linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_memory.c
29510--- linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_memory.c 2011-03-27 14:31:47.000000000 -0400
29511+++ linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_memory.c 2011-04-17 15:56:46.000000000 -0400
29512@@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_at
29513 NULL
29514 };
29515
29516-static struct sysfs_ops ttm_mem_zone_ops = {
29517+static const struct sysfs_ops ttm_mem_zone_ops = {
29518 .show = &ttm_mem_zone_show,
29519 .store = &ttm_mem_zone_store
29520 };
29521diff -urNp linux-2.6.32.43/drivers/gpu/drm/via/via_drv.h linux-2.6.32.43/drivers/gpu/drm/via/via_drv.h
29522--- linux-2.6.32.43/drivers/gpu/drm/via/via_drv.h 2011-03-27 14:31:47.000000000 -0400
29523+++ linux-2.6.32.43/drivers/gpu/drm/via/via_drv.h 2011-05-04 17:56:28.000000000 -0400
29524@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
29525 typedef uint32_t maskarray_t[5];
29526
29527 typedef struct drm_via_irq {
29528- atomic_t irq_received;
29529+ atomic_unchecked_t irq_received;
29530 uint32_t pending_mask;
29531 uint32_t enable_mask;
29532 wait_queue_head_t irq_queue;
29533@@ -75,7 +75,7 @@ typedef struct drm_via_private {
29534 struct timeval last_vblank;
29535 int last_vblank_valid;
29536 unsigned usec_per_vblank;
29537- atomic_t vbl_received;
29538+ atomic_unchecked_t vbl_received;
29539 drm_via_state_t hc_state;
29540 char pci_buf[VIA_PCI_BUF_SIZE];
29541 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
29542diff -urNp linux-2.6.32.43/drivers/gpu/drm/via/via_irq.c linux-2.6.32.43/drivers/gpu/drm/via/via_irq.c
29543--- linux-2.6.32.43/drivers/gpu/drm/via/via_irq.c 2011-03-27 14:31:47.000000000 -0400
29544+++ linux-2.6.32.43/drivers/gpu/drm/via/via_irq.c 2011-05-04 17:56:28.000000000 -0400
29545@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
29546 if (crtc != 0)
29547 return 0;
29548
29549- return atomic_read(&dev_priv->vbl_received);
29550+ return atomic_read_unchecked(&dev_priv->vbl_received);
29551 }
29552
29553 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
29554@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
29555
29556 status = VIA_READ(VIA_REG_INTERRUPT);
29557 if (status & VIA_IRQ_VBLANK_PENDING) {
29558- atomic_inc(&dev_priv->vbl_received);
29559- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
29560+ atomic_inc_unchecked(&dev_priv->vbl_received);
29561+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
29562 do_gettimeofday(&cur_vblank);
29563 if (dev_priv->last_vblank_valid) {
29564 dev_priv->usec_per_vblank =
29565@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
29566 dev_priv->last_vblank = cur_vblank;
29567 dev_priv->last_vblank_valid = 1;
29568 }
29569- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
29570+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
29571 DRM_DEBUG("US per vblank is: %u\n",
29572 dev_priv->usec_per_vblank);
29573 }
29574@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
29575
29576 for (i = 0; i < dev_priv->num_irqs; ++i) {
29577 if (status & cur_irq->pending_mask) {
29578- atomic_inc(&cur_irq->irq_received);
29579+ atomic_inc_unchecked(&cur_irq->irq_received);
29580 DRM_WAKEUP(&cur_irq->irq_queue);
29581 handled = 1;
29582 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
29583@@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device *
29584 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29585 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
29586 masks[irq][4]));
29587- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
29588+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
29589 } else {
29590 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
29591 (((cur_irq_sequence =
29592- atomic_read(&cur_irq->irq_received)) -
29593+ atomic_read_unchecked(&cur_irq->irq_received)) -
29594 *sequence) <= (1 << 23)));
29595 }
29596 *sequence = cur_irq_sequence;
29597@@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr
29598 }
29599
29600 for (i = 0; i < dev_priv->num_irqs; ++i) {
29601- atomic_set(&cur_irq->irq_received, 0);
29602+ atomic_set_unchecked(&cur_irq->irq_received, 0);
29603 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
29604 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
29605 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
29606@@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev,
29607 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
29608 case VIA_IRQ_RELATIVE:
29609 irqwait->request.sequence +=
29610- atomic_read(&cur_irq->irq_received);
29611+ atomic_read_unchecked(&cur_irq->irq_received);
29612 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
29613 case VIA_IRQ_ABSOLUTE:
29614 break;
29615diff -urNp linux-2.6.32.43/drivers/hid/hid-core.c linux-2.6.32.43/drivers/hid/hid-core.c
29616--- linux-2.6.32.43/drivers/hid/hid-core.c 2011-05-10 22:12:01.000000000 -0400
29617+++ linux-2.6.32.43/drivers/hid/hid-core.c 2011-05-10 22:12:32.000000000 -0400
29618@@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device
29619
29620 int hid_add_device(struct hid_device *hdev)
29621 {
29622- static atomic_t id = ATOMIC_INIT(0);
29623+ static atomic_unchecked_t id = ATOMIC_INIT(0);
29624 int ret;
29625
29626 if (WARN_ON(hdev->status & HID_STAT_ADDED))
29627@@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hd
29628 /* XXX hack, any other cleaner solution after the driver core
29629 * is converted to allow more than 20 bytes as the device name? */
29630 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
29631- hdev->vendor, hdev->product, atomic_inc_return(&id));
29632+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
29633
29634 ret = device_add(&hdev->dev);
29635 if (!ret)
29636diff -urNp linux-2.6.32.43/drivers/hid/usbhid/hiddev.c linux-2.6.32.43/drivers/hid/usbhid/hiddev.c
29637--- linux-2.6.32.43/drivers/hid/usbhid/hiddev.c 2011-03-27 14:31:47.000000000 -0400
29638+++ linux-2.6.32.43/drivers/hid/usbhid/hiddev.c 2011-04-17 15:56:46.000000000 -0400
29639@@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *fi
29640 return put_user(HID_VERSION, (int __user *)arg);
29641
29642 case HIDIOCAPPLICATION:
29643- if (arg < 0 || arg >= hid->maxapplication)
29644+ if (arg >= hid->maxapplication)
29645 return -EINVAL;
29646
29647 for (i = 0; i < hid->maxcollection; i++)
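The hiddev hunk drops the "arg < 0" half of the HIDIOCAPPLICATION bounds check: arg is unsigned long there, so that comparison is always false, and a negative value from user space wraps to a huge unsigned number that the remaining ">= hid->maxapplication" test still rejects. A small standalone demonstration; MAXAPPLICATION is a made-up stand-in for hid->maxapplication:

/* Why "arg < 0 || arg >= max" collapses to "arg >= max" for unsigned arg:
 * a negative caller value wraps to a huge unsigned and trips the upper bound. */
#include <stdio.h>

#define MAXAPPLICATION 4UL      /* stand-in for hid->maxapplication */

static int check(unsigned long arg)
{
        /* "arg < 0" would be compiled away: an unsigned value is never negative */
        return arg >= MAXAPPLICATION ? -1 : 0;
}

int main(void)
{
        long user_value = -1;                           /* hostile "negative" index */
        unsigned long arg = (unsigned long)user_value;  /* what the ioctl sees */

        printf("arg=%lu -> %s\n", arg, check(arg) ? "rejected" : "accepted");
        printf("arg=2  -> %s\n", check(2) ? "rejected" : "accepted");
        return 0;
}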
29648diff -urNp linux-2.6.32.43/drivers/hwmon/lis3lv02d.c linux-2.6.32.43/drivers/hwmon/lis3lv02d.c
29649--- linux-2.6.32.43/drivers/hwmon/lis3lv02d.c 2011-03-27 14:31:47.000000000 -0400
29650+++ linux-2.6.32.43/drivers/hwmon/lis3lv02d.c 2011-05-04 17:56:28.000000000 -0400
29651@@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in
29652 * the lid is closed. This leads to interrupts as soon as a little move
29653 * is done.
29654 */
29655- atomic_inc(&lis3_dev.count);
29656+ atomic_inc_unchecked(&lis3_dev.count);
29657
29658 wake_up_interruptible(&lis3_dev.misc_wait);
29659 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
29660@@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in
29661 if (test_and_set_bit(0, &lis3_dev.misc_opened))
29662 return -EBUSY; /* already open */
29663
29664- atomic_set(&lis3_dev.count, 0);
29665+ atomic_set_unchecked(&lis3_dev.count, 0);
29666
29667 /*
29668 * The sensor can generate interrupts for free-fall and direction
29669@@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc
29670 add_wait_queue(&lis3_dev.misc_wait, &wait);
29671 while (true) {
29672 set_current_state(TASK_INTERRUPTIBLE);
29673- data = atomic_xchg(&lis3_dev.count, 0);
29674+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
29675 if (data)
29676 break;
29677
29678@@ -244,7 +244,7 @@ out:
29679 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
29680 {
29681 poll_wait(file, &lis3_dev.misc_wait, wait);
29682- if (atomic_read(&lis3_dev.count))
29683+ if (atomic_read_unchecked(&lis3_dev.count))
29684 return POLLIN | POLLRDNORM;
29685 return 0;
29686 }
29687diff -urNp linux-2.6.32.43/drivers/hwmon/lis3lv02d.h linux-2.6.32.43/drivers/hwmon/lis3lv02d.h
29688--- linux-2.6.32.43/drivers/hwmon/lis3lv02d.h 2011-03-27 14:31:47.000000000 -0400
29689+++ linux-2.6.32.43/drivers/hwmon/lis3lv02d.h 2011-05-04 17:56:28.000000000 -0400
29690@@ -201,7 +201,7 @@ struct lis3lv02d {
29691
29692 struct input_polled_dev *idev; /* input device */
29693 struct platform_device *pdev; /* platform device */
29694- atomic_t count; /* interrupt count after last read */
29695+ atomic_unchecked_t count; /* interrupt count after last read */
29696 int xcalib; /* calibrated null value for x */
29697 int ycalib; /* calibrated null value for y */
29698 int zcalib; /* calibrated null value for z */
29699diff -urNp linux-2.6.32.43/drivers/hwmon/sht15.c linux-2.6.32.43/drivers/hwmon/sht15.c
29700--- linux-2.6.32.43/drivers/hwmon/sht15.c 2011-03-27 14:31:47.000000000 -0400
29701+++ linux-2.6.32.43/drivers/hwmon/sht15.c 2011-05-04 17:56:28.000000000 -0400
29702@@ -112,7 +112,7 @@ struct sht15_data {
29703 int supply_uV;
29704 int supply_uV_valid;
29705 struct work_struct update_supply_work;
29706- atomic_t interrupt_handled;
29707+ atomic_unchecked_t interrupt_handled;
29708 };
29709
29710 /**
29711@@ -245,13 +245,13 @@ static inline int sht15_update_single_va
29712 return ret;
29713
29714 gpio_direction_input(data->pdata->gpio_data);
29715- atomic_set(&data->interrupt_handled, 0);
29716+ atomic_set_unchecked(&data->interrupt_handled, 0);
29717
29718 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29719 if (gpio_get_value(data->pdata->gpio_data) == 0) {
29720 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
29721 /* Only relevant if the interrupt hasn't occured. */
29722- if (!atomic_read(&data->interrupt_handled))
29723+ if (!atomic_read_unchecked(&data->interrupt_handled))
29724 schedule_work(&data->read_work);
29725 }
29726 ret = wait_event_timeout(data->wait_queue,
29727@@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired
29728 struct sht15_data *data = d;
29729 /* First disable the interrupt */
29730 disable_irq_nosync(irq);
29731- atomic_inc(&data->interrupt_handled);
29732+ atomic_inc_unchecked(&data->interrupt_handled);
29733 /* Then schedule a reading work struct */
29734 if (data->flag != SHT15_READING_NOTHING)
29735 schedule_work(&data->read_work);
29736@@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo
29737 here as could have gone low in meantime so verify
29738 it hasn't!
29739 */
29740- atomic_set(&data->interrupt_handled, 0);
29741+ atomic_set_unchecked(&data->interrupt_handled, 0);
29742 enable_irq(gpio_to_irq(data->pdata->gpio_data));
29743 /* If still not occured or another handler has been scheduled */
29744 if (gpio_get_value(data->pdata->gpio_data)
29745- || atomic_read(&data->interrupt_handled))
29746+ || atomic_read_unchecked(&data->interrupt_handled))
29747 return;
29748 }
29749 /* Read the data back from the device */
29750diff -urNp linux-2.6.32.43/drivers/hwmon/w83791d.c linux-2.6.32.43/drivers/hwmon/w83791d.c
29751--- linux-2.6.32.43/drivers/hwmon/w83791d.c 2011-03-27 14:31:47.000000000 -0400
29752+++ linux-2.6.32.43/drivers/hwmon/w83791d.c 2011-04-17 15:56:46.000000000 -0400
29753@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
29754 struct i2c_board_info *info);
29755 static int w83791d_remove(struct i2c_client *client);
29756
29757-static int w83791d_read(struct i2c_client *client, u8 register);
29758-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
29759+static int w83791d_read(struct i2c_client *client, u8 reg);
29760+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
29761 static struct w83791d_data *w83791d_update_device(struct device *dev);
29762
29763 #ifdef DEBUG
29764diff -urNp linux-2.6.32.43/drivers/ide/ide-cd.c linux-2.6.32.43/drivers/ide/ide-cd.c
29765--- linux-2.6.32.43/drivers/ide/ide-cd.c 2011-03-27 14:31:47.000000000 -0400
29766+++ linux-2.6.32.43/drivers/ide/ide-cd.c 2011-04-17 15:56:46.000000000 -0400
29767@@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
29768 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
29769 if ((unsigned long)buf & alignment
29770 || blk_rq_bytes(rq) & q->dma_pad_mask
29771- || object_is_on_stack(buf))
29772+ || object_starts_on_stack(buf))
29773 drive->dma = 0;
29774 }
29775 }
29776diff -urNp linux-2.6.32.43/drivers/ide/ide-floppy.c linux-2.6.32.43/drivers/ide/ide-floppy.c
29777--- linux-2.6.32.43/drivers/ide/ide-floppy.c 2011-03-27 14:31:47.000000000 -0400
29778+++ linux-2.6.32.43/drivers/ide/ide-floppy.c 2011-05-16 21:46:57.000000000 -0400
29779@@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_d
29780 u8 pc_buf[256], header_len, desc_cnt;
29781 int i, rc = 1, blocks, length;
29782
29783+ pax_track_stack();
29784+
29785 ide_debug_log(IDE_DBG_FUNC, "enter");
29786
29787 drive->bios_cyl = 0;
29788diff -urNp linux-2.6.32.43/drivers/ide/setup-pci.c linux-2.6.32.43/drivers/ide/setup-pci.c
29789--- linux-2.6.32.43/drivers/ide/setup-pci.c 2011-03-27 14:31:47.000000000 -0400
29790+++ linux-2.6.32.43/drivers/ide/setup-pci.c 2011-05-16 21:46:57.000000000 -0400
29791@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
29792 int ret, i, n_ports = dev2 ? 4 : 2;
29793 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
29794
29795+ pax_track_stack();
29796+
29797 for (i = 0; i < n_ports / 2; i++) {
29798 ret = ide_setup_pci_controller(pdev[i], d, !i);
29799 if (ret < 0)
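The ide-floppy and setup-pci hunks (and many driver hunks below) insert pax_track_stack() at the top of functions with unusually large stack frames so PaX's STACKLEAK-style stack sanitization can account for the extra depth. A compile-able sketch of the pattern; the no-op macro and the 256-byte buffer are stand-ins, not the real grsecurity helper:

/* Sketch of the pattern: functions that put big buffers on the stack announce
 * the fact up front. pax_track_stack() here is a no-op stub; the real helper
 * records the stack depth so it can later be cleared. */
#include <string.h>
#include <stdio.h>

#define pax_track_stack() do { } while (0)      /* stand-in, not the real macro */

static int get_capacity(void)
{
        unsigned char pc_buf[256];              /* large on-stack buffer */

        pax_track_stack();

        memset(pc_buf, 0, sizeof(pc_buf));
        /* ... fill pc_buf from the device and parse it ... */
        return pc_buf[0];
}

int main(void)
{
        printf("capacity probe returned %d\n", get_capacity());
        return 0;
}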
29800diff -urNp linux-2.6.32.43/drivers/ieee1394/dv1394.c linux-2.6.32.43/drivers/ieee1394/dv1394.c
29801--- linux-2.6.32.43/drivers/ieee1394/dv1394.c 2011-03-27 14:31:47.000000000 -0400
29802+++ linux-2.6.32.43/drivers/ieee1394/dv1394.c 2011-04-23 12:56:11.000000000 -0400
29803@@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
29804 based upon DIF section and sequence
29805 */
29806
29807-static void inline
29808+static inline void
29809 frame_put_packet (struct frame *f, struct packet *p)
29810 {
29811 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
29812diff -urNp linux-2.6.32.43/drivers/ieee1394/hosts.c linux-2.6.32.43/drivers/ieee1394/hosts.c
29813--- linux-2.6.32.43/drivers/ieee1394/hosts.c 2011-03-27 14:31:47.000000000 -0400
29814+++ linux-2.6.32.43/drivers/ieee1394/hosts.c 2011-04-17 15:56:46.000000000 -0400
29815@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
29816 }
29817
29818 static struct hpsb_host_driver dummy_driver = {
29819+ .name = "dummy",
29820 .transmit_packet = dummy_transmit_packet,
29821 .devctl = dummy_devctl,
29822 .isoctl = dummy_isoctl
29823diff -urNp linux-2.6.32.43/drivers/ieee1394/init_ohci1394_dma.c linux-2.6.32.43/drivers/ieee1394/init_ohci1394_dma.c
29824--- linux-2.6.32.43/drivers/ieee1394/init_ohci1394_dma.c 2011-03-27 14:31:47.000000000 -0400
29825+++ linux-2.6.32.43/drivers/ieee1394/init_ohci1394_dma.c 2011-04-17 15:56:46.000000000 -0400
29826@@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_con
29827 for (func = 0; func < 8; func++) {
29828 u32 class = read_pci_config(num,slot,func,
29829 PCI_CLASS_REVISION);
29830- if ((class == 0xffffffff))
29831+ if (class == 0xffffffff)
29832 continue; /* No device at this func */
29833
29834 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
29835diff -urNp linux-2.6.32.43/drivers/ieee1394/ohci1394.c linux-2.6.32.43/drivers/ieee1394/ohci1394.c
29836--- linux-2.6.32.43/drivers/ieee1394/ohci1394.c 2011-03-27 14:31:47.000000000 -0400
29837+++ linux-2.6.32.43/drivers/ieee1394/ohci1394.c 2011-04-23 12:56:11.000000000 -0400
29838@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
29839 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
29840
29841 /* Module Parameters */
29842-static int phys_dma = 1;
29843+static int phys_dma;
29844 module_param(phys_dma, int, 0444);
29845-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
29846+MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
29847
29848 static void dma_trm_tasklet(unsigned long data);
29849 static void dma_trm_reset(struct dma_trm_ctx *d);
29850diff -urNp linux-2.6.32.43/drivers/ieee1394/sbp2.c linux-2.6.32.43/drivers/ieee1394/sbp2.c
29851--- linux-2.6.32.43/drivers/ieee1394/sbp2.c 2011-03-27 14:31:47.000000000 -0400
29852+++ linux-2.6.32.43/drivers/ieee1394/sbp2.c 2011-04-23 12:56:11.000000000 -0400
29853@@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
29854 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
29855 MODULE_LICENSE("GPL");
29856
29857-static int sbp2_module_init(void)
29858+static int __init sbp2_module_init(void)
29859 {
29860 int ret;
29861
29862diff -urNp linux-2.6.32.43/drivers/infiniband/core/cm.c linux-2.6.32.43/drivers/infiniband/core/cm.c
29863--- linux-2.6.32.43/drivers/infiniband/core/cm.c 2011-03-27 14:31:47.000000000 -0400
29864+++ linux-2.6.32.43/drivers/infiniband/core/cm.c 2011-04-17 15:56:46.000000000 -0400
29865@@ -112,7 +112,7 @@ static char const counter_group_names[CM
29866
29867 struct cm_counter_group {
29868 struct kobject obj;
29869- atomic_long_t counter[CM_ATTR_COUNT];
29870+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
29871 };
29872
29873 struct cm_counter_attribute {
29874@@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm
29875 struct ib_mad_send_buf *msg = NULL;
29876 int ret;
29877
29878- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29879+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29880 counter[CM_REQ_COUNTER]);
29881
29882 /* Quick state check to discard duplicate REQs. */
29883@@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm
29884 if (!cm_id_priv)
29885 return;
29886
29887- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29888+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29889 counter[CM_REP_COUNTER]);
29890 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
29891 if (ret)
29892@@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work
29893 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
29894 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
29895 spin_unlock_irq(&cm_id_priv->lock);
29896- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29897+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29898 counter[CM_RTU_COUNTER]);
29899 goto out;
29900 }
29901@@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor
29902 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
29903 dreq_msg->local_comm_id);
29904 if (!cm_id_priv) {
29905- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29906+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29907 counter[CM_DREQ_COUNTER]);
29908 cm_issue_drep(work->port, work->mad_recv_wc);
29909 return -EINVAL;
29910@@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor
29911 case IB_CM_MRA_REP_RCVD:
29912 break;
29913 case IB_CM_TIMEWAIT:
29914- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29915+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29916 counter[CM_DREQ_COUNTER]);
29917 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29918 goto unlock;
29919@@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor
29920 cm_free_msg(msg);
29921 goto deref;
29922 case IB_CM_DREQ_RCVD:
29923- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29924+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29925 counter[CM_DREQ_COUNTER]);
29926 goto unlock;
29927 default:
29928@@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work
29929 ib_modify_mad(cm_id_priv->av.port->mad_agent,
29930 cm_id_priv->msg, timeout)) {
29931 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
29932- atomic_long_inc(&work->port->
29933+ atomic_long_inc_unchecked(&work->port->
29934 counter_group[CM_RECV_DUPLICATES].
29935 counter[CM_MRA_COUNTER]);
29936 goto out;
29937@@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work
29938 break;
29939 case IB_CM_MRA_REQ_RCVD:
29940 case IB_CM_MRA_REP_RCVD:
29941- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29942+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29943 counter[CM_MRA_COUNTER]);
29944 /* fall through */
29945 default:
29946@@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work
29947 case IB_CM_LAP_IDLE:
29948 break;
29949 case IB_CM_MRA_LAP_SENT:
29950- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29951+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29952 counter[CM_LAP_COUNTER]);
29953 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
29954 goto unlock;
29955@@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work
29956 cm_free_msg(msg);
29957 goto deref;
29958 case IB_CM_LAP_RCVD:
29959- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29960+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29961 counter[CM_LAP_COUNTER]);
29962 goto unlock;
29963 default:
29964@@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm
29965 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
29966 if (cur_cm_id_priv) {
29967 spin_unlock_irq(&cm.lock);
29968- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
29969+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
29970 counter[CM_SIDR_REQ_COUNTER]);
29971 goto out; /* Duplicate message. */
29972 }
29973@@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
29974 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
29975 msg->retries = 1;
29976
29977- atomic_long_add(1 + msg->retries,
29978+ atomic_long_add_unchecked(1 + msg->retries,
29979 &port->counter_group[CM_XMIT].counter[attr_index]);
29980 if (msg->retries)
29981- atomic_long_add(msg->retries,
29982+ atomic_long_add_unchecked(msg->retries,
29983 &port->counter_group[CM_XMIT_RETRIES].
29984 counter[attr_index]);
29985
29986@@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
29987 }
29988
29989 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
29990- atomic_long_inc(&port->counter_group[CM_RECV].
29991+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
29992 counter[attr_id - CM_ATTR_ID_OFFSET]);
29993
29994 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
29995@@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct ko
29996 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
29997
29998 return sprintf(buf, "%ld\n",
29999- atomic_long_read(&group->counter[cm_attr->index]));
30000+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
30001 }
30002
30003-static struct sysfs_ops cm_counter_ops = {
30004+static const struct sysfs_ops cm_counter_ops = {
30005 .show = cm_show_counter
30006 };
30007
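Besides the counter conversion, the tail of the cm.c hunk (like the infiniband sysfs.c hunk below) marks the sysfs_ops table const, so the function-pointer table is emitted into read-only memory and cannot be retargeted at runtime. The same idea in plain C, with made-up names:

/* Constifying a table of function pointers: the ops object lands in .rodata,
 * so a stray or malicious write cannot swap the callbacks. Names are made up
 * for illustration. */
#include <stdio.h>

struct demo_ops {
        void (*show)(const char *what);
};

static void real_show(const char *what)
{
        printf("show: %s\n", what);
}

static const struct demo_ops demo_counter_ops = {
        .show = real_show,
};

int main(void)
{
        demo_counter_ops.show("counters");
        /* demo_counter_ops.show = NULL;  -- would not compile: object is const */
        return 0;
}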
30008diff -urNp linux-2.6.32.43/drivers/infiniband/core/fmr_pool.c linux-2.6.32.43/drivers/infiniband/core/fmr_pool.c
30009--- linux-2.6.32.43/drivers/infiniband/core/fmr_pool.c 2011-03-27 14:31:47.000000000 -0400
30010+++ linux-2.6.32.43/drivers/infiniband/core/fmr_pool.c 2011-05-04 17:56:28.000000000 -0400
30011@@ -97,8 +97,8 @@ struct ib_fmr_pool {
30012
30013 struct task_struct *thread;
30014
30015- atomic_t req_ser;
30016- atomic_t flush_ser;
30017+ atomic_unchecked_t req_ser;
30018+ atomic_unchecked_t flush_ser;
30019
30020 wait_queue_head_t force_wait;
30021 };
30022@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
30023 struct ib_fmr_pool *pool = pool_ptr;
30024
30025 do {
30026- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
30027+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
30028 ib_fmr_batch_release(pool);
30029
30030- atomic_inc(&pool->flush_ser);
30031+ atomic_inc_unchecked(&pool->flush_ser);
30032 wake_up_interruptible(&pool->force_wait);
30033
30034 if (pool->flush_function)
30035@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
30036 }
30037
30038 set_current_state(TASK_INTERRUPTIBLE);
30039- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
30040+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
30041 !kthread_should_stop())
30042 schedule();
30043 __set_current_state(TASK_RUNNING);
30044@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
30045 pool->dirty_watermark = params->dirty_watermark;
30046 pool->dirty_len = 0;
30047 spin_lock_init(&pool->pool_lock);
30048- atomic_set(&pool->req_ser, 0);
30049- atomic_set(&pool->flush_ser, 0);
30050+ atomic_set_unchecked(&pool->req_ser, 0);
30051+ atomic_set_unchecked(&pool->flush_ser, 0);
30052 init_waitqueue_head(&pool->force_wait);
30053
30054 pool->thread = kthread_run(ib_fmr_cleanup_thread,
30055@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
30056 }
30057 spin_unlock_irq(&pool->pool_lock);
30058
30059- serial = atomic_inc_return(&pool->req_ser);
30060+ serial = atomic_inc_return_unchecked(&pool->req_ser);
30061 wake_up_process(pool->thread);
30062
30063 if (wait_event_interruptible(pool->force_wait,
30064- atomic_read(&pool->flush_ser) - serial >= 0))
30065+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
30066 return -EINTR;
30067
30068 return 0;
30069@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
30070 } else {
30071 list_add_tail(&fmr->list, &pool->dirty_list);
30072 if (++pool->dirty_len >= pool->dirty_watermark) {
30073- atomic_inc(&pool->req_ser);
30074+ atomic_inc_unchecked(&pool->req_ser);
30075 wake_up_process(pool->thread);
30076 }
30077 }
30078diff -urNp linux-2.6.32.43/drivers/infiniband/core/sysfs.c linux-2.6.32.43/drivers/infiniband/core/sysfs.c
30079--- linux-2.6.32.43/drivers/infiniband/core/sysfs.c 2011-03-27 14:31:47.000000000 -0400
30080+++ linux-2.6.32.43/drivers/infiniband/core/sysfs.c 2011-04-17 15:56:46.000000000 -0400
30081@@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
30082 return port_attr->show(p, port_attr, buf);
30083 }
30084
30085-static struct sysfs_ops port_sysfs_ops = {
30086+static const struct sysfs_ops port_sysfs_ops = {
30087 .show = port_attr_show
30088 };
30089
30090diff -urNp linux-2.6.32.43/drivers/infiniband/core/uverbs_marshall.c linux-2.6.32.43/drivers/infiniband/core/uverbs_marshall.c
30091--- linux-2.6.32.43/drivers/infiniband/core/uverbs_marshall.c 2011-03-27 14:31:47.000000000 -0400
30092+++ linux-2.6.32.43/drivers/infiniband/core/uverbs_marshall.c 2011-04-17 15:56:46.000000000 -0400
30093@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_u
30094 dst->grh.sgid_index = src->grh.sgid_index;
30095 dst->grh.hop_limit = src->grh.hop_limit;
30096 dst->grh.traffic_class = src->grh.traffic_class;
30097+ memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
30098 dst->dlid = src->dlid;
30099 dst->sl = src->sl;
30100 dst->src_path_bits = src->src_path_bits;
30101 dst->static_rate = src->static_rate;
30102 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
30103 dst->port_num = src->port_num;
30104+ dst->reserved = 0;
30105 }
30106 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
30107
30108 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
30109 struct ib_qp_attr *src)
30110 {
30111+ dst->qp_state = src->qp_state;
30112 dst->cur_qp_state = src->cur_qp_state;
30113 dst->path_mtu = src->path_mtu;
30114 dst->path_mig_state = src->path_mig_state;
30115@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_u
30116 dst->rnr_retry = src->rnr_retry;
30117 dst->alt_port_num = src->alt_port_num;
30118 dst->alt_timeout = src->alt_timeout;
30119+ memset(dst->reserved, 0, sizeof(dst->reserved));
30120 }
30121 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
30122
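The uverbs_marshall hunk zeroes the reserved members before the structures are copied back to user space; otherwise whatever stale kernel bytes happened to occupy those untouched fields would leak out. A userspace model of the same fix is sketched below; it clears the whole destination up front (the hunk above clears just the reserved fields, which covers the same bytes), and write_to_user() is only a stand-in for copy_to_user():

/* Model of the infoleak fix: clear the whole destination (reserved fields and
 * padding included) before filling in the members you mean to expose. */
#include <string.h>
#include <stdio.h>

struct user_ah_attr {
        unsigned char dlid;
        /* the compiler may insert padding here */
        unsigned int  sl;
        unsigned int  reserved;         /* must not carry stale kernel bytes */
};

static void write_to_user(const void *src, size_t len)
{
        const unsigned char *p = src;   /* stand-in for copy_to_user(): just dump the bytes */
        for (size_t i = 0; i < len; i++)
                printf("%02x ", p[i]);
        printf("\n");
}

static void marshal(struct user_ah_attr *dst, unsigned char dlid, unsigned int sl)
{
        memset(dst, 0, sizeof(*dst));   /* the added hardening step */
        dst->dlid = dlid;
        dst->sl = sl;
}

int main(void)
{
        struct user_ah_attr out;

        marshal(&out, 5, 1);
        write_to_user(&out, sizeof(out));
        return 0;
}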
30123diff -urNp linux-2.6.32.43/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.32.43/drivers/infiniband/hw/ipath/ipath_fs.c
30124--- linux-2.6.32.43/drivers/infiniband/hw/ipath/ipath_fs.c 2011-03-27 14:31:47.000000000 -0400
30125+++ linux-2.6.32.43/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-16 21:46:57.000000000 -0400
30126@@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(stru
30127 struct infinipath_counters counters;
30128 struct ipath_devdata *dd;
30129
30130+ pax_track_stack();
30131+
30132 dd = file->f_path.dentry->d_inode->i_private;
30133 dd->ipath_f_read_counters(dd, &counters);
30134
30135diff -urNp linux-2.6.32.43/drivers/infiniband/hw/nes/nes.c linux-2.6.32.43/drivers/infiniband/hw/nes/nes.c
30136--- linux-2.6.32.43/drivers/infiniband/hw/nes/nes.c 2011-03-27 14:31:47.000000000 -0400
30137+++ linux-2.6.32.43/drivers/infiniband/hw/nes/nes.c 2011-05-04 17:56:28.000000000 -0400
30138@@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
30139 LIST_HEAD(nes_adapter_list);
30140 static LIST_HEAD(nes_dev_list);
30141
30142-atomic_t qps_destroyed;
30143+atomic_unchecked_t qps_destroyed;
30144
30145 static unsigned int ee_flsh_adapter;
30146 static unsigned int sysfs_nonidx_addr;
30147@@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str
30148 struct nes_adapter *nesadapter = nesdev->nesadapter;
30149 u32 qp_id;
30150
30151- atomic_inc(&qps_destroyed);
30152+ atomic_inc_unchecked(&qps_destroyed);
30153
30154 /* Free the control structures */
30155
30156diff -urNp linux-2.6.32.43/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.32.43/drivers/infiniband/hw/nes/nes_cm.c
30157--- linux-2.6.32.43/drivers/infiniband/hw/nes/nes_cm.c 2011-03-27 14:31:47.000000000 -0400
30158+++ linux-2.6.32.43/drivers/infiniband/hw/nes/nes_cm.c 2011-05-04 17:56:28.000000000 -0400
30159@@ -69,11 +69,11 @@ u32 cm_packets_received;
30160 u32 cm_listens_created;
30161 u32 cm_listens_destroyed;
30162 u32 cm_backlog_drops;
30163-atomic_t cm_loopbacks;
30164-atomic_t cm_nodes_created;
30165-atomic_t cm_nodes_destroyed;
30166-atomic_t cm_accel_dropped_pkts;
30167-atomic_t cm_resets_recvd;
30168+atomic_unchecked_t cm_loopbacks;
30169+atomic_unchecked_t cm_nodes_created;
30170+atomic_unchecked_t cm_nodes_destroyed;
30171+atomic_unchecked_t cm_accel_dropped_pkts;
30172+atomic_unchecked_t cm_resets_recvd;
30173
30174 static inline int mini_cm_accelerated(struct nes_cm_core *,
30175 struct nes_cm_node *);
30176@@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
30177
30178 static struct nes_cm_core *g_cm_core;
30179
30180-atomic_t cm_connects;
30181-atomic_t cm_accepts;
30182-atomic_t cm_disconnects;
30183-atomic_t cm_closes;
30184-atomic_t cm_connecteds;
30185-atomic_t cm_connect_reqs;
30186-atomic_t cm_rejects;
30187+atomic_unchecked_t cm_connects;
30188+atomic_unchecked_t cm_accepts;
30189+atomic_unchecked_t cm_disconnects;
30190+atomic_unchecked_t cm_closes;
30191+atomic_unchecked_t cm_connecteds;
30192+atomic_unchecked_t cm_connect_reqs;
30193+atomic_unchecked_t cm_rejects;
30194
30195
30196 /**
30197@@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(
30198 cm_node->rem_mac);
30199
30200 add_hte_node(cm_core, cm_node);
30201- atomic_inc(&cm_nodes_created);
30202+ atomic_inc_unchecked(&cm_nodes_created);
30203
30204 return cm_node;
30205 }
30206@@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm
30207 }
30208
30209 atomic_dec(&cm_core->node_cnt);
30210- atomic_inc(&cm_nodes_destroyed);
30211+ atomic_inc_unchecked(&cm_nodes_destroyed);
30212 nesqp = cm_node->nesqp;
30213 if (nesqp) {
30214 nesqp->cm_node = NULL;
30215@@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm
30216
30217 static void drop_packet(struct sk_buff *skb)
30218 {
30219- atomic_inc(&cm_accel_dropped_pkts);
30220+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30221 dev_kfree_skb_any(skb);
30222 }
30223
30224@@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm
30225
30226 int reset = 0; /* whether to send reset in case of err.. */
30227 int passive_state;
30228- atomic_inc(&cm_resets_recvd);
30229+ atomic_inc_unchecked(&cm_resets_recvd);
30230 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
30231 " refcnt=%d\n", cm_node, cm_node->state,
30232 atomic_read(&cm_node->ref_count));
30233@@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne
30234 rem_ref_cm_node(cm_node->cm_core, cm_node);
30235 return NULL;
30236 }
30237- atomic_inc(&cm_loopbacks);
30238+ atomic_inc_unchecked(&cm_loopbacks);
30239 loopbackremotenode->loopbackpartner = cm_node;
30240 loopbackremotenode->tcp_cntxt.rcv_wscale =
30241 NES_CM_DEFAULT_RCV_WND_SCALE;
30242@@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c
30243 add_ref_cm_node(cm_node);
30244 } else if (cm_node->state == NES_CM_STATE_TSA) {
30245 rem_ref_cm_node(cm_core, cm_node);
30246- atomic_inc(&cm_accel_dropped_pkts);
30247+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
30248 dev_kfree_skb_any(skb);
30249 break;
30250 }
30251@@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne
30252
30253 if ((cm_id) && (cm_id->event_handler)) {
30254 if (issue_disconn) {
30255- atomic_inc(&cm_disconnects);
30256+ atomic_inc_unchecked(&cm_disconnects);
30257 cm_event.event = IW_CM_EVENT_DISCONNECT;
30258 cm_event.status = disconn_status;
30259 cm_event.local_addr = cm_id->local_addr;
30260@@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne
30261 }
30262
30263 if (issue_close) {
30264- atomic_inc(&cm_closes);
30265+ atomic_inc_unchecked(&cm_closes);
30266 nes_disconnect(nesqp, 1);
30267
30268 cm_id->provider_data = nesqp;
30269@@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
30270
30271 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
30272 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
30273- atomic_inc(&cm_accepts);
30274+ atomic_inc_unchecked(&cm_accepts);
30275
30276 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
30277 atomic_read(&nesvnic->netdev->refcnt));
30278@@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
30279
30280 struct nes_cm_core *cm_core;
30281
30282- atomic_inc(&cm_rejects);
30283+ atomic_inc_unchecked(&cm_rejects);
30284 cm_node = (struct nes_cm_node *) cm_id->provider_data;
30285 loopback = cm_node->loopbackpartner;
30286 cm_core = cm_node->cm_core;
30287@@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id,
30288 ntohl(cm_id->local_addr.sin_addr.s_addr),
30289 ntohs(cm_id->local_addr.sin_port));
30290
30291- atomic_inc(&cm_connects);
30292+ atomic_inc_unchecked(&cm_connects);
30293 nesqp->active_conn = 1;
30294
30295 /* cache the cm_id in the qp */
30296@@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne
30297 if (nesqp->destroyed) {
30298 return;
30299 }
30300- atomic_inc(&cm_connecteds);
30301+ atomic_inc_unchecked(&cm_connecteds);
30302 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
30303 " local port 0x%04X. jiffies = %lu.\n",
30304 nesqp->hwqp.qp_id,
30305@@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm
30306
30307 ret = cm_id->event_handler(cm_id, &cm_event);
30308 cm_id->add_ref(cm_id);
30309- atomic_inc(&cm_closes);
30310+ atomic_inc_unchecked(&cm_closes);
30311 cm_event.event = IW_CM_EVENT_CLOSE;
30312 cm_event.status = IW_CM_EVENT_STATUS_OK;
30313 cm_event.provider_data = cm_id->provider_data;
30314@@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_
30315 return;
30316 cm_id = cm_node->cm_id;
30317
30318- atomic_inc(&cm_connect_reqs);
30319+ atomic_inc_unchecked(&cm_connect_reqs);
30320 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30321 cm_node, cm_id, jiffies);
30322
30323@@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n
30324 return;
30325 cm_id = cm_node->cm_id;
30326
30327- atomic_inc(&cm_connect_reqs);
30328+ atomic_inc_unchecked(&cm_connect_reqs);
30329 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
30330 cm_node, cm_id, jiffies);
30331
30332diff -urNp linux-2.6.32.43/drivers/infiniband/hw/nes/nes.h linux-2.6.32.43/drivers/infiniband/hw/nes/nes.h
30333--- linux-2.6.32.43/drivers/infiniband/hw/nes/nes.h 2011-03-27 14:31:47.000000000 -0400
30334+++ linux-2.6.32.43/drivers/infiniband/hw/nes/nes.h 2011-05-04 17:56:28.000000000 -0400
30335@@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
30336 extern unsigned int wqm_quanta;
30337 extern struct list_head nes_adapter_list;
30338
30339-extern atomic_t cm_connects;
30340-extern atomic_t cm_accepts;
30341-extern atomic_t cm_disconnects;
30342-extern atomic_t cm_closes;
30343-extern atomic_t cm_connecteds;
30344-extern atomic_t cm_connect_reqs;
30345-extern atomic_t cm_rejects;
30346-extern atomic_t mod_qp_timouts;
30347-extern atomic_t qps_created;
30348-extern atomic_t qps_destroyed;
30349-extern atomic_t sw_qps_destroyed;
30350+extern atomic_unchecked_t cm_connects;
30351+extern atomic_unchecked_t cm_accepts;
30352+extern atomic_unchecked_t cm_disconnects;
30353+extern atomic_unchecked_t cm_closes;
30354+extern atomic_unchecked_t cm_connecteds;
30355+extern atomic_unchecked_t cm_connect_reqs;
30356+extern atomic_unchecked_t cm_rejects;
30357+extern atomic_unchecked_t mod_qp_timouts;
30358+extern atomic_unchecked_t qps_created;
30359+extern atomic_unchecked_t qps_destroyed;
30360+extern atomic_unchecked_t sw_qps_destroyed;
30361 extern u32 mh_detected;
30362 extern u32 mh_pauses_sent;
30363 extern u32 cm_packets_sent;
30364@@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
30365 extern u32 cm_listens_created;
30366 extern u32 cm_listens_destroyed;
30367 extern u32 cm_backlog_drops;
30368-extern atomic_t cm_loopbacks;
30369-extern atomic_t cm_nodes_created;
30370-extern atomic_t cm_nodes_destroyed;
30371-extern atomic_t cm_accel_dropped_pkts;
30372-extern atomic_t cm_resets_recvd;
30373+extern atomic_unchecked_t cm_loopbacks;
30374+extern atomic_unchecked_t cm_nodes_created;
30375+extern atomic_unchecked_t cm_nodes_destroyed;
30376+extern atomic_unchecked_t cm_accel_dropped_pkts;
30377+extern atomic_unchecked_t cm_resets_recvd;
30378
30379 extern u32 int_mod_timer_init;
30380 extern u32 int_mod_cq_depth_256;
30381diff -urNp linux-2.6.32.43/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.32.43/drivers/infiniband/hw/nes/nes_nic.c
30382--- linux-2.6.32.43/drivers/infiniband/hw/nes/nes_nic.c 2011-03-27 14:31:47.000000000 -0400
30383+++ linux-2.6.32.43/drivers/infiniband/hw/nes/nes_nic.c 2011-05-04 17:56:28.000000000 -0400
30384@@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats
30385 target_stat_values[++index] = mh_detected;
30386 target_stat_values[++index] = mh_pauses_sent;
30387 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
30388- target_stat_values[++index] = atomic_read(&cm_connects);
30389- target_stat_values[++index] = atomic_read(&cm_accepts);
30390- target_stat_values[++index] = atomic_read(&cm_disconnects);
30391- target_stat_values[++index] = atomic_read(&cm_connecteds);
30392- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
30393- target_stat_values[++index] = atomic_read(&cm_rejects);
30394- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
30395- target_stat_values[++index] = atomic_read(&qps_created);
30396- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
30397- target_stat_values[++index] = atomic_read(&qps_destroyed);
30398- target_stat_values[++index] = atomic_read(&cm_closes);
30399+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
30400+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
30401+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
30402+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
30403+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
30404+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
30405+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
30406+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
30407+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
30408+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
30409+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
30410 target_stat_values[++index] = cm_packets_sent;
30411 target_stat_values[++index] = cm_packets_bounced;
30412 target_stat_values[++index] = cm_packets_created;
30413@@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats
30414 target_stat_values[++index] = cm_listens_created;
30415 target_stat_values[++index] = cm_listens_destroyed;
30416 target_stat_values[++index] = cm_backlog_drops;
30417- target_stat_values[++index] = atomic_read(&cm_loopbacks);
30418- target_stat_values[++index] = atomic_read(&cm_nodes_created);
30419- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
30420- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
30421- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
30422+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
30423+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
30424+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
30425+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
30426+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
30427 target_stat_values[++index] = int_mod_timer_init;
30428 target_stat_values[++index] = int_mod_cq_depth_1;
30429 target_stat_values[++index] = int_mod_cq_depth_4;
30430diff -urNp linux-2.6.32.43/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.32.43/drivers/infiniband/hw/nes/nes_verbs.c
30431--- linux-2.6.32.43/drivers/infiniband/hw/nes/nes_verbs.c 2011-03-27 14:31:47.000000000 -0400
30432+++ linux-2.6.32.43/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-04 17:56:28.000000000 -0400
30433@@ -45,9 +45,9 @@
30434
30435 #include <rdma/ib_umem.h>
30436
30437-atomic_t mod_qp_timouts;
30438-atomic_t qps_created;
30439-atomic_t sw_qps_destroyed;
30440+atomic_unchecked_t mod_qp_timouts;
30441+atomic_unchecked_t qps_created;
30442+atomic_unchecked_t sw_qps_destroyed;
30443
30444 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
30445
30446@@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc
30447 if (init_attr->create_flags)
30448 return ERR_PTR(-EINVAL);
30449
30450- atomic_inc(&qps_created);
30451+ atomic_inc_unchecked(&qps_created);
30452 switch (init_attr->qp_type) {
30453 case IB_QPT_RC:
30454 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
30455@@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *
30456 struct iw_cm_event cm_event;
30457 int ret;
30458
30459- atomic_inc(&sw_qps_destroyed);
30460+ atomic_inc_unchecked(&sw_qps_destroyed);
30461 nesqp->destroyed = 1;
30462
30463 /* Blow away the connection if it exists. */
30464diff -urNp linux-2.6.32.43/drivers/input/gameport/gameport.c linux-2.6.32.43/drivers/input/gameport/gameport.c
30465--- linux-2.6.32.43/drivers/input/gameport/gameport.c 2011-03-27 14:31:47.000000000 -0400
30466+++ linux-2.6.32.43/drivers/input/gameport/gameport.c 2011-05-04 17:56:28.000000000 -0400
30467@@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
30468 */
30469 static void gameport_init_port(struct gameport *gameport)
30470 {
30471- static atomic_t gameport_no = ATOMIC_INIT(0);
30472+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
30473
30474 __module_get(THIS_MODULE);
30475
30476 mutex_init(&gameport->drv_mutex);
30477 device_initialize(&gameport->dev);
30478- dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
30479+ dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
30480 gameport->dev.bus = &gameport_bus;
30481 gameport->dev.release = gameport_release_port;
30482 if (gameport->parent)
30483diff -urNp linux-2.6.32.43/drivers/input/input.c linux-2.6.32.43/drivers/input/input.c
30484--- linux-2.6.32.43/drivers/input/input.c 2011-03-27 14:31:47.000000000 -0400
30485+++ linux-2.6.32.43/drivers/input/input.c 2011-05-04 17:56:28.000000000 -0400
30486@@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
30487 */
30488 int input_register_device(struct input_dev *dev)
30489 {
30490- static atomic_t input_no = ATOMIC_INIT(0);
30491+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
30492 struct input_handler *handler;
30493 const char *path;
30494 int error;
30495@@ -1585,7 +1585,7 @@ int input_register_device(struct input_d
30496 dev->setkeycode = input_default_setkeycode;
30497
30498 dev_set_name(&dev->dev, "input%ld",
30499- (unsigned long) atomic_inc_return(&input_no) - 1);
30500+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
30501
30502 error = device_add(&dev->dev);
30503 if (error)
30504diff -urNp linux-2.6.32.43/drivers/input/joystick/sidewinder.c linux-2.6.32.43/drivers/input/joystick/sidewinder.c
30505--- linux-2.6.32.43/drivers/input/joystick/sidewinder.c 2011-03-27 14:31:47.000000000 -0400
30506+++ linux-2.6.32.43/drivers/input/joystick/sidewinder.c 2011-05-18 20:09:36.000000000 -0400
30507@@ -30,6 +30,7 @@
30508 #include <linux/kernel.h>
30509 #include <linux/module.h>
30510 #include <linux/slab.h>
30511+#include <linux/sched.h>
30512 #include <linux/init.h>
30513 #include <linux/input.h>
30514 #include <linux/gameport.h>
30515@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
30516 unsigned char buf[SW_LENGTH];
30517 int i;
30518
30519+ pax_track_stack();
30520+
30521 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
30522
30523 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
30524diff -urNp linux-2.6.32.43/drivers/input/joystick/xpad.c linux-2.6.32.43/drivers/input/joystick/xpad.c
30525--- linux-2.6.32.43/drivers/input/joystick/xpad.c 2011-03-27 14:31:47.000000000 -0400
30526+++ linux-2.6.32.43/drivers/input/joystick/xpad.c 2011-05-04 17:56:28.000000000 -0400
30527@@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas
30528
30529 static int xpad_led_probe(struct usb_xpad *xpad)
30530 {
30531- static atomic_t led_seq = ATOMIC_INIT(0);
30532+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
30533 long led_no;
30534 struct xpad_led *led;
30535 struct led_classdev *led_cdev;
30536@@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa
30537 if (!led)
30538 return -ENOMEM;
30539
30540- led_no = (long)atomic_inc_return(&led_seq) - 1;
30541+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
30542
30543 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
30544 led->xpad = xpad;
30545diff -urNp linux-2.6.32.43/drivers/input/serio/serio.c linux-2.6.32.43/drivers/input/serio/serio.c
30546--- linux-2.6.32.43/drivers/input/serio/serio.c 2011-03-27 14:31:47.000000000 -0400
30547+++ linux-2.6.32.43/drivers/input/serio/serio.c 2011-05-04 17:56:28.000000000 -0400
30548@@ -527,7 +527,7 @@ static void serio_release_port(struct de
30549 */
30550 static void serio_init_port(struct serio *serio)
30551 {
30552- static atomic_t serio_no = ATOMIC_INIT(0);
30553+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
30554
30555 __module_get(THIS_MODULE);
30556
30557@@ -536,7 +536,7 @@ static void serio_init_port(struct serio
30558 mutex_init(&serio->drv_mutex);
30559 device_initialize(&serio->dev);
30560 dev_set_name(&serio->dev, "serio%ld",
30561- (long)atomic_inc_return(&serio_no) - 1);
30562+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
30563 serio->dev.bus = &serio_bus;
30564 serio->dev.release = serio_release_port;
30565 if (serio->parent) {
30566diff -urNp linux-2.6.32.43/drivers/isdn/gigaset/common.c linux-2.6.32.43/drivers/isdn/gigaset/common.c
30567--- linux-2.6.32.43/drivers/isdn/gigaset/common.c 2011-03-27 14:31:47.000000000 -0400
30568+++ linux-2.6.32.43/drivers/isdn/gigaset/common.c 2011-04-17 15:56:46.000000000 -0400
30569@@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct
30570 cs->commands_pending = 0;
30571 cs->cur_at_seq = 0;
30572 cs->gotfwver = -1;
30573- cs->open_count = 0;
30574+ local_set(&cs->open_count, 0);
30575 cs->dev = NULL;
30576 cs->tty = NULL;
30577 cs->tty_dev = NULL;
30578diff -urNp linux-2.6.32.43/drivers/isdn/gigaset/gigaset.h linux-2.6.32.43/drivers/isdn/gigaset/gigaset.h
30579--- linux-2.6.32.43/drivers/isdn/gigaset/gigaset.h 2011-03-27 14:31:47.000000000 -0400
30580+++ linux-2.6.32.43/drivers/isdn/gigaset/gigaset.h 2011-04-17 15:56:46.000000000 -0400
30581@@ -34,6 +34,7 @@
30582 #include <linux/tty_driver.h>
30583 #include <linux/list.h>
30584 #include <asm/atomic.h>
30585+#include <asm/local.h>
30586
30587 #define GIG_VERSION {0,5,0,0}
30588 #define GIG_COMPAT {0,4,0,0}
30589@@ -446,7 +447,7 @@ struct cardstate {
30590 spinlock_t cmdlock;
30591 unsigned curlen, cmdbytes;
30592
30593- unsigned open_count;
30594+ local_t open_count;
30595 struct tty_struct *tty;
30596 struct tasklet_struct if_wake_tasklet;
30597 unsigned control_state;
30598diff -urNp linux-2.6.32.43/drivers/isdn/gigaset/interface.c linux-2.6.32.43/drivers/isdn/gigaset/interface.c
30599--- linux-2.6.32.43/drivers/isdn/gigaset/interface.c 2011-03-27 14:31:47.000000000 -0400
30600+++ linux-2.6.32.43/drivers/isdn/gigaset/interface.c 2011-04-17 15:56:46.000000000 -0400
30601@@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
30602 return -ERESTARTSYS; // FIXME -EINTR?
30603 tty->driver_data = cs;
30604
30605- ++cs->open_count;
30606-
30607- if (cs->open_count == 1) {
30608+ if (local_inc_return(&cs->open_count) == 1) {
30609 spin_lock_irqsave(&cs->lock, flags);
30610 cs->tty = tty;
30611 spin_unlock_irqrestore(&cs->lock, flags);
30612@@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
30613
30614 if (!cs->connected)
30615 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30616- else if (!cs->open_count)
30617+ else if (!local_read(&cs->open_count))
30618 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30619 else {
30620- if (!--cs->open_count) {
30621+ if (!local_dec_return(&cs->open_count)) {
30622 spin_lock_irqsave(&cs->lock, flags);
30623 cs->tty = NULL;
30624 spin_unlock_irqrestore(&cs->lock, flags);
30625@@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
30626 if (!cs->connected) {
30627 gig_dbg(DEBUG_IF, "not connected");
30628 retval = -ENODEV;
30629- } else if (!cs->open_count)
30630+ } else if (!local_read(&cs->open_count))
30631 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30632 else {
30633 retval = 0;
30634@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
30635 if (!cs->connected) {
30636 gig_dbg(DEBUG_IF, "not connected");
30637 retval = -ENODEV;
30638- } else if (!cs->open_count)
30639+ } else if (!local_read(&cs->open_count))
30640 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30641 else if (cs->mstate != MS_LOCKED) {
30642 dev_warn(cs->dev, "can't write to unlocked device\n");
30643@@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
30644 if (!cs->connected) {
30645 gig_dbg(DEBUG_IF, "not connected");
30646 retval = -ENODEV;
30647- } else if (!cs->open_count)
30648+ } else if (!local_read(&cs->open_count))
30649 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30650 else if (cs->mstate != MS_LOCKED) {
30651 dev_warn(cs->dev, "can't write to unlocked device\n");
30652@@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
30653
30654 if (!cs->connected)
30655 gig_dbg(DEBUG_IF, "not connected");
30656- else if (!cs->open_count)
30657+ else if (!local_read(&cs->open_count))
30658 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30659 else if (cs->mstate != MS_LOCKED)
30660 dev_warn(cs->dev, "can't write to unlocked device\n");
30661@@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
30662
30663 if (!cs->connected)
30664 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30665- else if (!cs->open_count)
30666+ else if (!local_read(&cs->open_count))
30667 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30668 else {
30669 //FIXME
30670@@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
30671
30672 if (!cs->connected)
30673 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
30674- else if (!cs->open_count)
30675+ else if (!local_read(&cs->open_count))
30676 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30677 else {
30678 //FIXME
30679@@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
30680 goto out;
30681 }
30682
30683- if (!cs->open_count) {
30684+ if (!local_read(&cs->open_count)) {
30685 dev_warn(cs->dev, "%s: device not opened\n", __func__);
30686 goto out;
30687 }
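The gigaset hunks replace the driver's plain open_count with an atomically updated counter (local_t) so the "first open" and "last close" transitions are decided by the return value of a single read-modify-write rather than a racy increment followed by a compare. A userspace sketch of that transition pattern with C11 atomics; the setup and teardown bodies are placeholders:

/* Open-count pattern from the gigaset hunks: run setup only when the count
 * goes 0 -> 1 and teardown only when it returns to 0, with one atomic op per
 * transition. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count;

static void device_open(void)
{
        if (atomic_fetch_add(&open_count, 1) + 1 == 1)
                printf("first open: attach tty, init hardware\n");
}

static void device_close(void)
{
        if (atomic_fetch_sub(&open_count, 1) - 1 == 0)
                printf("last close: detach tty, quiesce hardware\n");
}

int main(void)
{
        device_open();          /* 0 -> 1: setup runs */
        device_open();          /* 1 -> 2: nothing */
        device_close();         /* 2 -> 1: nothing */
        device_close();         /* 1 -> 0: teardown runs */
        return 0;
}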
30688diff -urNp linux-2.6.32.43/drivers/isdn/hardware/avm/b1.c linux-2.6.32.43/drivers/isdn/hardware/avm/b1.c
30689--- linux-2.6.32.43/drivers/isdn/hardware/avm/b1.c 2011-03-27 14:31:47.000000000 -0400
30690+++ linux-2.6.32.43/drivers/isdn/hardware/avm/b1.c 2011-04-17 15:56:46.000000000 -0400
30691@@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capilo
30692 }
30693 if (left) {
30694 if (t4file->user) {
30695- if (copy_from_user(buf, dp, left))
30696+ if (left > sizeof buf || copy_from_user(buf, dp, left))
30697 return -EFAULT;
30698 } else {
30699 memcpy(buf, dp, left);
30700@@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capilo
30701 }
30702 if (left) {
30703 if (config->user) {
30704- if (copy_from_user(buf, dp, left))
30705+ if (left > sizeof buf || copy_from_user(buf, dp, left))
30706 return -EFAULT;
30707 } else {
30708 memcpy(buf, dp, left);
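The b1.c hunks (and the icn.c hunk further down) refuse a user-supplied length that exceeds the fixed-size kernel buffer before calling copy_from_user, preventing the copy from overflowing it. The same guard in a standalone sketch, where fake_copy_from_user() merely stands in for the real primitive:

/* Guarding a user-controlled length against the destination size before
 * copying, as in the b1_load_t4file() hunk. */
#include <errno.h>
#include <string.h>
#include <stdio.h>

static int fake_copy_from_user(void *dst, const void *src, size_t len)
{
        memcpy(dst, src, len);  /* userspace stand-in; always "succeeds" */
        return 0;
}

static int load_block(const void *user_ptr, size_t left)
{
        char buf[256];

        if (left > sizeof(buf))                         /* the added check */
                return -EFAULT;
        if (fake_copy_from_user(buf, user_ptr, left))
                return -EFAULT;
        return (int)left;                               /* bytes accepted */
}

int main(void)
{
        char payload[512] = { 0 };

        printf("64 bytes  -> %d\n", load_block(payload, 64));
        printf("512 bytes -> %d\n", load_block(payload, 512));
        return 0;
}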
30709diff -urNp linux-2.6.32.43/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.32.43/drivers/isdn/hardware/eicon/capidtmf.c
30710--- linux-2.6.32.43/drivers/isdn/hardware/eicon/capidtmf.c 2011-03-27 14:31:47.000000000 -0400
30711+++ linux-2.6.32.43/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-16 21:46:57.000000000 -0400
30712@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
30713 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
30714 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
30715
30716+ pax_track_stack();
30717
30718 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
30719 {
30720diff -urNp linux-2.6.32.43/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.32.43/drivers/isdn/hardware/eicon/capifunc.c
30721--- linux-2.6.32.43/drivers/isdn/hardware/eicon/capifunc.c 2011-03-27 14:31:47.000000000 -0400
30722+++ linux-2.6.32.43/drivers/isdn/hardware/eicon/capifunc.c 2011-05-16 21:46:57.000000000 -0400
30723@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
30724 IDI_SYNC_REQ req;
30725 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30726
30727+ pax_track_stack();
30728+
30729 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30730
30731 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30732diff -urNp linux-2.6.32.43/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.32.43/drivers/isdn/hardware/eicon/diddfunc.c
30733--- linux-2.6.32.43/drivers/isdn/hardware/eicon/diddfunc.c 2011-03-27 14:31:47.000000000 -0400
30734+++ linux-2.6.32.43/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-16 21:46:57.000000000 -0400
30735@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
30736 IDI_SYNC_REQ req;
30737 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30738
30739+ pax_track_stack();
30740+
30741 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30742
30743 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30744diff -urNp linux-2.6.32.43/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.32.43/drivers/isdn/hardware/eicon/divasfunc.c
30745--- linux-2.6.32.43/drivers/isdn/hardware/eicon/divasfunc.c 2011-03-27 14:31:47.000000000 -0400
30746+++ linux-2.6.32.43/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-16 21:46:57.000000000 -0400
30747@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
30748 IDI_SYNC_REQ req;
30749 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30750
30751+ pax_track_stack();
30752+
30753 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30754
30755 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30756diff -urNp linux-2.6.32.43/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.32.43/drivers/isdn/hardware/eicon/idifunc.c
30757--- linux-2.6.32.43/drivers/isdn/hardware/eicon/idifunc.c 2011-03-27 14:31:47.000000000 -0400
30758+++ linux-2.6.32.43/drivers/isdn/hardware/eicon/idifunc.c 2011-05-16 21:46:57.000000000 -0400
30759@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
30760 IDI_SYNC_REQ req;
30761 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30762
30763+ pax_track_stack();
30764+
30765 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30766
30767 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30768diff -urNp linux-2.6.32.43/drivers/isdn/hardware/eicon/message.c linux-2.6.32.43/drivers/isdn/hardware/eicon/message.c
30769--- linux-2.6.32.43/drivers/isdn/hardware/eicon/message.c 2011-03-27 14:31:47.000000000 -0400
30770+++ linux-2.6.32.43/drivers/isdn/hardware/eicon/message.c 2011-05-16 21:46:57.000000000 -0400
30771@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
30772 dword d;
30773 word w;
30774
30775+ pax_track_stack();
30776+
30777 a = plci->adapter;
30778 Id = ((word)plci->Id<<8)|a->Id;
30779 PUT_WORD(&SS_Ind[4],0x0000);
30780@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
30781 word j, n, w;
30782 dword d;
30783
30784+ pax_track_stack();
30785+
30786
30787 for(i=0;i<8;i++) bp_parms[i].length = 0;
30788 for(i=0;i<2;i++) global_config[i].length = 0;
30789@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
30790 const byte llc3[] = {4,3,2,2,6,6,0};
30791 const byte header[] = {0,2,3,3,0,0,0};
30792
30793+ pax_track_stack();
30794+
30795 for(i=0;i<8;i++) bp_parms[i].length = 0;
30796 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
30797 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
30798@@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI
30799 word appl_number_group_type[MAX_APPL];
30800 PLCI *auxplci;
30801
30802+ pax_track_stack();
30803+
30804 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
30805
30806 if(!a->group_optimization_enabled)
30807diff -urNp linux-2.6.32.43/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.32.43/drivers/isdn/hardware/eicon/mntfunc.c
30808--- linux-2.6.32.43/drivers/isdn/hardware/eicon/mntfunc.c 2011-03-27 14:31:47.000000000 -0400
30809+++ linux-2.6.32.43/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-16 21:46:57.000000000 -0400
30810@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
30811 IDI_SYNC_REQ req;
30812 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
30813
30814+ pax_track_stack();
30815+
30816 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
30817
30818 for (x = 0; x < MAX_DESCRIPTORS; x++) {
30819diff -urNp linux-2.6.32.43/drivers/isdn/i4l/isdn_common.c linux-2.6.32.43/drivers/isdn/i4l/isdn_common.c
30820--- linux-2.6.32.43/drivers/isdn/i4l/isdn_common.c 2011-03-27 14:31:47.000000000 -0400
30821+++ linux-2.6.32.43/drivers/isdn/i4l/isdn_common.c 2011-05-16 21:46:57.000000000 -0400
30822@@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct f
30823 } iocpar;
30824 void __user *argp = (void __user *)arg;
30825
30826+ pax_track_stack();
30827+
30828 #define name iocpar.name
30829 #define bname iocpar.bname
30830 #define iocts iocpar.iocts
30831diff -urNp linux-2.6.32.43/drivers/isdn/icn/icn.c linux-2.6.32.43/drivers/isdn/icn/icn.c
30832--- linux-2.6.32.43/drivers/isdn/icn/icn.c 2011-03-27 14:31:47.000000000 -0400
30833+++ linux-2.6.32.43/drivers/isdn/icn/icn.c 2011-04-17 15:56:46.000000000 -0400
30834@@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len
30835 if (count > len)
30836 count = len;
30837 if (user) {
30838- if (copy_from_user(msg, buf, count))
30839+ if (count > sizeof msg || copy_from_user(msg, buf, count))
30840 return -EFAULT;
30841 } else
30842 memcpy(msg, buf, count);
30843diff -urNp linux-2.6.32.43/drivers/isdn/mISDN/socket.c linux-2.6.32.43/drivers/isdn/mISDN/socket.c
30844--- linux-2.6.32.43/drivers/isdn/mISDN/socket.c 2011-03-27 14:31:47.000000000 -0400
30845+++ linux-2.6.32.43/drivers/isdn/mISDN/socket.c 2011-04-17 15:56:46.000000000 -0400
30846@@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, uns
30847 if (dev) {
30848 struct mISDN_devinfo di;
30849
30850+ memset(&di, 0, sizeof(di));
30851 di.id = dev->id;
30852 di.Dprotocols = dev->Dprotocols;
30853 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
30854@@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, uns
30855 if (dev) {
30856 struct mISDN_devinfo di;
30857
30858+ memset(&di, 0, sizeof(di));
30859 di.id = dev->id;
30860 di.Dprotocols = dev->Dprotocols;
30861 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
30862diff -urNp linux-2.6.32.43/drivers/isdn/sc/interrupt.c linux-2.6.32.43/drivers/isdn/sc/interrupt.c
30863--- linux-2.6.32.43/drivers/isdn/sc/interrupt.c 2011-03-27 14:31:47.000000000 -0400
30864+++ linux-2.6.32.43/drivers/isdn/sc/interrupt.c 2011-04-17 15:56:46.000000000 -0400
30865@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy,
30866 }
30867 else if(callid>=0x0000 && callid<=0x7FFF)
30868 {
30869+ int len;
30870+
30871 pr_debug("%s: Got Incoming Call\n",
30872 sc_adapter[card]->devicename);
30873- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
30874- strcpy(setup.eazmsn,
30875- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
30876+ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
30877+ sizeof(setup.phone));
30878+ if (len >= sizeof(setup.phone))
30879+ continue;
30880+ len = strlcpy(setup.eazmsn,
30881+ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
30882+ sizeof(setup.eazmsn));
30883+ if (len >= sizeof(setup.eazmsn))
30884+ continue;
30885 setup.si1 = 7;
30886 setup.si2 = 0;
30887 setup.plan = 0;
30888@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy,
30889 * Handle a GetMyNumber Rsp
30890 */
30891 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
30892- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
30893+ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
30894+ rcvmsg.msg_data.byte_array,
30895+ sizeof(rcvmsg.msg_data.byte_array));
30896 continue;
30897 }
30898
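The sc/interrupt.c hunk replaces unbounded strcpy calls with strlcpy plus a truncation check, since the phone-number strings read back from the board may not fit the fixed-size setup fields. A bounded-copy sketch follows; bounded_copy() mimics strlcpy's return convention and the sizes are illustrative:

/* Bounded string copy with truncation detection, as in the strlcpy conversion
 * above: always terminate the destination and report the length you tried to
 * copy, so the caller can reject anything that did not fit. */
#include <string.h>
#include <stdio.h>

static size_t bounded_copy(char *dst, const char *src, size_t dstsize)
{
        size_t srclen = strlen(src);

        if (dstsize) {
                size_t n = srclen >= dstsize ? dstsize - 1 : srclen;

                memcpy(dst, src, n);
                dst[n] = '\0';
        }
        return srclen;
}

int main(void)
{
        char phone[8];

        if (bounded_copy(phone, "5551212", sizeof(phone)) >= sizeof(phone))
                printf("rejected: number would not fit\n");
        else
                printf("accepted: %s\n", phone);

        if (bounded_copy(phone, "15555551212", sizeof(phone)) >= sizeof(phone))
                printf("rejected: number would not fit\n");
        else
                printf("accepted: %s\n", phone);
        return 0;
}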
30899diff -urNp linux-2.6.32.43/drivers/lguest/core.c linux-2.6.32.43/drivers/lguest/core.c
30900--- linux-2.6.32.43/drivers/lguest/core.c 2011-03-27 14:31:47.000000000 -0400
30901+++ linux-2.6.32.43/drivers/lguest/core.c 2011-04-17 15:56:46.000000000 -0400
30902@@ -91,9 +91,17 @@ static __init int map_switcher(void)
30903 * it's worked so far. The end address needs +1 because __get_vm_area
30904 * allocates an extra guard page, so we need space for that.
30905 */
30906+
30907+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
30908+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30909+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
30910+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30911+#else
30912 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
30913 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
30914 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
30915+#endif
30916+
30917 if (!switcher_vma) {
30918 err = -ENOMEM;
30919 printk("lguest: could not map switcher pages high\n");
30920@@ -118,7 +126,7 @@ static __init int map_switcher(void)
30921 * Now the Switcher is mapped at the right address, we can't fail!
30922 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
30923 */
30924- memcpy(switcher_vma->addr, start_switcher_text,
30925+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
30926 end_switcher_text - start_switcher_text);
30927
30928 printk(KERN_INFO "lguest: mapped switcher at %p\n",
30929diff -urNp linux-2.6.32.43/drivers/lguest/x86/core.c linux-2.6.32.43/drivers/lguest/x86/core.c
30930--- linux-2.6.32.43/drivers/lguest/x86/core.c 2011-03-27 14:31:47.000000000 -0400
30931+++ linux-2.6.32.43/drivers/lguest/x86/core.c 2011-04-17 15:56:46.000000000 -0400
30932@@ -59,7 +59,7 @@ static struct {
30933 /* Offset from where switcher.S was compiled to where we've copied it */
30934 static unsigned long switcher_offset(void)
30935 {
30936- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
30937+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
30938 }
30939
30940 /* This cpu's struct lguest_pages. */
30941@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
30942 * These copies are pretty cheap, so we do them unconditionally: */
30943 /* Save the current Host top-level page directory.
30944 */
30945+
30946+#ifdef CONFIG_PAX_PER_CPU_PGD
30947+ pages->state.host_cr3 = read_cr3();
30948+#else
30949 pages->state.host_cr3 = __pa(current->mm->pgd);
30950+#endif
30951+
30952 /*
30953 * Set up the Guest's page tables to see this CPU's pages (and no
30954 * other CPU's pages).
30955@@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
30956 * compiled-in switcher code and the high-mapped copy we just made.
30957 */
30958 for (i = 0; i < IDT_ENTRIES; i++)
30959- default_idt_entries[i] += switcher_offset();
30960+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
30961
30962 /*
30963 * Set up the Switcher's per-cpu areas.
30964@@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
30965 * it will be undisturbed when we switch. To change %cs and jump we
30966 * need this structure to feed to Intel's "lcall" instruction.
30967 */
30968- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
30969+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
30970 lguest_entry.segment = LGUEST_CS;
30971
30972 /*
30973diff -urNp linux-2.6.32.43/drivers/lguest/x86/switcher_32.S linux-2.6.32.43/drivers/lguest/x86/switcher_32.S
30974--- linux-2.6.32.43/drivers/lguest/x86/switcher_32.S 2011-03-27 14:31:47.000000000 -0400
30975+++ linux-2.6.32.43/drivers/lguest/x86/switcher_32.S 2011-04-17 15:56:46.000000000 -0400
30976@@ -87,6 +87,7 @@
30977 #include <asm/page.h>
30978 #include <asm/segment.h>
30979 #include <asm/lguest.h>
30980+#include <asm/processor-flags.h>
30981
30982 // We mark the start of the code to copy
30983 // It's placed in .text tho it's never run here
30984@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
30985 // Changes type when we load it: damn Intel!
30986 // For after we switch over our page tables
30987 // That entry will be read-only: we'd crash.
30988+
30989+#ifdef CONFIG_PAX_KERNEXEC
30990+ mov %cr0, %edx
30991+ xor $X86_CR0_WP, %edx
30992+ mov %edx, %cr0
30993+#endif
30994+
30995 movl $(GDT_ENTRY_TSS*8), %edx
30996 ltr %dx
30997
30998@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
30999 // Let's clear it again for our return.
31000 // The GDT descriptor of the Host
31001 // Points to the table after two "size" bytes
31002- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
31003+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
31004 // Clear "used" from type field (byte 5, bit 2)
31005- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
31006+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
31007+
31008+#ifdef CONFIG_PAX_KERNEXEC
31009+ mov %cr0, %eax
31010+ xor $X86_CR0_WP, %eax
31011+ mov %eax, %cr0
31012+#endif
31013
31014 // Once our page table's switched, the Guest is live!
31015 // The Host fades as we run this final step.
31016@@ -295,13 +309,12 @@ deliver_to_host:
31017 // I consulted gcc, and it gave
31018 // These instructions, which I gladly credit:
31019 leal (%edx,%ebx,8), %eax
31020- movzwl (%eax),%edx
31021- movl 4(%eax), %eax
31022- xorw %ax, %ax
31023- orl %eax, %edx
31024+ movl 4(%eax), %edx
31025+ movw (%eax), %dx
31026 // Now the address of the handler's in %edx
31027 // We call it now: its "iret" drops us home.
31028- jmp *%edx
31029+ ljmp $__KERNEL_CS, $1f
31030+1: jmp *%edx
31031
31032 // Every interrupt can come to us here
31033 // But we must truly tell each apart.
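The assembly added above briefly clears CR0.WP so the switcher can flip the TSS descriptor's busy bit even though, under KERNEXEC, the GDT page is mapped read-only to the kernel. A rough C analogue of that toggle, purely for illustration and not part of the patch (it assumes interrupts and preemption are off, as they are in the switcher):

	#include <asm/processor-flags.h>

	static inline void demo_toggle_cr0_wp(void)
	{
		unsigned long cr0;

		asm volatile("mov %%cr0, %0" : "=r" (cr0));
		cr0 ^= X86_CR0_WP;		/* first call clears WP, second restores it */
		asm volatile("mov %0, %%cr0" : : "r" (cr0));
	}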
31034diff -urNp linux-2.6.32.43/drivers/macintosh/via-pmu-backlight.c linux-2.6.32.43/drivers/macintosh/via-pmu-backlight.c
31035--- linux-2.6.32.43/drivers/macintosh/via-pmu-backlight.c 2011-03-27 14:31:47.000000000 -0400
31036+++ linux-2.6.32.43/drivers/macintosh/via-pmu-backlight.c 2011-04-17 15:56:46.000000000 -0400
31037@@ -15,7 +15,7 @@
31038
31039 #define MAX_PMU_LEVEL 0xFF
31040
31041-static struct backlight_ops pmu_backlight_data;
31042+static const struct backlight_ops pmu_backlight_data;
31043 static DEFINE_SPINLOCK(pmu_backlight_lock);
31044 static int sleeping, uses_pmu_bl;
31045 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
31046@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
31047 return bd->props.brightness;
31048 }
31049
31050-static struct backlight_ops pmu_backlight_data = {
31051+static const struct backlight_ops pmu_backlight_data = {
31052 .get_brightness = pmu_backlight_get_brightness,
31053 .update_status = pmu_backlight_update_status,
31054
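Sketch only, not part of the patch: constifying an ops table lets it be placed in read-only data, so an arbitrary-write primitive cannot quietly retarget its function pointers; a write faults instead. A hypothetical minimal example of the same constification:

	#include <linux/backlight.h>

	static int demo_get_brightness(struct backlight_device *bd)
	{
		return bd->props.brightness;
	}

	static const struct backlight_ops demo_bl_ops = {
		.get_brightness	= demo_get_brightness,
	};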
31055diff -urNp linux-2.6.32.43/drivers/macintosh/via-pmu.c linux-2.6.32.43/drivers/macintosh/via-pmu.c
31056--- linux-2.6.32.43/drivers/macintosh/via-pmu.c 2011-03-27 14:31:47.000000000 -0400
31057+++ linux-2.6.32.43/drivers/macintosh/via-pmu.c 2011-04-17 15:56:46.000000000 -0400
31058@@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
31059 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
31060 }
31061
31062-static struct platform_suspend_ops pmu_pm_ops = {
31063+static const struct platform_suspend_ops pmu_pm_ops = {
31064 .enter = powerbook_sleep,
31065 .valid = pmu_sleep_valid,
31066 };
31067diff -urNp linux-2.6.32.43/drivers/md/dm.c linux-2.6.32.43/drivers/md/dm.c
31068--- linux-2.6.32.43/drivers/md/dm.c 2011-03-27 14:31:47.000000000 -0400
31069+++ linux-2.6.32.43/drivers/md/dm.c 2011-05-04 17:56:28.000000000 -0400
31070@@ -163,9 +163,9 @@ struct mapped_device {
31071 /*
31072 * Event handling.
31073 */
31074- atomic_t event_nr;
31075+ atomic_unchecked_t event_nr;
31076 wait_queue_head_t eventq;
31077- atomic_t uevent_seq;
31078+ atomic_unchecked_t uevent_seq;
31079 struct list_head uevent_list;
31080 spinlock_t uevent_lock; /* Protect access to uevent_list */
31081
31082@@ -1770,8 +1770,8 @@ static struct mapped_device *alloc_dev(i
31083 rwlock_init(&md->map_lock);
31084 atomic_set(&md->holders, 1);
31085 atomic_set(&md->open_count, 0);
31086- atomic_set(&md->event_nr, 0);
31087- atomic_set(&md->uevent_seq, 0);
31088+ atomic_set_unchecked(&md->event_nr, 0);
31089+ atomic_set_unchecked(&md->uevent_seq, 0);
31090 INIT_LIST_HEAD(&md->uevent_list);
31091 spin_lock_init(&md->uevent_lock);
31092
31093@@ -1921,7 +1921,7 @@ static void event_callback(void *context
31094
31095 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
31096
31097- atomic_inc(&md->event_nr);
31098+ atomic_inc_unchecked(&md->event_nr);
31099 wake_up(&md->eventq);
31100 }
31101
31102@@ -2556,18 +2556,18 @@ void dm_kobject_uevent(struct mapped_dev
31103
31104 uint32_t dm_next_uevent_seq(struct mapped_device *md)
31105 {
31106- return atomic_add_return(1, &md->uevent_seq);
31107+ return atomic_add_return_unchecked(1, &md->uevent_seq);
31108 }
31109
31110 uint32_t dm_get_event_nr(struct mapped_device *md)
31111 {
31112- return atomic_read(&md->event_nr);
31113+ return atomic_read_unchecked(&md->event_nr);
31114 }
31115
31116 int dm_wait_event(struct mapped_device *md, int event_nr)
31117 {
31118 return wait_event_interruptible(md->eventq,
31119- (event_nr != atomic_read(&md->event_nr)));
31120+ (event_nr != atomic_read_unchecked(&md->event_nr)));
31121 }
31122
31123 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
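A hedged sketch of the conversion's intent, not from the patch: PaX's REFCOUNT hardening makes plain atomic_t trap on overflow, so counters that only produce a changing sequence number, like md->event_nr and md->uevent_seq here, are switched to the *_unchecked variants introduced elsewhere in this patch, where wrap-around is deliberate and harmless. Simplified, hypothetical usage:

	#include <asm/atomic.h>
	#include <linux/wait.h>

	static atomic_unchecked_t demo_event_nr = ATOMIC_INIT(0);

	static void demo_signal_event(wait_queue_head_t *wq)
	{
		atomic_inc_unchecked(&demo_event_nr);	/* may wrap: only inequality matters */
		wake_up(wq);
	}

	static int demo_wait_for_event(wait_queue_head_t *wq, int last_seen)
	{
		return wait_event_interruptible(*wq,
				last_seen != atomic_read_unchecked(&demo_event_nr));
	}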
31124diff -urNp linux-2.6.32.43/drivers/md/dm-ioctl.c linux-2.6.32.43/drivers/md/dm-ioctl.c
31125--- linux-2.6.32.43/drivers/md/dm-ioctl.c 2011-03-27 14:31:47.000000000 -0400
31126+++ linux-2.6.32.43/drivers/md/dm-ioctl.c 2011-04-17 15:56:46.000000000 -0400
31127@@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, str
31128 cmd == DM_LIST_VERSIONS_CMD)
31129 return 0;
31130
31131- if ((cmd == DM_DEV_CREATE_CMD)) {
31132+ if (cmd == DM_DEV_CREATE_CMD) {
31133 if (!*param->name) {
31134 DMWARN("name not supplied when creating device");
31135 return -EINVAL;
31136diff -urNp linux-2.6.32.43/drivers/md/dm-raid1.c linux-2.6.32.43/drivers/md/dm-raid1.c
31137--- linux-2.6.32.43/drivers/md/dm-raid1.c 2011-03-27 14:31:47.000000000 -0400
31138+++ linux-2.6.32.43/drivers/md/dm-raid1.c 2011-05-04 17:56:28.000000000 -0400
31139@@ -41,7 +41,7 @@ enum dm_raid1_error {
31140
31141 struct mirror {
31142 struct mirror_set *ms;
31143- atomic_t error_count;
31144+ atomic_unchecked_t error_count;
31145 unsigned long error_type;
31146 struct dm_dev *dev;
31147 sector_t offset;
31148@@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m
31149 * simple way to tell if a device has encountered
31150 * errors.
31151 */
31152- atomic_inc(&m->error_count);
31153+ atomic_inc_unchecked(&m->error_count);
31154
31155 if (test_and_set_bit(error_type, &m->error_type))
31156 return;
31157@@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m
31158 }
31159
31160 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
31161- if (!atomic_read(&new->error_count)) {
31162+ if (!atomic_read_unchecked(&new->error_count)) {
31163 set_default_mirror(new);
31164 break;
31165 }
31166@@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru
31167 struct mirror *m = get_default_mirror(ms);
31168
31169 do {
31170- if (likely(!atomic_read(&m->error_count)))
31171+ if (likely(!atomic_read_unchecked(&m->error_count)))
31172 return m;
31173
31174 if (m-- == ms->mirror)
31175@@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
31176 {
31177 struct mirror *default_mirror = get_default_mirror(m->ms);
31178
31179- return !atomic_read(&default_mirror->error_count);
31180+ return !atomic_read_unchecked(&default_mirror->error_count);
31181 }
31182
31183 static int mirror_available(struct mirror_set *ms, struct bio *bio)
31184@@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *
31185 */
31186 if (likely(region_in_sync(ms, region, 1)))
31187 m = choose_mirror(ms, bio->bi_sector);
31188- else if (m && atomic_read(&m->error_count))
31189+ else if (m && atomic_read_unchecked(&m->error_count))
31190 m = NULL;
31191
31192 if (likely(m))
31193@@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set
31194 }
31195
31196 ms->mirror[mirror].ms = ms;
31197- atomic_set(&(ms->mirror[mirror].error_count), 0);
31198+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
31199 ms->mirror[mirror].error_type = 0;
31200 ms->mirror[mirror].offset = offset;
31201
31202@@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ
31203 */
31204 static char device_status_char(struct mirror *m)
31205 {
31206- if (!atomic_read(&(m->error_count)))
31207+ if (!atomic_read_unchecked(&(m->error_count)))
31208 return 'A';
31209
31210 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
31211diff -urNp linux-2.6.32.43/drivers/md/dm-stripe.c linux-2.6.32.43/drivers/md/dm-stripe.c
31212--- linux-2.6.32.43/drivers/md/dm-stripe.c 2011-03-27 14:31:47.000000000 -0400
31213+++ linux-2.6.32.43/drivers/md/dm-stripe.c 2011-05-04 17:56:28.000000000 -0400
31214@@ -20,7 +20,7 @@ struct stripe {
31215 struct dm_dev *dev;
31216 sector_t physical_start;
31217
31218- atomic_t error_count;
31219+ atomic_unchecked_t error_count;
31220 };
31221
31222 struct stripe_c {
31223@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *
31224 kfree(sc);
31225 return r;
31226 }
31227- atomic_set(&(sc->stripe[i].error_count), 0);
31228+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
31229 }
31230
31231 ti->private = sc;
31232@@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe
31233 DMEMIT("%d ", sc->stripes);
31234 for (i = 0; i < sc->stripes; i++) {
31235 DMEMIT("%s ", sc->stripe[i].dev->name);
31236- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
31237+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
31238 'D' : 'A';
31239 }
31240 buffer[i] = '\0';
31241@@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe
31242 */
31243 for (i = 0; i < sc->stripes; i++)
31244 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
31245- atomic_inc(&(sc->stripe[i].error_count));
31246- if (atomic_read(&(sc->stripe[i].error_count)) <
31247+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
31248+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
31249 DM_IO_ERROR_THRESHOLD)
31250 queue_work(kstriped, &sc->kstriped_ws);
31251 }
31252diff -urNp linux-2.6.32.43/drivers/md/dm-sysfs.c linux-2.6.32.43/drivers/md/dm-sysfs.c
31253--- linux-2.6.32.43/drivers/md/dm-sysfs.c 2011-03-27 14:31:47.000000000 -0400
31254+++ linux-2.6.32.43/drivers/md/dm-sysfs.c 2011-04-17 15:56:46.000000000 -0400
31255@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
31256 NULL,
31257 };
31258
31259-static struct sysfs_ops dm_sysfs_ops = {
31260+static const struct sysfs_ops dm_sysfs_ops = {
31261 .show = dm_attr_show,
31262 };
31263
31264diff -urNp linux-2.6.32.43/drivers/md/dm-table.c linux-2.6.32.43/drivers/md/dm-table.c
31265--- linux-2.6.32.43/drivers/md/dm-table.c 2011-06-25 12:55:34.000000000 -0400
31266+++ linux-2.6.32.43/drivers/md/dm-table.c 2011-06-25 12:56:37.000000000 -0400
31267@@ -376,7 +376,7 @@ static int device_area_is_invalid(struct
31268 if (!dev_size)
31269 return 0;
31270
31271- if ((start >= dev_size) || (start + len > dev_size)) {
31272+ if ((start >= dev_size) || (len > dev_size - start)) {
31273 DMWARN("%s: %s too small for target: "
31274 "start=%llu, len=%llu, dev_size=%llu",
31275 dm_device_name(ti->table->md), bdevname(bdev, b),
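Illustrative sketch, not part of the patch: the rewritten condition avoids the sum start + len, which can wrap past the top of sector_t and compare as small, silently accepting an out-of-range target. Because start >= dev_size is rejected first, dev_size - start cannot underflow:

	#include <linux/types.h>

	static bool demo_range_invalid(sector_t start, sector_t len, sector_t dev_size)
	{
		return start >= dev_size || len > dev_size - start;
	}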
31276diff -urNp linux-2.6.32.43/drivers/md/md.c linux-2.6.32.43/drivers/md/md.c
31277--- linux-2.6.32.43/drivers/md/md.c 2011-07-13 17:23:04.000000000 -0400
31278+++ linux-2.6.32.43/drivers/md/md.c 2011-07-13 17:23:18.000000000 -0400
31279@@ -153,10 +153,10 @@ static int start_readonly;
31280 * start build, activate spare
31281 */
31282 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
31283-static atomic_t md_event_count;
31284+static atomic_unchecked_t md_event_count;
31285 void md_new_event(mddev_t *mddev)
31286 {
31287- atomic_inc(&md_event_count);
31288+ atomic_inc_unchecked(&md_event_count);
31289 wake_up(&md_event_waiters);
31290 }
31291 EXPORT_SYMBOL_GPL(md_new_event);
31292@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
31293 */
31294 static void md_new_event_inintr(mddev_t *mddev)
31295 {
31296- atomic_inc(&md_event_count);
31297+ atomic_inc_unchecked(&md_event_count);
31298 wake_up(&md_event_waiters);
31299 }
31300
31301@@ -1218,7 +1218,7 @@ static int super_1_load(mdk_rdev_t *rdev
31302
31303 rdev->preferred_minor = 0xffff;
31304 rdev->data_offset = le64_to_cpu(sb->data_offset);
31305- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31306+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
31307
31308 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
31309 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
31310@@ -1392,7 +1392,7 @@ static void super_1_sync(mddev_t *mddev,
31311 else
31312 sb->resync_offset = cpu_to_le64(0);
31313
31314- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
31315+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
31316
31317 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
31318 sb->size = cpu_to_le64(mddev->dev_sectors);
31319@@ -2214,7 +2214,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
31320 static ssize_t
31321 errors_show(mdk_rdev_t *rdev, char *page)
31322 {
31323- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
31324+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
31325 }
31326
31327 static ssize_t
31328@@ -2223,7 +2223,7 @@ errors_store(mdk_rdev_t *rdev, const cha
31329 char *e;
31330 unsigned long n = simple_strtoul(buf, &e, 10);
31331 if (*buf && (*e == 0 || *e == '\n')) {
31332- atomic_set(&rdev->corrected_errors, n);
31333+ atomic_set_unchecked(&rdev->corrected_errors, n);
31334 return len;
31335 }
31336 return -EINVAL;
31337@@ -2517,7 +2517,7 @@ static void rdev_free(struct kobject *ko
31338 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
31339 kfree(rdev);
31340 }
31341-static struct sysfs_ops rdev_sysfs_ops = {
31342+static const struct sysfs_ops rdev_sysfs_ops = {
31343 .show = rdev_attr_show,
31344 .store = rdev_attr_store,
31345 };
31346@@ -2566,8 +2566,8 @@ static mdk_rdev_t *md_import_device(dev_
31347 rdev->data_offset = 0;
31348 rdev->sb_events = 0;
31349 atomic_set(&rdev->nr_pending, 0);
31350- atomic_set(&rdev->read_errors, 0);
31351- atomic_set(&rdev->corrected_errors, 0);
31352+ atomic_set_unchecked(&rdev->read_errors, 0);
31353+ atomic_set_unchecked(&rdev->corrected_errors, 0);
31354
31355 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
31356 if (!size) {
31357@@ -3887,7 +3887,7 @@ static void md_free(struct kobject *ko)
31358 kfree(mddev);
31359 }
31360
31361-static struct sysfs_ops md_sysfs_ops = {
31362+static const struct sysfs_ops md_sysfs_ops = {
31363 .show = md_attr_show,
31364 .store = md_attr_store,
31365 };
31366@@ -4474,7 +4474,8 @@ out:
31367 err = 0;
31368 blk_integrity_unregister(disk);
31369 md_new_event(mddev);
31370- sysfs_notify_dirent(mddev->sysfs_state);
31371+ if (mddev->sysfs_state)
31372+ sysfs_notify_dirent(mddev->sysfs_state);
31373 return err;
31374 }
31375
31376@@ -5954,7 +5955,7 @@ static int md_seq_show(struct seq_file *
31377
31378 spin_unlock(&pers_lock);
31379 seq_printf(seq, "\n");
31380- mi->event = atomic_read(&md_event_count);
31381+ mi->event = atomic_read_unchecked(&md_event_count);
31382 return 0;
31383 }
31384 if (v == (void*)2) {
31385@@ -6043,7 +6044,7 @@ static int md_seq_show(struct seq_file *
31386 chunk_kb ? "KB" : "B");
31387 if (bitmap->file) {
31388 seq_printf(seq, ", file: ");
31389- seq_path(seq, &bitmap->file->f_path, " \t\n");
31390+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
31391 }
31392
31393 seq_printf(seq, "\n");
31394@@ -6077,7 +6078,7 @@ static int md_seq_open(struct inode *ino
31395 else {
31396 struct seq_file *p = file->private_data;
31397 p->private = mi;
31398- mi->event = atomic_read(&md_event_count);
31399+ mi->event = atomic_read_unchecked(&md_event_count);
31400 }
31401 return error;
31402 }
31403@@ -6093,7 +6094,7 @@ static unsigned int mdstat_poll(struct f
31404 /* always allow read */
31405 mask = POLLIN | POLLRDNORM;
31406
31407- if (mi->event != atomic_read(&md_event_count))
31408+ if (mi->event != atomic_read_unchecked(&md_event_count))
31409 mask |= POLLERR | POLLPRI;
31410 return mask;
31411 }
31412@@ -6137,7 +6138,7 @@ static int is_mddev_idle(mddev_t *mddev,
31413 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
31414 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
31415 (int)part_stat_read(&disk->part0, sectors[1]) -
31416- atomic_read(&disk->sync_io);
31417+ atomic_read_unchecked(&disk->sync_io);
31418 /* sync IO will cause sync_io to increase before the disk_stats
31419 * as sync_io is counted when a request starts, and
31420 * disk_stats is counted when it completes.
31421diff -urNp linux-2.6.32.43/drivers/md/md.h linux-2.6.32.43/drivers/md/md.h
31422--- linux-2.6.32.43/drivers/md/md.h 2011-03-27 14:31:47.000000000 -0400
31423+++ linux-2.6.32.43/drivers/md/md.h 2011-05-04 17:56:20.000000000 -0400
31424@@ -94,10 +94,10 @@ struct mdk_rdev_s
31425 * only maintained for arrays that
31426 * support hot removal
31427 */
31428- atomic_t read_errors; /* number of consecutive read errors that
31429+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
31430 * we have tried to ignore.
31431 */
31432- atomic_t corrected_errors; /* number of corrected read errors,
31433+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
31434 * for reporting to userspace and storing
31435 * in superblock.
31436 */
31437@@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_
31438
31439 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
31440 {
31441- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31442+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
31443 }
31444
31445 struct mdk_personality
31446diff -urNp linux-2.6.32.43/drivers/md/raid10.c linux-2.6.32.43/drivers/md/raid10.c
31447--- linux-2.6.32.43/drivers/md/raid10.c 2011-03-27 14:31:47.000000000 -0400
31448+++ linux-2.6.32.43/drivers/md/raid10.c 2011-05-04 17:56:28.000000000 -0400
31449@@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi
31450 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
31451 set_bit(R10BIO_Uptodate, &r10_bio->state);
31452 else {
31453- atomic_add(r10_bio->sectors,
31454+ atomic_add_unchecked(r10_bio->sectors,
31455 &conf->mirrors[d].rdev->corrected_errors);
31456 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
31457 md_error(r10_bio->mddev,
31458@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
31459 test_bit(In_sync, &rdev->flags)) {
31460 atomic_inc(&rdev->nr_pending);
31461 rcu_read_unlock();
31462- atomic_add(s, &rdev->corrected_errors);
31463+ atomic_add_unchecked(s, &rdev->corrected_errors);
31464 if (sync_page_io(rdev->bdev,
31465 r10_bio->devs[sl].addr +
31466 sect + rdev->data_offset,
31467diff -urNp linux-2.6.32.43/drivers/md/raid1.c linux-2.6.32.43/drivers/md/raid1.c
31468--- linux-2.6.32.43/drivers/md/raid1.c 2011-03-27 14:31:47.000000000 -0400
31469+++ linux-2.6.32.43/drivers/md/raid1.c 2011-05-04 17:56:28.000000000 -0400
31470@@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *
31471 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
31472 continue;
31473 rdev = conf->mirrors[d].rdev;
31474- atomic_add(s, &rdev->corrected_errors);
31475+ atomic_add_unchecked(s, &rdev->corrected_errors);
31476 if (sync_page_io(rdev->bdev,
31477 sect + rdev->data_offset,
31478 s<<9,
31479@@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf,
31480 /* Well, this device is dead */
31481 md_error(mddev, rdev);
31482 else {
31483- atomic_add(s, &rdev->corrected_errors);
31484+ atomic_add_unchecked(s, &rdev->corrected_errors);
31485 printk(KERN_INFO
31486 "raid1:%s: read error corrected "
31487 "(%d sectors at %llu on %s)\n",
31488diff -urNp linux-2.6.32.43/drivers/md/raid5.c linux-2.6.32.43/drivers/md/raid5.c
31489--- linux-2.6.32.43/drivers/md/raid5.c 2011-06-25 12:55:34.000000000 -0400
31490+++ linux-2.6.32.43/drivers/md/raid5.c 2011-06-25 12:58:39.000000000 -0400
31491@@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea
31492 bi->bi_next = NULL;
31493 if ((rw & WRITE) &&
31494 test_bit(R5_ReWrite, &sh->dev[i].flags))
31495- atomic_add(STRIPE_SECTORS,
31496+ atomic_add_unchecked(STRIPE_SECTORS,
31497 &rdev->corrected_errors);
31498 generic_make_request(bi);
31499 } else {
31500@@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc
31501 clear_bit(R5_ReadError, &sh->dev[i].flags);
31502 clear_bit(R5_ReWrite, &sh->dev[i].flags);
31503 }
31504- if (atomic_read(&conf->disks[i].rdev->read_errors))
31505- atomic_set(&conf->disks[i].rdev->read_errors, 0);
31506+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
31507+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
31508 } else {
31509 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
31510 int retry = 0;
31511 rdev = conf->disks[i].rdev;
31512
31513 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
31514- atomic_inc(&rdev->read_errors);
31515+ atomic_inc_unchecked(&rdev->read_errors);
31516 if (conf->mddev->degraded >= conf->max_degraded)
31517 printk_rl(KERN_WARNING
31518 "raid5:%s: read error not correctable "
31519@@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc
31520 (unsigned long long)(sh->sector
31521 + rdev->data_offset),
31522 bdn);
31523- else if (atomic_read(&rdev->read_errors)
31524+ else if (atomic_read_unchecked(&rdev->read_errors)
31525 > conf->max_nr_stripes)
31526 printk(KERN_WARNING
31527 "raid5:%s: Too many read errors, failing device %s.\n",
31528@@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct s
31529 sector_t r_sector;
31530 struct stripe_head sh2;
31531
31532+ pax_track_stack();
31533
31534 chunk_offset = sector_div(new_sector, sectors_per_chunk);
31535 stripe = new_sector;
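Sketch of the recurring pattern, not part of the patch: pax_track_stack(), defined elsewhere in this patch, is called at the top of functions with unusually deep stack frames, such as compute_blocknr() and its on-stack struct stripe_head above, so the PaX stack-usage machinery can account for them. A hypothetical example of where such a call lands:

	#include <linux/types.h>
	#include <linux/string.h>

	static int demo_checksum_block(const u8 *src, size_t n)
	{
		u8 scratch[512];	/* large on-stack buffer triggers the annotation */
		size_t i;
		int sum = 0;

		pax_track_stack();	/* provided by the PaX half of this patch */

		if (n > sizeof(scratch))
			n = sizeof(scratch);
		memcpy(scratch, src, n);
		for (i = 0; i < n; i++)
			sum += scratch[i];
		return sum;
	}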
31536diff -urNp linux-2.6.32.43/drivers/media/common/saa7146_hlp.c linux-2.6.32.43/drivers/media/common/saa7146_hlp.c
31537--- linux-2.6.32.43/drivers/media/common/saa7146_hlp.c 2011-03-27 14:31:47.000000000 -0400
31538+++ linux-2.6.32.43/drivers/media/common/saa7146_hlp.c 2011-05-16 21:46:57.000000000 -0400
31539@@ -353,6 +353,8 @@ static void calculate_clipping_registers
31540
31541 int x[32], y[32], w[32], h[32];
31542
31543+ pax_track_stack();
31544+
31545 /* clear out memory */
31546 memset(&line_list[0], 0x00, sizeof(u32)*32);
31547 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
31548diff -urNp linux-2.6.32.43/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.32.43/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
31549--- linux-2.6.32.43/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-03-27 14:31:47.000000000 -0400
31550+++ linux-2.6.32.43/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-16 21:46:57.000000000 -0400
31551@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
31552 u8 buf[HOST_LINK_BUF_SIZE];
31553 int i;
31554
31555+ pax_track_stack();
31556+
31557 dprintk("%s\n", __func__);
31558
31559 /* check if we have space for a link buf in the rx_buffer */
31560@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
31561 unsigned long timeout;
31562 int written;
31563
31564+ pax_track_stack();
31565+
31566 dprintk("%s\n", __func__);
31567
31568 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
31569diff -urNp linux-2.6.32.43/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.32.43/drivers/media/dvb/dvb-core/dvbdev.c
31570--- linux-2.6.32.43/drivers/media/dvb/dvb-core/dvbdev.c 2011-03-27 14:31:47.000000000 -0400
31571+++ linux-2.6.32.43/drivers/media/dvb/dvb-core/dvbdev.c 2011-04-17 15:56:46.000000000 -0400
31572@@ -191,6 +191,7 @@ int dvb_register_device(struct dvb_adapt
31573 const struct dvb_device *template, void *priv, int type)
31574 {
31575 struct dvb_device *dvbdev;
31576+ /* cannot be const */
31577 struct file_operations *dvbdevfops;
31578 struct device *clsdev;
31579 int minor;
31580diff -urNp linux-2.6.32.43/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.32.43/drivers/media/dvb/dvb-usb/dib0700_core.c
31581--- linux-2.6.32.43/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-03-27 14:31:47.000000000 -0400
31582+++ linux-2.6.32.43/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-16 21:46:57.000000000 -0400
31583@@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb
31584
31585 u8 buf[260];
31586
31587+ pax_track_stack();
31588+
31589 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
31590 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
31591
31592diff -urNp linux-2.6.32.43/drivers/media/dvb/frontends/or51211.c linux-2.6.32.43/drivers/media/dvb/frontends/or51211.c
31593--- linux-2.6.32.43/drivers/media/dvb/frontends/or51211.c 2011-03-27 14:31:47.000000000 -0400
31594+++ linux-2.6.32.43/drivers/media/dvb/frontends/or51211.c 2011-05-16 21:46:57.000000000 -0400
31595@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
31596 u8 tudata[585];
31597 int i;
31598
31599+ pax_track_stack();
31600+
31601 dprintk("Firmware is %zd bytes\n",fw->size);
31602
31603 /* Get eprom data */
31604diff -urNp linux-2.6.32.43/drivers/media/radio/radio-cadet.c linux-2.6.32.43/drivers/media/radio/radio-cadet.c
31605--- linux-2.6.32.43/drivers/media/radio/radio-cadet.c 2011-03-27 14:31:47.000000000 -0400
31606+++ linux-2.6.32.43/drivers/media/radio/radio-cadet.c 2011-04-17 15:56:46.000000000 -0400
31607@@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
31608 while (i < count && dev->rdsin != dev->rdsout)
31609 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
31610
31611- if (copy_to_user(data, readbuf, i))
31612+ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
31613 return -EFAULT;
31614 return i;
31615 }
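Sketch only, not from the patch: the added check bounds the byte count handed to copy_to_user() by the size of the kernel buffer it was read from, so an inconsistent ring-buffer index can never cause an over-read of the stack array. A hypothetical helper with the same shape:

	#include <linux/types.h>
	#include <linux/errno.h>
	#include <linux/uaccess.h>

	static ssize_t demo_copy_out(char __user *dst, const unsigned char *buf,
				     size_t buf_sz, size_t produced)
	{
		if (produced > buf_sz)		/* never copy past the kernel buffer */
			return -EFAULT;
		if (copy_to_user(dst, buf, produced))
			return -EFAULT;
		return produced;
	}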
31616diff -urNp linux-2.6.32.43/drivers/media/video/cx18/cx18-driver.c linux-2.6.32.43/drivers/media/video/cx18/cx18-driver.c
31617--- linux-2.6.32.43/drivers/media/video/cx18/cx18-driver.c 2011-03-27 14:31:47.000000000 -0400
31618+++ linux-2.6.32.43/drivers/media/video/cx18/cx18-driver.c 2011-05-16 21:46:57.000000000 -0400
31619@@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl
31620
31621 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
31622
31623-static atomic_t cx18_instance = ATOMIC_INIT(0);
31624+static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
31625
31626 /* Parameter declarations */
31627 static int cardtype[CX18_MAX_CARDS];
31628@@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
31629 struct i2c_client c;
31630 u8 eedata[256];
31631
31632+ pax_track_stack();
31633+
31634 memset(&c, 0, sizeof(c));
31635 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
31636 c.adapter = &cx->i2c_adap[0];
31637@@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct p
31638 struct cx18 *cx;
31639
31640 /* FIXME - module parameter arrays constrain max instances */
31641- i = atomic_inc_return(&cx18_instance) - 1;
31642+ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
31643 if (i >= CX18_MAX_CARDS) {
31644 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
31645 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
31646diff -urNp linux-2.6.32.43/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.32.43/drivers/media/video/ivtv/ivtv-driver.c
31647--- linux-2.6.32.43/drivers/media/video/ivtv/ivtv-driver.c 2011-03-27 14:31:47.000000000 -0400
31648+++ linux-2.6.32.43/drivers/media/video/ivtv/ivtv-driver.c 2011-05-04 17:56:28.000000000 -0400
31649@@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl
31650 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
31651
31652 /* ivtv instance counter */
31653-static atomic_t ivtv_instance = ATOMIC_INIT(0);
31654+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
31655
31656 /* Parameter declarations */
31657 static int cardtype[IVTV_MAX_CARDS];
31658diff -urNp linux-2.6.32.43/drivers/media/video/omap24xxcam.c linux-2.6.32.43/drivers/media/video/omap24xxcam.c
31659--- linux-2.6.32.43/drivers/media/video/omap24xxcam.c 2011-03-27 14:31:47.000000000 -0400
31660+++ linux-2.6.32.43/drivers/media/video/omap24xxcam.c 2011-05-04 17:56:28.000000000 -0400
31661@@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str
31662 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
31663
31664 do_gettimeofday(&vb->ts);
31665- vb->field_count = atomic_add_return(2, &fh->field_count);
31666+ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
31667 if (csr & csr_error) {
31668 vb->state = VIDEOBUF_ERROR;
31669 if (!atomic_read(&fh->cam->in_reset)) {
31670diff -urNp linux-2.6.32.43/drivers/media/video/omap24xxcam.h linux-2.6.32.43/drivers/media/video/omap24xxcam.h
31671--- linux-2.6.32.43/drivers/media/video/omap24xxcam.h 2011-03-27 14:31:47.000000000 -0400
31672+++ linux-2.6.32.43/drivers/media/video/omap24xxcam.h 2011-05-04 17:56:28.000000000 -0400
31673@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
31674 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
31675 struct videobuf_queue vbq;
31676 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
31677- atomic_t field_count; /* field counter for videobuf_buffer */
31678+ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
31679 /* accessing cam here doesn't need serialisation: it's constant */
31680 struct omap24xxcam_device *cam;
31681 };
31682diff -urNp linux-2.6.32.43/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.32.43/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
31683--- linux-2.6.32.43/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-03-27 14:31:47.000000000 -0400
31684+++ linux-2.6.32.43/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-16 21:46:57.000000000 -0400
31685@@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
31686 u8 *eeprom;
31687 struct tveeprom tvdata;
31688
31689+ pax_track_stack();
31690+
31691 memset(&tvdata,0,sizeof(tvdata));
31692
31693 eeprom = pvr2_eeprom_fetch(hdw);
31694diff -urNp linux-2.6.32.43/drivers/media/video/saa7134/saa6752hs.c linux-2.6.32.43/drivers/media/video/saa7134/saa6752hs.c
31695--- linux-2.6.32.43/drivers/media/video/saa7134/saa6752hs.c 2011-03-27 14:31:47.000000000 -0400
31696+++ linux-2.6.32.43/drivers/media/video/saa7134/saa6752hs.c 2011-05-16 21:46:57.000000000 -0400
31697@@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_su
31698 unsigned char localPAT[256];
31699 unsigned char localPMT[256];
31700
31701+ pax_track_stack();
31702+
31703 /* Set video format - must be done first as it resets other settings */
31704 set_reg8(client, 0x41, h->video_format);
31705
31706diff -urNp linux-2.6.32.43/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.32.43/drivers/media/video/saa7164/saa7164-cmd.c
31707--- linux-2.6.32.43/drivers/media/video/saa7164/saa7164-cmd.c 2011-03-27 14:31:47.000000000 -0400
31708+++ linux-2.6.32.43/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-16 21:46:57.000000000 -0400
31709@@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_d
31710 wait_queue_head_t *q = 0;
31711 dprintk(DBGLVL_CMD, "%s()\n", __func__);
31712
31713+ pax_track_stack();
31714+
31715 /* While any outstand message on the bus exists... */
31716 do {
31717
31718@@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
31719 u8 tmp[512];
31720 dprintk(DBGLVL_CMD, "%s()\n", __func__);
31721
31722+ pax_track_stack();
31723+
31724 while (loop) {
31725
31726 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
31727diff -urNp linux-2.6.32.43/drivers/media/video/usbvideo/konicawc.c linux-2.6.32.43/drivers/media/video/usbvideo/konicawc.c
31728--- linux-2.6.32.43/drivers/media/video/usbvideo/konicawc.c 2011-03-27 14:31:47.000000000 -0400
31729+++ linux-2.6.32.43/drivers/media/video/usbvideo/konicawc.c 2011-04-17 15:56:46.000000000 -0400
31730@@ -225,7 +225,7 @@ static void konicawc_register_input(stru
31731 int error;
31732
31733 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
31734- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31735+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31736
31737 cam->input = input_dev = input_allocate_device();
31738 if (!input_dev) {
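A hedged sketch of why the one-line change matters (not from the patch): strncat()'s size argument limits how many bytes are appended, not the total size of the destination, so passing sizeof(dst) can still overflow a buffer that is already partly full. strlcat() takes the full destination size and truncates instead. Hypothetical example:

	#include <linux/types.h>
	#include <linux/string.h>

	static void demo_build_phys_name(char *phys, size_t phys_sz, const char *usb_path)
	{
		strlcpy(phys, usb_path, phys_sz);
		strlcat(phys, "/input0", phys_sz);	/* bounded by the whole buffer */
	}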
31739diff -urNp linux-2.6.32.43/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.32.43/drivers/media/video/usbvideo/quickcam_messenger.c
31740--- linux-2.6.32.43/drivers/media/video/usbvideo/quickcam_messenger.c 2011-03-27 14:31:47.000000000 -0400
31741+++ linux-2.6.32.43/drivers/media/video/usbvideo/quickcam_messenger.c 2011-04-17 15:56:46.000000000 -0400
31742@@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
31743 int error;
31744
31745 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
31746- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31747+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
31748
31749 cam->input = input_dev = input_allocate_device();
31750 if (!input_dev) {
31751diff -urNp linux-2.6.32.43/drivers/media/video/usbvision/usbvision-core.c linux-2.6.32.43/drivers/media/video/usbvision/usbvision-core.c
31752--- linux-2.6.32.43/drivers/media/video/usbvision/usbvision-core.c 2011-03-27 14:31:47.000000000 -0400
31753+++ linux-2.6.32.43/drivers/media/video/usbvision/usbvision-core.c 2011-05-16 21:46:57.000000000 -0400
31754@@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_c
31755 unsigned char rv, gv, bv;
31756 static unsigned char *Y, *U, *V;
31757
31758+ pax_track_stack();
31759+
31760 frame = usbvision->curFrame;
31761 imageSize = frame->frmwidth * frame->frmheight;
31762 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
31763diff -urNp linux-2.6.32.43/drivers/media/video/v4l2-device.c linux-2.6.32.43/drivers/media/video/v4l2-device.c
31764--- linux-2.6.32.43/drivers/media/video/v4l2-device.c 2011-03-27 14:31:47.000000000 -0400
31765+++ linux-2.6.32.43/drivers/media/video/v4l2-device.c 2011-05-04 17:56:28.000000000 -0400
31766@@ -50,9 +50,9 @@ int v4l2_device_register(struct device *
31767 EXPORT_SYMBOL_GPL(v4l2_device_register);
31768
31769 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
31770- atomic_t *instance)
31771+ atomic_unchecked_t *instance)
31772 {
31773- int num = atomic_inc_return(instance) - 1;
31774+ int num = atomic_inc_return_unchecked(instance) - 1;
31775 int len = strlen(basename);
31776
31777 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
31778diff -urNp linux-2.6.32.43/drivers/media/video/videobuf-dma-sg.c linux-2.6.32.43/drivers/media/video/videobuf-dma-sg.c
31779--- linux-2.6.32.43/drivers/media/video/videobuf-dma-sg.c 2011-03-27 14:31:47.000000000 -0400
31780+++ linux-2.6.32.43/drivers/media/video/videobuf-dma-sg.c 2011-05-16 21:46:57.000000000 -0400
31781@@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
31782 {
31783 struct videobuf_queue q;
31784
31785+ pax_track_stack();
31786+
31787 /* Required to make generic handler to call __videobuf_alloc */
31788 q.int_ops = &sg_ops;
31789
31790diff -urNp linux-2.6.32.43/drivers/message/fusion/mptbase.c linux-2.6.32.43/drivers/message/fusion/mptbase.c
31791--- linux-2.6.32.43/drivers/message/fusion/mptbase.c 2011-03-27 14:31:47.000000000 -0400
31792+++ linux-2.6.32.43/drivers/message/fusion/mptbase.c 2011-04-17 15:56:46.000000000 -0400
31793@@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **s
31794 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
31795 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
31796
31797+#ifdef CONFIG_GRKERNSEC_HIDESYM
31798+ len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31799+ NULL, NULL);
31800+#else
31801 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
31802 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
31803+#endif
31804+
31805 /*
31806 * Rounding UP to nearest 4-kB boundary here...
31807 */
31808diff -urNp linux-2.6.32.43/drivers/message/fusion/mptsas.c linux-2.6.32.43/drivers/message/fusion/mptsas.c
31809--- linux-2.6.32.43/drivers/message/fusion/mptsas.c 2011-03-27 14:31:47.000000000 -0400
31810+++ linux-2.6.32.43/drivers/message/fusion/mptsas.c 2011-04-17 15:56:46.000000000 -0400
31811@@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devin
31812 return 0;
31813 }
31814
31815+static inline void
31816+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31817+{
31818+ if (phy_info->port_details) {
31819+ phy_info->port_details->rphy = rphy;
31820+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31821+ ioc->name, rphy));
31822+ }
31823+
31824+ if (rphy) {
31825+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31826+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31827+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31828+ ioc->name, rphy, rphy->dev.release));
31829+ }
31830+}
31831+
31832 /* no mutex */
31833 static void
31834 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
31835@@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
31836 return NULL;
31837 }
31838
31839-static inline void
31840-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
31841-{
31842- if (phy_info->port_details) {
31843- phy_info->port_details->rphy = rphy;
31844- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
31845- ioc->name, rphy));
31846- }
31847-
31848- if (rphy) {
31849- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
31850- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
31851- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
31852- ioc->name, rphy, rphy->dev.release));
31853- }
31854-}
31855-
31856 static inline struct sas_port *
31857 mptsas_get_port(struct mptsas_phyinfo *phy_info)
31858 {
31859diff -urNp linux-2.6.32.43/drivers/message/fusion/mptscsih.c linux-2.6.32.43/drivers/message/fusion/mptscsih.c
31860--- linux-2.6.32.43/drivers/message/fusion/mptscsih.c 2011-03-27 14:31:47.000000000 -0400
31861+++ linux-2.6.32.43/drivers/message/fusion/mptscsih.c 2011-04-17 15:56:46.000000000 -0400
31862@@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
31863
31864 h = shost_priv(SChost);
31865
31866- if (h) {
31867- if (h->info_kbuf == NULL)
31868- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31869- return h->info_kbuf;
31870- h->info_kbuf[0] = '\0';
31871+ if (!h)
31872+ return NULL;
31873
31874- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31875- h->info_kbuf[size-1] = '\0';
31876- }
31877+ if (h->info_kbuf == NULL)
31878+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
31879+ return h->info_kbuf;
31880+ h->info_kbuf[0] = '\0';
31881+
31882+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
31883+ h->info_kbuf[size-1] = '\0';
31884
31885 return h->info_kbuf;
31886 }
31887diff -urNp linux-2.6.32.43/drivers/message/i2o/i2o_config.c linux-2.6.32.43/drivers/message/i2o/i2o_config.c
31888--- linux-2.6.32.43/drivers/message/i2o/i2o_config.c 2011-03-27 14:31:47.000000000 -0400
31889+++ linux-2.6.32.43/drivers/message/i2o/i2o_config.c 2011-05-16 21:46:57.000000000 -0400
31890@@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned lon
31891 struct i2o_message *msg;
31892 unsigned int iop;
31893
31894+ pax_track_stack();
31895+
31896 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
31897 return -EFAULT;
31898
31899diff -urNp linux-2.6.32.43/drivers/message/i2o/i2o_proc.c linux-2.6.32.43/drivers/message/i2o/i2o_proc.c
31900--- linux-2.6.32.43/drivers/message/i2o/i2o_proc.c 2011-03-27 14:31:47.000000000 -0400
31901+++ linux-2.6.32.43/drivers/message/i2o/i2o_proc.c 2011-04-17 15:56:46.000000000 -0400
31902@@ -259,13 +259,6 @@ static char *scsi_devices[] = {
31903 "Array Controller Device"
31904 };
31905
31906-static char *chtostr(u8 * chars, int n)
31907-{
31908- char tmp[256];
31909- tmp[0] = 0;
31910- return strncat(tmp, (char *)chars, n);
31911-}
31912-
31913 static int i2o_report_query_status(struct seq_file *seq, int block_status,
31914 char *group)
31915 {
31916@@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
31917
31918 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
31919 seq_printf(seq, "%-#8x", ddm_table.module_id);
31920- seq_printf(seq, "%-29s",
31921- chtostr(ddm_table.module_name_version, 28));
31922+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
31923 seq_printf(seq, "%9d ", ddm_table.data_size);
31924 seq_printf(seq, "%8d", ddm_table.code_size);
31925
31926@@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
31927
31928 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
31929 seq_printf(seq, "%-#8x", dst->module_id);
31930- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
31931- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
31932+ seq_printf(seq, "%-.28s", dst->module_name_version);
31933+ seq_printf(seq, "%-.8s", dst->date);
31934 seq_printf(seq, "%8d ", dst->module_size);
31935 seq_printf(seq, "%8d ", dst->mpb_size);
31936 seq_printf(seq, "0x%04x", dst->module_flags);
31937@@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
31938 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
31939 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
31940 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
31941- seq_printf(seq, "Vendor info : %s\n",
31942- chtostr((u8 *) (work32 + 2), 16));
31943- seq_printf(seq, "Product info : %s\n",
31944- chtostr((u8 *) (work32 + 6), 16));
31945- seq_printf(seq, "Description : %s\n",
31946- chtostr((u8 *) (work32 + 10), 16));
31947- seq_printf(seq, "Product rev. : %s\n",
31948- chtostr((u8 *) (work32 + 14), 8));
31949+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
31950+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
31951+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
31952+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
31953
31954 seq_printf(seq, "Serial number : ");
31955 print_serial_number(seq, (u8 *) (work32 + 16),
31956@@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
31957 }
31958
31959 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
31960- seq_printf(seq, "Module name : %s\n",
31961- chtostr(result.module_name, 24));
31962- seq_printf(seq, "Module revision : %s\n",
31963- chtostr(result.module_rev, 8));
31964+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
31965+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
31966
31967 seq_printf(seq, "Serial number : ");
31968 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
31969@@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
31970 return 0;
31971 }
31972
31973- seq_printf(seq, "Device name : %s\n",
31974- chtostr(result.device_name, 64));
31975- seq_printf(seq, "Service name : %s\n",
31976- chtostr(result.service_name, 64));
31977- seq_printf(seq, "Physical name : %s\n",
31978- chtostr(result.physical_location, 64));
31979- seq_printf(seq, "Instance number : %s\n",
31980- chtostr(result.instance_number, 4));
31981+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
31982+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
31983+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
31984+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
31985
31986 return 0;
31987 }
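Sketch, not part of the patch: the removed chtostr() helper returned a pointer into its own 256-byte stack frame, i.e. a dangling pointer, on top of relying on strncat(). A "%.Ns" precision reads at most N bytes, so a fixed-width, possibly unterminated field can be printed directly without any temporary buffer:

	#include <linux/seq_file.h>

	static void demo_show_module_name(struct seq_file *seq, const char name[24])
	{
		seq_printf(seq, "Module name : %.24s\n", name);
	}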
31988diff -urNp linux-2.6.32.43/drivers/message/i2o/iop.c linux-2.6.32.43/drivers/message/i2o/iop.c
31989--- linux-2.6.32.43/drivers/message/i2o/iop.c 2011-03-27 14:31:47.000000000 -0400
31990+++ linux-2.6.32.43/drivers/message/i2o/iop.c 2011-05-04 17:56:28.000000000 -0400
31991@@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
31992
31993 spin_lock_irqsave(&c->context_list_lock, flags);
31994
31995- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
31996- atomic_inc(&c->context_list_counter);
31997+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
31998+ atomic_inc_unchecked(&c->context_list_counter);
31999
32000- entry->context = atomic_read(&c->context_list_counter);
32001+ entry->context = atomic_read_unchecked(&c->context_list_counter);
32002
32003 list_add(&entry->list, &c->context_list);
32004
32005@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi
32006
32007 #if BITS_PER_LONG == 64
32008 spin_lock_init(&c->context_list_lock);
32009- atomic_set(&c->context_list_counter, 0);
32010+ atomic_set_unchecked(&c->context_list_counter, 0);
32011 INIT_LIST_HEAD(&c->context_list);
32012 #endif
32013
32014diff -urNp linux-2.6.32.43/drivers/mfd/wm8350-i2c.c linux-2.6.32.43/drivers/mfd/wm8350-i2c.c
32015--- linux-2.6.32.43/drivers/mfd/wm8350-i2c.c 2011-03-27 14:31:47.000000000 -0400
32016+++ linux-2.6.32.43/drivers/mfd/wm8350-i2c.c 2011-05-16 21:46:57.000000000 -0400
32017@@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struc
32018 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
32019 int ret;
32020
32021+ pax_track_stack();
32022+
32023 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
32024 return -EINVAL;
32025
32026diff -urNp linux-2.6.32.43/drivers/misc/kgdbts.c linux-2.6.32.43/drivers/misc/kgdbts.c
32027--- linux-2.6.32.43/drivers/misc/kgdbts.c 2011-03-27 14:31:47.000000000 -0400
32028+++ linux-2.6.32.43/drivers/misc/kgdbts.c 2011-04-17 15:56:46.000000000 -0400
32029@@ -118,7 +118,7 @@
32030 } while (0)
32031 #define MAX_CONFIG_LEN 40
32032
32033-static struct kgdb_io kgdbts_io_ops;
32034+static const struct kgdb_io kgdbts_io_ops;
32035 static char get_buf[BUFMAX];
32036 static int get_buf_cnt;
32037 static char put_buf[BUFMAX];
32038@@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
32039 module_put(THIS_MODULE);
32040 }
32041
32042-static struct kgdb_io kgdbts_io_ops = {
32043+static const struct kgdb_io kgdbts_io_ops = {
32044 .name = "kgdbts",
32045 .read_char = kgdbts_get_char,
32046 .write_char = kgdbts_put_char,
32047diff -urNp linux-2.6.32.43/drivers/misc/sgi-gru/gruhandles.c linux-2.6.32.43/drivers/misc/sgi-gru/gruhandles.c
32048--- linux-2.6.32.43/drivers/misc/sgi-gru/gruhandles.c 2011-03-27 14:31:47.000000000 -0400
32049+++ linux-2.6.32.43/drivers/misc/sgi-gru/gruhandles.c 2011-04-17 15:56:46.000000000 -0400
32050@@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic
32051
32052 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
32053 {
32054- atomic_long_inc(&mcs_op_statistics[op].count);
32055- atomic_long_add(clks, &mcs_op_statistics[op].total);
32056+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
32057+ atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
32058 if (mcs_op_statistics[op].max < clks)
32059 mcs_op_statistics[op].max = clks;
32060 }
32061diff -urNp linux-2.6.32.43/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.32.43/drivers/misc/sgi-gru/gruprocfs.c
32062--- linux-2.6.32.43/drivers/misc/sgi-gru/gruprocfs.c 2011-03-27 14:31:47.000000000 -0400
32063+++ linux-2.6.32.43/drivers/misc/sgi-gru/gruprocfs.c 2011-04-17 15:56:46.000000000 -0400
32064@@ -32,9 +32,9 @@
32065
32066 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
32067
32068-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
32069+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
32070 {
32071- unsigned long val = atomic_long_read(v);
32072+ unsigned long val = atomic_long_read_unchecked(v);
32073
32074 if (val)
32075 seq_printf(s, "%16lu %s\n", val, id);
32076@@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se
32077 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
32078
32079 for (op = 0; op < mcsop_last; op++) {
32080- count = atomic_long_read(&mcs_op_statistics[op].count);
32081- total = atomic_long_read(&mcs_op_statistics[op].total);
32082+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
32083+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
32084 max = mcs_op_statistics[op].max;
32085 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
32086 count ? total / count : 0, max);
32087diff -urNp linux-2.6.32.43/drivers/misc/sgi-gru/grutables.h linux-2.6.32.43/drivers/misc/sgi-gru/grutables.h
32088--- linux-2.6.32.43/drivers/misc/sgi-gru/grutables.h 2011-03-27 14:31:47.000000000 -0400
32089+++ linux-2.6.32.43/drivers/misc/sgi-gru/grutables.h 2011-04-17 15:56:46.000000000 -0400
32090@@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
32091 * GRU statistics.
32092 */
32093 struct gru_stats_s {
32094- atomic_long_t vdata_alloc;
32095- atomic_long_t vdata_free;
32096- atomic_long_t gts_alloc;
32097- atomic_long_t gts_free;
32098- atomic_long_t vdata_double_alloc;
32099- atomic_long_t gts_double_allocate;
32100- atomic_long_t assign_context;
32101- atomic_long_t assign_context_failed;
32102- atomic_long_t free_context;
32103- atomic_long_t load_user_context;
32104- atomic_long_t load_kernel_context;
32105- atomic_long_t lock_kernel_context;
32106- atomic_long_t unlock_kernel_context;
32107- atomic_long_t steal_user_context;
32108- atomic_long_t steal_kernel_context;
32109- atomic_long_t steal_context_failed;
32110- atomic_long_t nopfn;
32111- atomic_long_t break_cow;
32112- atomic_long_t asid_new;
32113- atomic_long_t asid_next;
32114- atomic_long_t asid_wrap;
32115- atomic_long_t asid_reuse;
32116- atomic_long_t intr;
32117- atomic_long_t intr_mm_lock_failed;
32118- atomic_long_t call_os;
32119- atomic_long_t call_os_offnode_reference;
32120- atomic_long_t call_os_check_for_bug;
32121- atomic_long_t call_os_wait_queue;
32122- atomic_long_t user_flush_tlb;
32123- atomic_long_t user_unload_context;
32124- atomic_long_t user_exception;
32125- atomic_long_t set_context_option;
32126- atomic_long_t migrate_check;
32127- atomic_long_t migrated_retarget;
32128- atomic_long_t migrated_unload;
32129- atomic_long_t migrated_unload_delay;
32130- atomic_long_t migrated_nopfn_retarget;
32131- atomic_long_t migrated_nopfn_unload;
32132- atomic_long_t tlb_dropin;
32133- atomic_long_t tlb_dropin_fail_no_asid;
32134- atomic_long_t tlb_dropin_fail_upm;
32135- atomic_long_t tlb_dropin_fail_invalid;
32136- atomic_long_t tlb_dropin_fail_range_active;
32137- atomic_long_t tlb_dropin_fail_idle;
32138- atomic_long_t tlb_dropin_fail_fmm;
32139- atomic_long_t tlb_dropin_fail_no_exception;
32140- atomic_long_t tlb_dropin_fail_no_exception_war;
32141- atomic_long_t tfh_stale_on_fault;
32142- atomic_long_t mmu_invalidate_range;
32143- atomic_long_t mmu_invalidate_page;
32144- atomic_long_t mmu_clear_flush_young;
32145- atomic_long_t flush_tlb;
32146- atomic_long_t flush_tlb_gru;
32147- atomic_long_t flush_tlb_gru_tgh;
32148- atomic_long_t flush_tlb_gru_zero_asid;
32149-
32150- atomic_long_t copy_gpa;
32151-
32152- atomic_long_t mesq_receive;
32153- atomic_long_t mesq_receive_none;
32154- atomic_long_t mesq_send;
32155- atomic_long_t mesq_send_failed;
32156- atomic_long_t mesq_noop;
32157- atomic_long_t mesq_send_unexpected_error;
32158- atomic_long_t mesq_send_lb_overflow;
32159- atomic_long_t mesq_send_qlimit_reached;
32160- atomic_long_t mesq_send_amo_nacked;
32161- atomic_long_t mesq_send_put_nacked;
32162- atomic_long_t mesq_qf_not_full;
32163- atomic_long_t mesq_qf_locked;
32164- atomic_long_t mesq_qf_noop_not_full;
32165- atomic_long_t mesq_qf_switch_head_failed;
32166- atomic_long_t mesq_qf_unexpected_error;
32167- atomic_long_t mesq_noop_unexpected_error;
32168- atomic_long_t mesq_noop_lb_overflow;
32169- atomic_long_t mesq_noop_qlimit_reached;
32170- atomic_long_t mesq_noop_amo_nacked;
32171- atomic_long_t mesq_noop_put_nacked;
32172+ atomic_long_unchecked_t vdata_alloc;
32173+ atomic_long_unchecked_t vdata_free;
32174+ atomic_long_unchecked_t gts_alloc;
32175+ atomic_long_unchecked_t gts_free;
32176+ atomic_long_unchecked_t vdata_double_alloc;
32177+ atomic_long_unchecked_t gts_double_allocate;
32178+ atomic_long_unchecked_t assign_context;
32179+ atomic_long_unchecked_t assign_context_failed;
32180+ atomic_long_unchecked_t free_context;
32181+ atomic_long_unchecked_t load_user_context;
32182+ atomic_long_unchecked_t load_kernel_context;
32183+ atomic_long_unchecked_t lock_kernel_context;
32184+ atomic_long_unchecked_t unlock_kernel_context;
32185+ atomic_long_unchecked_t steal_user_context;
32186+ atomic_long_unchecked_t steal_kernel_context;
32187+ atomic_long_unchecked_t steal_context_failed;
32188+ atomic_long_unchecked_t nopfn;
32189+ atomic_long_unchecked_t break_cow;
32190+ atomic_long_unchecked_t asid_new;
32191+ atomic_long_unchecked_t asid_next;
32192+ atomic_long_unchecked_t asid_wrap;
32193+ atomic_long_unchecked_t asid_reuse;
32194+ atomic_long_unchecked_t intr;
32195+ atomic_long_unchecked_t intr_mm_lock_failed;
32196+ atomic_long_unchecked_t call_os;
32197+ atomic_long_unchecked_t call_os_offnode_reference;
32198+ atomic_long_unchecked_t call_os_check_for_bug;
32199+ atomic_long_unchecked_t call_os_wait_queue;
32200+ atomic_long_unchecked_t user_flush_tlb;
32201+ atomic_long_unchecked_t user_unload_context;
32202+ atomic_long_unchecked_t user_exception;
32203+ atomic_long_unchecked_t set_context_option;
32204+ atomic_long_unchecked_t migrate_check;
32205+ atomic_long_unchecked_t migrated_retarget;
32206+ atomic_long_unchecked_t migrated_unload;
32207+ atomic_long_unchecked_t migrated_unload_delay;
32208+ atomic_long_unchecked_t migrated_nopfn_retarget;
32209+ atomic_long_unchecked_t migrated_nopfn_unload;
32210+ atomic_long_unchecked_t tlb_dropin;
32211+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
32212+ atomic_long_unchecked_t tlb_dropin_fail_upm;
32213+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
32214+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
32215+ atomic_long_unchecked_t tlb_dropin_fail_idle;
32216+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
32217+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
32218+ atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
32219+ atomic_long_unchecked_t tfh_stale_on_fault;
32220+ atomic_long_unchecked_t mmu_invalidate_range;
32221+ atomic_long_unchecked_t mmu_invalidate_page;
32222+ atomic_long_unchecked_t mmu_clear_flush_young;
32223+ atomic_long_unchecked_t flush_tlb;
32224+ atomic_long_unchecked_t flush_tlb_gru;
32225+ atomic_long_unchecked_t flush_tlb_gru_tgh;
32226+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
32227+
32228+ atomic_long_unchecked_t copy_gpa;
32229+
32230+ atomic_long_unchecked_t mesq_receive;
32231+ atomic_long_unchecked_t mesq_receive_none;
32232+ atomic_long_unchecked_t mesq_send;
32233+ atomic_long_unchecked_t mesq_send_failed;
32234+ atomic_long_unchecked_t mesq_noop;
32235+ atomic_long_unchecked_t mesq_send_unexpected_error;
32236+ atomic_long_unchecked_t mesq_send_lb_overflow;
32237+ atomic_long_unchecked_t mesq_send_qlimit_reached;
32238+ atomic_long_unchecked_t mesq_send_amo_nacked;
32239+ atomic_long_unchecked_t mesq_send_put_nacked;
32240+ atomic_long_unchecked_t mesq_qf_not_full;
32241+ atomic_long_unchecked_t mesq_qf_locked;
32242+ atomic_long_unchecked_t mesq_qf_noop_not_full;
32243+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
32244+ atomic_long_unchecked_t mesq_qf_unexpected_error;
32245+ atomic_long_unchecked_t mesq_noop_unexpected_error;
32246+ atomic_long_unchecked_t mesq_noop_lb_overflow;
32247+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
32248+ atomic_long_unchecked_t mesq_noop_amo_nacked;
32249+ atomic_long_unchecked_t mesq_noop_put_nacked;
32250
32251 };
32252
32253@@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start
32254 cchop_deallocate, tghop_invalidate, mcsop_last};
32255
32256 struct mcs_op_statistic {
32257- atomic_long_t count;
32258- atomic_long_t total;
32259+ atomic_long_unchecked_t count;
32260+ atomic_long_unchecked_t total;
32261 unsigned long max;
32262 };
32263
32264@@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st
32265
32266 #define STAT(id) do { \
32267 if (gru_options & OPT_STATS) \
32268- atomic_long_inc(&gru_stats.id); \
32269+ atomic_long_inc_unchecked(&gru_stats.id); \
32270 } while (0)
32271
32272 #ifdef CONFIG_SGI_GRU_DEBUG
32273diff -urNp linux-2.6.32.43/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.32.43/drivers/mtd/chips/cfi_cmdset_0001.c
32274--- linux-2.6.32.43/drivers/mtd/chips/cfi_cmdset_0001.c 2011-03-27 14:31:47.000000000 -0400
32275+++ linux-2.6.32.43/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-16 21:46:57.000000000 -0400
32276@@ -743,6 +743,8 @@ static int chip_ready (struct map_info *
32277 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
32278 unsigned long timeo = jiffies + HZ;
32279
32280+ pax_track_stack();
32281+
32282 /* Prevent setting state FL_SYNCING for chip in suspended state. */
32283 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
32284 goto sleep;
32285@@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(stru
32286 unsigned long initial_adr;
32287 int initial_len = len;
32288
32289+ pax_track_stack();
32290+
32291 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
32292 adr += chip->start;
32293 initial_adr = adr;
32294@@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(st
32295 int retries = 3;
32296 int ret;
32297
32298+ pax_track_stack();
32299+
32300 adr += chip->start;
32301
32302 retry:
32303diff -urNp linux-2.6.32.43/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.32.43/drivers/mtd/chips/cfi_cmdset_0020.c
32304--- linux-2.6.32.43/drivers/mtd/chips/cfi_cmdset_0020.c 2011-03-27 14:31:47.000000000 -0400
32305+++ linux-2.6.32.43/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-16 21:46:57.000000000 -0400
32306@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
32307 unsigned long cmd_addr;
32308 struct cfi_private *cfi = map->fldrv_priv;
32309
32310+ pax_track_stack();
32311+
32312 adr += chip->start;
32313
32314 /* Ensure cmd read/writes are aligned. */
32315@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
32316 DECLARE_WAITQUEUE(wait, current);
32317 int wbufsize, z;
32318
32319+ pax_track_stack();
32320+
32321 /* M58LW064A requires bus alignment for buffer wriets -- saw */
32322 if (adr & (map_bankwidth(map)-1))
32323 return -EINVAL;
32324@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
32325 DECLARE_WAITQUEUE(wait, current);
32326 int ret = 0;
32327
32328+ pax_track_stack();
32329+
32330 adr += chip->start;
32331
32332 /* Let's determine this according to the interleave only once */
32333@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
32334 unsigned long timeo = jiffies + HZ;
32335 DECLARE_WAITQUEUE(wait, current);
32336
32337+ pax_track_stack();
32338+
32339 adr += chip->start;
32340
32341 /* Let's determine this according to the interleave only once */
32342@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
32343 unsigned long timeo = jiffies + HZ;
32344 DECLARE_WAITQUEUE(wait, current);
32345
32346+ pax_track_stack();
32347+
32348 adr += chip->start;
32349
32350 /* Let's determine this according to the interleave only once */
32351diff -urNp linux-2.6.32.43/drivers/mtd/devices/doc2000.c linux-2.6.32.43/drivers/mtd/devices/doc2000.c
32352--- linux-2.6.32.43/drivers/mtd/devices/doc2000.c 2011-03-27 14:31:47.000000000 -0400
32353+++ linux-2.6.32.43/drivers/mtd/devices/doc2000.c 2011-04-17 15:56:46.000000000 -0400
32354@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
32355
32356 /* The ECC will not be calculated correctly if less than 512 is written */
32357 /* DBB-
32358- if (len != 0x200 && eccbuf)
32359+ if (len != 0x200)
32360 printk(KERN_WARNING
32361 "ECC needs a full sector write (adr: %lx size %lx)\n",
32362 (long) to, (long) len);
32363diff -urNp linux-2.6.32.43/drivers/mtd/devices/doc2001.c linux-2.6.32.43/drivers/mtd/devices/doc2001.c
32364--- linux-2.6.32.43/drivers/mtd/devices/doc2001.c 2011-03-27 14:31:47.000000000 -0400
32365+++ linux-2.6.32.43/drivers/mtd/devices/doc2001.c 2011-04-17 15:56:46.000000000 -0400
32366@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
32367 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
32368
32369 /* Don't allow read past end of device */
32370- if (from >= this->totlen)
32371+ if (from >= this->totlen || !len)
32372 return -EINVAL;
32373
32374 /* Don't allow a single read to cross a 512-byte block boundary */
32375diff -urNp linux-2.6.32.43/drivers/mtd/ftl.c linux-2.6.32.43/drivers/mtd/ftl.c
32376--- linux-2.6.32.43/drivers/mtd/ftl.c 2011-03-27 14:31:47.000000000 -0400
32377+++ linux-2.6.32.43/drivers/mtd/ftl.c 2011-05-16 21:46:57.000000000 -0400
32378@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
32379 loff_t offset;
32380 uint16_t srcunitswap = cpu_to_le16(srcunit);
32381
32382+ pax_track_stack();
32383+
32384 eun = &part->EUNInfo[srcunit];
32385 xfer = &part->XferInfo[xferunit];
32386 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
32387diff -urNp linux-2.6.32.43/drivers/mtd/inftlcore.c linux-2.6.32.43/drivers/mtd/inftlcore.c
32388--- linux-2.6.32.43/drivers/mtd/inftlcore.c 2011-03-27 14:31:47.000000000 -0400
32389+++ linux-2.6.32.43/drivers/mtd/inftlcore.c 2011-05-16 21:46:57.000000000 -0400
32390@@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLr
32391 struct inftl_oob oob;
32392 size_t retlen;
32393
32394+ pax_track_stack();
32395+
32396 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
32397 "pending=%d)\n", inftl, thisVUC, pendingblock);
32398
32399diff -urNp linux-2.6.32.43/drivers/mtd/inftlmount.c linux-2.6.32.43/drivers/mtd/inftlmount.c
32400--- linux-2.6.32.43/drivers/mtd/inftlmount.c 2011-03-27 14:31:47.000000000 -0400
32401+++ linux-2.6.32.43/drivers/mtd/inftlmount.c 2011-05-16 21:46:57.000000000 -0400
32402@@ -54,6 +54,8 @@ static int find_boot_record(struct INFTL
32403 struct INFTLPartition *ip;
32404 size_t retlen;
32405
32406+ pax_track_stack();
32407+
32408 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
32409
32410 /*
32411diff -urNp linux-2.6.32.43/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.32.43/drivers/mtd/lpddr/qinfo_probe.c
32412--- linux-2.6.32.43/drivers/mtd/lpddr/qinfo_probe.c 2011-03-27 14:31:47.000000000 -0400
32413+++ linux-2.6.32.43/drivers/mtd/lpddr/qinfo_probe.c 2011-05-16 21:46:57.000000000 -0400
32414@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
32415 {
32416 map_word pfow_val[4];
32417
32418+ pax_track_stack();
32419+
32420 /* Check identification string */
32421 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
32422 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
32423diff -urNp linux-2.6.32.43/drivers/mtd/mtdchar.c linux-2.6.32.43/drivers/mtd/mtdchar.c
32424--- linux-2.6.32.43/drivers/mtd/mtdchar.c 2011-03-27 14:31:47.000000000 -0400
32425+++ linux-2.6.32.43/drivers/mtd/mtdchar.c 2011-05-16 21:46:57.000000000 -0400
32426@@ -460,6 +460,8 @@ static int mtd_ioctl(struct inode *inode
32427 u_long size;
32428 struct mtd_info_user info;
32429
32430+ pax_track_stack();
32431+
32432 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
32433
32434 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
32435diff -urNp linux-2.6.32.43/drivers/mtd/nftlcore.c linux-2.6.32.43/drivers/mtd/nftlcore.c
32436--- linux-2.6.32.43/drivers/mtd/nftlcore.c 2011-03-27 14:31:47.000000000 -0400
32437+++ linux-2.6.32.43/drivers/mtd/nftlcore.c 2011-05-16 21:46:57.000000000 -0400
32438@@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLre
32439 int inplace = 1;
32440 size_t retlen;
32441
32442+ pax_track_stack();
32443+
32444 memset(BlockMap, 0xff, sizeof(BlockMap));
32445 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
32446
32447diff -urNp linux-2.6.32.43/drivers/mtd/nftlmount.c linux-2.6.32.43/drivers/mtd/nftlmount.c
32448--- linux-2.6.32.43/drivers/mtd/nftlmount.c 2011-03-27 14:31:47.000000000 -0400
32449+++ linux-2.6.32.43/drivers/mtd/nftlmount.c 2011-05-18 20:09:37.000000000 -0400
32450@@ -23,6 +23,7 @@
32451 #include <asm/errno.h>
32452 #include <linux/delay.h>
32453 #include <linux/slab.h>
32454+#include <linux/sched.h>
32455 #include <linux/mtd/mtd.h>
32456 #include <linux/mtd/nand.h>
32457 #include <linux/mtd/nftl.h>
32458@@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLr
32459 struct mtd_info *mtd = nftl->mbd.mtd;
32460 unsigned int i;
32461
32462+ pax_track_stack();
32463+
32464 /* Assume logical EraseSize == physical erasesize for starting the scan.
32465 We'll sort it out later if we find a MediaHeader which says otherwise */
32466 /* Actually, we won't. The new DiskOnChip driver has already scanned
32467diff -urNp linux-2.6.32.43/drivers/mtd/ubi/build.c linux-2.6.32.43/drivers/mtd/ubi/build.c
32468--- linux-2.6.32.43/drivers/mtd/ubi/build.c 2011-03-27 14:31:47.000000000 -0400
32469+++ linux-2.6.32.43/drivers/mtd/ubi/build.c 2011-04-17 15:56:46.000000000 -0400
32470@@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
32471 static int __init bytes_str_to_int(const char *str)
32472 {
32473 char *endp;
32474- unsigned long result;
32475+ unsigned long result, scale = 1;
32476
32477 result = simple_strtoul(str, &endp, 0);
32478 if (str == endp || result >= INT_MAX) {
32479@@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const
32480
32481 switch (*endp) {
32482 case 'G':
32483- result *= 1024;
32484+ scale *= 1024;
32485 case 'M':
32486- result *= 1024;
32487+ scale *= 1024;
32488 case 'K':
32489- result *= 1024;
32490+ scale *= 1024;
32491 if (endp[1] == 'i' && endp[2] == 'B')
32492 endp += 2;
32493 case '\0':
32494@@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const
32495 return -EINVAL;
32496 }
32497
32498- return result;
32499+ if ((intoverflow_t)result*scale >= INT_MAX) {
32500+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
32501+ str);
32502+ return -EINVAL;
32503+ }
32504+
32505+ return result*scale;
32506 }
32507
32508 /**
32509diff -urNp linux-2.6.32.43/drivers/net/bnx2.c linux-2.6.32.43/drivers/net/bnx2.c
32510--- linux-2.6.32.43/drivers/net/bnx2.c 2011-03-27 14:31:47.000000000 -0400
32511+++ linux-2.6.32.43/drivers/net/bnx2.c 2011-05-16 21:46:57.000000000 -0400
32512@@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
32513 int rc = 0;
32514 u32 magic, csum;
32515
32516+ pax_track_stack();
32517+
32518 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
32519 goto test_nvram_done;
32520
32521diff -urNp linux-2.6.32.43/drivers/net/cxgb3/t3_hw.c linux-2.6.32.43/drivers/net/cxgb3/t3_hw.c
32522--- linux-2.6.32.43/drivers/net/cxgb3/t3_hw.c 2011-03-27 14:31:47.000000000 -0400
32523+++ linux-2.6.32.43/drivers/net/cxgb3/t3_hw.c 2011-05-16 21:46:57.000000000 -0400
32524@@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter
32525 int i, addr, ret;
32526 struct t3_vpd vpd;
32527
32528+ pax_track_stack();
32529+
32530 /*
32531 * Card information is normally at VPD_BASE but some early cards had
32532 * it at 0.
32533diff -urNp linux-2.6.32.43/drivers/net/e1000e/82571.c linux-2.6.32.43/drivers/net/e1000e/82571.c
32534--- linux-2.6.32.43/drivers/net/e1000e/82571.c 2011-03-27 14:31:47.000000000 -0400
32535+++ linux-2.6.32.43/drivers/net/e1000e/82571.c 2011-04-17 15:56:46.000000000 -0400
32536@@ -212,6 +212,7 @@ static s32 e1000_init_mac_params_82571(s
32537 {
32538 struct e1000_hw *hw = &adapter->hw;
32539 struct e1000_mac_info *mac = &hw->mac;
32540+ /* cannot be const */
32541 struct e1000_mac_operations *func = &mac->ops;
32542 u32 swsm = 0;
32543 u32 swsm2 = 0;
32544@@ -1656,7 +1657,7 @@ static void e1000_clear_hw_cntrs_82571(s
32545 temp = er32(ICRXDMTC);
32546 }
32547
32548-static struct e1000_mac_operations e82571_mac_ops = {
32549+static const struct e1000_mac_operations e82571_mac_ops = {
32550 /* .check_mng_mode: mac type dependent */
32551 /* .check_for_link: media type dependent */
32552 .id_led_init = e1000e_id_led_init,
32553@@ -1674,7 +1675,7 @@ static struct e1000_mac_operations e8257
32554 .setup_led = e1000e_setup_led_generic,
32555 };
32556
32557-static struct e1000_phy_operations e82_phy_ops_igp = {
32558+static const struct e1000_phy_operations e82_phy_ops_igp = {
32559 .acquire_phy = e1000_get_hw_semaphore_82571,
32560 .check_reset_block = e1000e_check_reset_block_generic,
32561 .commit_phy = NULL,
32562@@ -1691,7 +1692,7 @@ static struct e1000_phy_operations e82_p
32563 .cfg_on_link_up = NULL,
32564 };
32565
32566-static struct e1000_phy_operations e82_phy_ops_m88 = {
32567+static const struct e1000_phy_operations e82_phy_ops_m88 = {
32568 .acquire_phy = e1000_get_hw_semaphore_82571,
32569 .check_reset_block = e1000e_check_reset_block_generic,
32570 .commit_phy = e1000e_phy_sw_reset,
32571@@ -1708,7 +1709,7 @@ static struct e1000_phy_operations e82_p
32572 .cfg_on_link_up = NULL,
32573 };
32574
32575-static struct e1000_phy_operations e82_phy_ops_bm = {
32576+static const struct e1000_phy_operations e82_phy_ops_bm = {
32577 .acquire_phy = e1000_get_hw_semaphore_82571,
32578 .check_reset_block = e1000e_check_reset_block_generic,
32579 .commit_phy = e1000e_phy_sw_reset,
32580@@ -1725,7 +1726,7 @@ static struct e1000_phy_operations e82_p
32581 .cfg_on_link_up = NULL,
32582 };
32583
32584-static struct e1000_nvm_operations e82571_nvm_ops = {
32585+static const struct e1000_nvm_operations e82571_nvm_ops = {
32586 .acquire_nvm = e1000_acquire_nvm_82571,
32587 .read_nvm = e1000e_read_nvm_eerd,
32588 .release_nvm = e1000_release_nvm_82571,
32589diff -urNp linux-2.6.32.43/drivers/net/e1000e/e1000.h linux-2.6.32.43/drivers/net/e1000e/e1000.h
32590--- linux-2.6.32.43/drivers/net/e1000e/e1000.h 2011-03-27 14:31:47.000000000 -0400
32591+++ linux-2.6.32.43/drivers/net/e1000e/e1000.h 2011-04-17 15:56:46.000000000 -0400
32592@@ -375,9 +375,9 @@ struct e1000_info {
32593 u32 pba;
32594 u32 max_hw_frame_size;
32595 s32 (*get_variants)(struct e1000_adapter *);
32596- struct e1000_mac_operations *mac_ops;
32597- struct e1000_phy_operations *phy_ops;
32598- struct e1000_nvm_operations *nvm_ops;
32599+ const struct e1000_mac_operations *mac_ops;
32600+ const struct e1000_phy_operations *phy_ops;
32601+ const struct e1000_nvm_operations *nvm_ops;
32602 };
32603
32604 /* hardware capability, feature, and workaround flags */
32605diff -urNp linux-2.6.32.43/drivers/net/e1000e/es2lan.c linux-2.6.32.43/drivers/net/e1000e/es2lan.c
32606--- linux-2.6.32.43/drivers/net/e1000e/es2lan.c 2011-03-27 14:31:47.000000000 -0400
32607+++ linux-2.6.32.43/drivers/net/e1000e/es2lan.c 2011-04-17 15:56:46.000000000 -0400
32608@@ -207,6 +207,7 @@ static s32 e1000_init_mac_params_80003es
32609 {
32610 struct e1000_hw *hw = &adapter->hw;
32611 struct e1000_mac_info *mac = &hw->mac;
32612+ /* cannot be const */
32613 struct e1000_mac_operations *func = &mac->ops;
32614
32615 /* Set media type */
32616@@ -1365,7 +1366,7 @@ static void e1000_clear_hw_cntrs_80003es
32617 temp = er32(ICRXDMTC);
32618 }
32619
32620-static struct e1000_mac_operations es2_mac_ops = {
32621+static const struct e1000_mac_operations es2_mac_ops = {
32622 .id_led_init = e1000e_id_led_init,
32623 .check_mng_mode = e1000e_check_mng_mode_generic,
32624 /* check_for_link dependent on media type */
32625@@ -1383,7 +1384,7 @@ static struct e1000_mac_operations es2_m
32626 .setup_led = e1000e_setup_led_generic,
32627 };
32628
32629-static struct e1000_phy_operations es2_phy_ops = {
32630+static const struct e1000_phy_operations es2_phy_ops = {
32631 .acquire_phy = e1000_acquire_phy_80003es2lan,
32632 .check_reset_block = e1000e_check_reset_block_generic,
32633 .commit_phy = e1000e_phy_sw_reset,
32634@@ -1400,7 +1401,7 @@ static struct e1000_phy_operations es2_p
32635 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
32636 };
32637
32638-static struct e1000_nvm_operations es2_nvm_ops = {
32639+static const struct e1000_nvm_operations es2_nvm_ops = {
32640 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
32641 .read_nvm = e1000e_read_nvm_eerd,
32642 .release_nvm = e1000_release_nvm_80003es2lan,
32643diff -urNp linux-2.6.32.43/drivers/net/e1000e/hw.h linux-2.6.32.43/drivers/net/e1000e/hw.h
32644--- linux-2.6.32.43/drivers/net/e1000e/hw.h 2011-03-27 14:31:47.000000000 -0400
32645+++ linux-2.6.32.43/drivers/net/e1000e/hw.h 2011-04-17 15:56:46.000000000 -0400
32646@@ -756,34 +756,34 @@ struct e1000_mac_operations {
32647
32648 /* Function pointers for the PHY. */
32649 struct e1000_phy_operations {
32650- s32 (*acquire_phy)(struct e1000_hw *);
32651- s32 (*check_polarity)(struct e1000_hw *);
32652- s32 (*check_reset_block)(struct e1000_hw *);
32653- s32 (*commit_phy)(struct e1000_hw *);
32654- s32 (*force_speed_duplex)(struct e1000_hw *);
32655- s32 (*get_cfg_done)(struct e1000_hw *hw);
32656- s32 (*get_cable_length)(struct e1000_hw *);
32657- s32 (*get_phy_info)(struct e1000_hw *);
32658- s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
32659- s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
32660- void (*release_phy)(struct e1000_hw *);
32661- s32 (*reset_phy)(struct e1000_hw *);
32662- s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
32663- s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
32664- s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
32665- s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
32666- s32 (*cfg_on_link_up)(struct e1000_hw *);
32667+ s32 (* acquire_phy)(struct e1000_hw *);
32668+ s32 (* check_polarity)(struct e1000_hw *);
32669+ s32 (* check_reset_block)(struct e1000_hw *);
32670+ s32 (* commit_phy)(struct e1000_hw *);
32671+ s32 (* force_speed_duplex)(struct e1000_hw *);
32672+ s32 (* get_cfg_done)(struct e1000_hw *hw);
32673+ s32 (* get_cable_length)(struct e1000_hw *);
32674+ s32 (* get_phy_info)(struct e1000_hw *);
32675+ s32 (* read_phy_reg)(struct e1000_hw *, u32, u16 *);
32676+ s32 (* read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
32677+ void (* release_phy)(struct e1000_hw *);
32678+ s32 (* reset_phy)(struct e1000_hw *);
32679+ s32 (* set_d0_lplu_state)(struct e1000_hw *, bool);
32680+ s32 (* set_d3_lplu_state)(struct e1000_hw *, bool);
32681+ s32 (* write_phy_reg)(struct e1000_hw *, u32, u16);
32682+ s32 (* write_phy_reg_locked)(struct e1000_hw *, u32, u16);
32683+ s32 (* cfg_on_link_up)(struct e1000_hw *);
32684 };
32685
32686 /* Function pointers for the NVM. */
32687 struct e1000_nvm_operations {
32688- s32 (*acquire_nvm)(struct e1000_hw *);
32689- s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
32690- void (*release_nvm)(struct e1000_hw *);
32691- s32 (*update_nvm)(struct e1000_hw *);
32692- s32 (*valid_led_default)(struct e1000_hw *, u16 *);
32693- s32 (*validate_nvm)(struct e1000_hw *);
32694- s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
32695+ s32 (* const acquire_nvm)(struct e1000_hw *);
32696+ s32 (* const read_nvm)(struct e1000_hw *, u16, u16, u16 *);
32697+ void (* const release_nvm)(struct e1000_hw *);
32698+ s32 (* const update_nvm)(struct e1000_hw *);
32699+ s32 (* const valid_led_default)(struct e1000_hw *, u16 *);
32700+ s32 (* const validate_nvm)(struct e1000_hw *);
32701+ s32 (* const write_nvm)(struct e1000_hw *, u16, u16, u16 *);
32702 };
32703
32704 struct e1000_mac_info {
32705diff -urNp linux-2.6.32.43/drivers/net/e1000e/ich8lan.c linux-2.6.32.43/drivers/net/e1000e/ich8lan.c
32706--- linux-2.6.32.43/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:01.000000000 -0400
32707+++ linux-2.6.32.43/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:32.000000000 -0400
32708@@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan
32709 }
32710 }
32711
32712-static struct e1000_mac_operations ich8_mac_ops = {
32713+static const struct e1000_mac_operations ich8_mac_ops = {
32714 .id_led_init = e1000e_id_led_init,
32715 .check_mng_mode = e1000_check_mng_mode_ich8lan,
32716 .check_for_link = e1000_check_for_copper_link_ich8lan,
32717@@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_
32718 /* id_led_init dependent on mac type */
32719 };
32720
32721-static struct e1000_phy_operations ich8_phy_ops = {
32722+static const struct e1000_phy_operations ich8_phy_ops = {
32723 .acquire_phy = e1000_acquire_swflag_ich8lan,
32724 .check_reset_block = e1000_check_reset_block_ich8lan,
32725 .commit_phy = NULL,
32726@@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_
32727 .write_phy_reg = e1000e_write_phy_reg_igp,
32728 };
32729
32730-static struct e1000_nvm_operations ich8_nvm_ops = {
32731+static const struct e1000_nvm_operations ich8_nvm_ops = {
32732 .acquire_nvm = e1000_acquire_nvm_ich8lan,
32733 .read_nvm = e1000_read_nvm_ich8lan,
32734 .release_nvm = e1000_release_nvm_ich8lan,
32735diff -urNp linux-2.6.32.43/drivers/net/hamradio/6pack.c linux-2.6.32.43/drivers/net/hamradio/6pack.c
32736--- linux-2.6.32.43/drivers/net/hamradio/6pack.c 2011-07-13 17:23:04.000000000 -0400
32737+++ linux-2.6.32.43/drivers/net/hamradio/6pack.c 2011-07-13 17:23:18.000000000 -0400
32738@@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct t
32739 unsigned char buf[512];
32740 int count1;
32741
32742+ pax_track_stack();
32743+
32744 if (!count)
32745 return;
32746
32747diff -urNp linux-2.6.32.43/drivers/net/ibmveth.c linux-2.6.32.43/drivers/net/ibmveth.c
32748--- linux-2.6.32.43/drivers/net/ibmveth.c 2011-03-27 14:31:47.000000000 -0400
32749+++ linux-2.6.32.43/drivers/net/ibmveth.c 2011-04-17 15:56:46.000000000 -0400
32750@@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attr
32751 NULL,
32752 };
32753
32754-static struct sysfs_ops veth_pool_ops = {
32755+static const struct sysfs_ops veth_pool_ops = {
32756 .show = veth_pool_show,
32757 .store = veth_pool_store,
32758 };
32759diff -urNp linux-2.6.32.43/drivers/net/igb/e1000_82575.c linux-2.6.32.43/drivers/net/igb/e1000_82575.c
32760--- linux-2.6.32.43/drivers/net/igb/e1000_82575.c 2011-03-27 14:31:47.000000000 -0400
32761+++ linux-2.6.32.43/drivers/net/igb/e1000_82575.c 2011-04-17 15:56:46.000000000 -0400
32762@@ -1410,7 +1410,7 @@ void igb_vmdq_set_replication_pf(struct
32763 wr32(E1000_VT_CTL, vt_ctl);
32764 }
32765
32766-static struct e1000_mac_operations e1000_mac_ops_82575 = {
32767+static const struct e1000_mac_operations e1000_mac_ops_82575 = {
32768 .reset_hw = igb_reset_hw_82575,
32769 .init_hw = igb_init_hw_82575,
32770 .check_for_link = igb_check_for_link_82575,
32771@@ -1419,13 +1419,13 @@ static struct e1000_mac_operations e1000
32772 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
32773 };
32774
32775-static struct e1000_phy_operations e1000_phy_ops_82575 = {
32776+static const struct e1000_phy_operations e1000_phy_ops_82575 = {
32777 .acquire = igb_acquire_phy_82575,
32778 .get_cfg_done = igb_get_cfg_done_82575,
32779 .release = igb_release_phy_82575,
32780 };
32781
32782-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
32783+static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
32784 .acquire = igb_acquire_nvm_82575,
32785 .read = igb_read_nvm_eerd,
32786 .release = igb_release_nvm_82575,
32787diff -urNp linux-2.6.32.43/drivers/net/igb/e1000_hw.h linux-2.6.32.43/drivers/net/igb/e1000_hw.h
32788--- linux-2.6.32.43/drivers/net/igb/e1000_hw.h 2011-03-27 14:31:47.000000000 -0400
32789+++ linux-2.6.32.43/drivers/net/igb/e1000_hw.h 2011-04-17 15:56:46.000000000 -0400
32790@@ -305,17 +305,17 @@ struct e1000_phy_operations {
32791 };
32792
32793 struct e1000_nvm_operations {
32794- s32 (*acquire)(struct e1000_hw *);
32795- s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
32796- void (*release)(struct e1000_hw *);
32797- s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
32798+ s32 (* const acquire)(struct e1000_hw *);
32799+ s32 (* const read)(struct e1000_hw *, u16, u16, u16 *);
32800+ void (* const release)(struct e1000_hw *);
32801+ s32 (* const write)(struct e1000_hw *, u16, u16, u16 *);
32802 };
32803
32804 struct e1000_info {
32805 s32 (*get_invariants)(struct e1000_hw *);
32806- struct e1000_mac_operations *mac_ops;
32807- struct e1000_phy_operations *phy_ops;
32808- struct e1000_nvm_operations *nvm_ops;
32809+ const struct e1000_mac_operations *mac_ops;
32810+ const struct e1000_phy_operations *phy_ops;
32811+ const struct e1000_nvm_operations *nvm_ops;
32812 };
32813
32814 extern const struct e1000_info e1000_82575_info;
32815diff -urNp linux-2.6.32.43/drivers/net/iseries_veth.c linux-2.6.32.43/drivers/net/iseries_veth.c
32816--- linux-2.6.32.43/drivers/net/iseries_veth.c 2011-03-27 14:31:47.000000000 -0400
32817+++ linux-2.6.32.43/drivers/net/iseries_veth.c 2011-04-17 15:56:46.000000000 -0400
32818@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
32819 NULL
32820 };
32821
32822-static struct sysfs_ops veth_cnx_sysfs_ops = {
32823+static const struct sysfs_ops veth_cnx_sysfs_ops = {
32824 .show = veth_cnx_attribute_show
32825 };
32826
32827@@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
32828 NULL
32829 };
32830
32831-static struct sysfs_ops veth_port_sysfs_ops = {
32832+static const struct sysfs_ops veth_port_sysfs_ops = {
32833 .show = veth_port_attribute_show
32834 };
32835
32836diff -urNp linux-2.6.32.43/drivers/net/ixgb/ixgb_main.c linux-2.6.32.43/drivers/net/ixgb/ixgb_main.c
32837--- linux-2.6.32.43/drivers/net/ixgb/ixgb_main.c 2011-03-27 14:31:47.000000000 -0400
32838+++ linux-2.6.32.43/drivers/net/ixgb/ixgb_main.c 2011-05-16 21:46:57.000000000 -0400
32839@@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev
32840 u32 rctl;
32841 int i;
32842
32843+ pax_track_stack();
32844+
32845 /* Check for Promiscuous and All Multicast modes */
32846
32847 rctl = IXGB_READ_REG(hw, RCTL);
32848diff -urNp linux-2.6.32.43/drivers/net/ixgb/ixgb_param.c linux-2.6.32.43/drivers/net/ixgb/ixgb_param.c
32849--- linux-2.6.32.43/drivers/net/ixgb/ixgb_param.c 2011-03-27 14:31:47.000000000 -0400
32850+++ linux-2.6.32.43/drivers/net/ixgb/ixgb_param.c 2011-05-16 21:46:57.000000000 -0400
32851@@ -260,6 +260,9 @@ void __devinit
32852 ixgb_check_options(struct ixgb_adapter *adapter)
32853 {
32854 int bd = adapter->bd_number;
32855+
32856+ pax_track_stack();
32857+
32858 if (bd >= IXGB_MAX_NIC) {
32859 printk(KERN_NOTICE
32860 "Warning: no configuration for board #%i\n", bd);
32861diff -urNp linux-2.6.32.43/drivers/net/mlx4/main.c linux-2.6.32.43/drivers/net/mlx4/main.c
32862--- linux-2.6.32.43/drivers/net/mlx4/main.c 2011-03-27 14:31:47.000000000 -0400
32863+++ linux-2.6.32.43/drivers/net/mlx4/main.c 2011-05-18 20:09:37.000000000 -0400
32864@@ -38,6 +38,7 @@
32865 #include <linux/errno.h>
32866 #include <linux/pci.h>
32867 #include <linux/dma-mapping.h>
32868+#include <linux/sched.h>
32869
32870 #include <linux/mlx4/device.h>
32871 #include <linux/mlx4/doorbell.h>
32872@@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev
32873 u64 icm_size;
32874 int err;
32875
32876+ pax_track_stack();
32877+
32878 err = mlx4_QUERY_FW(dev);
32879 if (err) {
32880 if (err == -EACCES)
32881diff -urNp linux-2.6.32.43/drivers/net/niu.c linux-2.6.32.43/drivers/net/niu.c
32882--- linux-2.6.32.43/drivers/net/niu.c 2011-05-10 22:12:01.000000000 -0400
32883+++ linux-2.6.32.43/drivers/net/niu.c 2011-05-16 21:46:57.000000000 -0400
32884@@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struc
32885 int i, num_irqs, err;
32886 u8 first_ldg;
32887
32888+ pax_track_stack();
32889+
32890 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
32891 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
32892 ldg_num_map[i] = first_ldg + i;
32893diff -urNp linux-2.6.32.43/drivers/net/pcnet32.c linux-2.6.32.43/drivers/net/pcnet32.c
32894--- linux-2.6.32.43/drivers/net/pcnet32.c 2011-03-27 14:31:47.000000000 -0400
32895+++ linux-2.6.32.43/drivers/net/pcnet32.c 2011-04-17 15:56:46.000000000 -0400
32896@@ -79,7 +79,7 @@ static int cards_found;
32897 /*
32898 * VLB I/O addresses
32899 */
32900-static unsigned int pcnet32_portlist[] __initdata =
32901+static unsigned int pcnet32_portlist[] __devinitdata =
32902 { 0x300, 0x320, 0x340, 0x360, 0 };
32903
32904 static int pcnet32_debug = 0;
32905diff -urNp linux-2.6.32.43/drivers/net/tg3.h linux-2.6.32.43/drivers/net/tg3.h
32906--- linux-2.6.32.43/drivers/net/tg3.h 2011-03-27 14:31:47.000000000 -0400
32907+++ linux-2.6.32.43/drivers/net/tg3.h 2011-04-17 15:56:46.000000000 -0400
32908@@ -95,6 +95,7 @@
32909 #define CHIPREV_ID_5750_A0 0x4000
32910 #define CHIPREV_ID_5750_A1 0x4001
32911 #define CHIPREV_ID_5750_A3 0x4003
32912+#define CHIPREV_ID_5750_C1 0x4201
32913 #define CHIPREV_ID_5750_C2 0x4202
32914 #define CHIPREV_ID_5752_A0_HW 0x5000
32915 #define CHIPREV_ID_5752_A0 0x6000
32916diff -urNp linux-2.6.32.43/drivers/net/tulip/de2104x.c linux-2.6.32.43/drivers/net/tulip/de2104x.c
32917--- linux-2.6.32.43/drivers/net/tulip/de2104x.c 2011-03-27 14:31:47.000000000 -0400
32918+++ linux-2.6.32.43/drivers/net/tulip/de2104x.c 2011-05-16 21:46:57.000000000 -0400
32919@@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_i
32920 struct de_srom_info_leaf *il;
32921 void *bufp;
32922
32923+ pax_track_stack();
32924+
32925 /* download entire eeprom */
32926 for (i = 0; i < DE_EEPROM_WORDS; i++)
32927 ((__le16 *)ee_data)[i] =
32928diff -urNp linux-2.6.32.43/drivers/net/tulip/de4x5.c linux-2.6.32.43/drivers/net/tulip/de4x5.c
32929--- linux-2.6.32.43/drivers/net/tulip/de4x5.c 2011-03-27 14:31:47.000000000 -0400
32930+++ linux-2.6.32.43/drivers/net/tulip/de4x5.c 2011-04-17 15:56:46.000000000 -0400
32931@@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, stru
32932 for (i=0; i<ETH_ALEN; i++) {
32933 tmp.addr[i] = dev->dev_addr[i];
32934 }
32935- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32936+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
32937 break;
32938
32939 case DE4X5_SET_HWADDR: /* Set the hardware address */
32940@@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, stru
32941 spin_lock_irqsave(&lp->lock, flags);
32942 memcpy(&statbuf, &lp->pktStats, ioc->len);
32943 spin_unlock_irqrestore(&lp->lock, flags);
32944- if (copy_to_user(ioc->data, &statbuf, ioc->len))
32945+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
32946 return -EFAULT;
32947 break;
32948 }
32949diff -urNp linux-2.6.32.43/drivers/net/usb/hso.c linux-2.6.32.43/drivers/net/usb/hso.c
32950--- linux-2.6.32.43/drivers/net/usb/hso.c 2011-03-27 14:31:47.000000000 -0400
32951+++ linux-2.6.32.43/drivers/net/usb/hso.c 2011-04-17 15:56:46.000000000 -0400
32952@@ -71,7 +71,7 @@
32953 #include <asm/byteorder.h>
32954 #include <linux/serial_core.h>
32955 #include <linux/serial.h>
32956-
32957+#include <asm/local.h>
32958
32959 #define DRIVER_VERSION "1.2"
32960 #define MOD_AUTHOR "Option Wireless"
32961@@ -258,7 +258,7 @@ struct hso_serial {
32962
32963 /* from usb_serial_port */
32964 struct tty_struct *tty;
32965- int open_count;
32966+ local_t open_count;
32967 spinlock_t serial_lock;
32968
32969 int (*write_data) (struct hso_serial *serial);
32970@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_
32971 struct urb *urb;
32972
32973 urb = serial->rx_urb[0];
32974- if (serial->open_count > 0) {
32975+ if (local_read(&serial->open_count) > 0) {
32976 count = put_rxbuf_data(urb, serial);
32977 if (count == -1)
32978 return;
32979@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_cal
32980 DUMP1(urb->transfer_buffer, urb->actual_length);
32981
32982 /* Anyone listening? */
32983- if (serial->open_count == 0)
32984+ if (local_read(&serial->open_count) == 0)
32985 return;
32986
32987 if (status == 0) {
32988@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
32989 spin_unlock_irq(&serial->serial_lock);
32990
32991 /* check for port already opened, if not set the termios */
32992- serial->open_count++;
32993- if (serial->open_count == 1) {
32994+ if (local_inc_return(&serial->open_count) == 1) {
32995 tty->low_latency = 1;
32996 serial->rx_state = RX_IDLE;
32997 /* Force default termio settings */
32998@@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_st
32999 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
33000 if (result) {
33001 hso_stop_serial_device(serial->parent);
33002- serial->open_count--;
33003+ local_dec(&serial->open_count);
33004 kref_put(&serial->parent->ref, hso_serial_ref_free);
33005 }
33006 } else {
33007@@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_
33008
33009 /* reset the rts and dtr */
33010 /* do the actual close */
33011- serial->open_count--;
33012+ local_dec(&serial->open_count);
33013
33014- if (serial->open_count <= 0) {
33015- serial->open_count = 0;
33016+ if (local_read(&serial->open_count) <= 0) {
33017+ local_set(&serial->open_count, 0);
33018 spin_lock_irq(&serial->serial_lock);
33019 if (serial->tty == tty) {
33020 serial->tty->driver_data = NULL;
33021@@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struc
33022
33023 /* the actual setup */
33024 spin_lock_irqsave(&serial->serial_lock, flags);
33025- if (serial->open_count)
33026+ if (local_read(&serial->open_count))
33027 _hso_serial_set_termios(tty, old);
33028 else
33029 tty->termios = old;
33030@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
33031 /* Start all serial ports */
33032 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
33033 if (serial_table[i] && (serial_table[i]->interface == iface)) {
33034- if (dev2ser(serial_table[i])->open_count) {
33035+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
33036 result =
33037 hso_start_serial_device(serial_table[i], GFP_NOIO);
33038 hso_kick_transmit(dev2ser(serial_table[i]));
33039diff -urNp linux-2.6.32.43/drivers/net/vxge/vxge-main.c linux-2.6.32.43/drivers/net/vxge/vxge-main.c
33040--- linux-2.6.32.43/drivers/net/vxge/vxge-main.c 2011-03-27 14:31:47.000000000 -0400
33041+++ linux-2.6.32.43/drivers/net/vxge/vxge-main.c 2011-05-16 21:46:57.000000000 -0400
33042@@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_T
33043 struct sk_buff *completed[NR_SKB_COMPLETED];
33044 int more;
33045
33046+ pax_track_stack();
33047+
33048 do {
33049 more = 0;
33050 skb_ptr = completed;
33051@@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_conf
33052 u8 mtable[256] = {0}; /* CPU to vpath mapping */
33053 int index;
33054
33055+ pax_track_stack();
33056+
33057 /*
33058 * Filling
33059 * - itable with bucket numbers
33060diff -urNp linux-2.6.32.43/drivers/net/wan/cycx_x25.c linux-2.6.32.43/drivers/net/wan/cycx_x25.c
33061--- linux-2.6.32.43/drivers/net/wan/cycx_x25.c 2011-03-27 14:31:47.000000000 -0400
33062+++ linux-2.6.32.43/drivers/net/wan/cycx_x25.c 2011-05-16 21:46:57.000000000 -0400
33063@@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned
33064 unsigned char hex[1024],
33065 * phex = hex;
33066
33067+ pax_track_stack();
33068+
33069 if (len >= (sizeof(hex) / 2))
33070 len = (sizeof(hex) / 2) - 1;
33071
33072diff -urNp linux-2.6.32.43/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.32.43/drivers/net/wimax/i2400m/usb-fw.c
33073--- linux-2.6.32.43/drivers/net/wimax/i2400m/usb-fw.c 2011-03-27 14:31:47.000000000 -0400
33074+++ linux-2.6.32.43/drivers/net/wimax/i2400m/usb-fw.c 2011-05-16 21:46:57.000000000 -0400
33075@@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
33076 int do_autopm = 1;
33077 DECLARE_COMPLETION_ONSTACK(notif_completion);
33078
33079+ pax_track_stack();
33080+
33081 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
33082 i2400m, ack, ack_size);
33083 BUG_ON(_ack == i2400m->bm_ack_buf);
33084diff -urNp linux-2.6.32.43/drivers/net/wireless/airo.c linux-2.6.32.43/drivers/net/wireless/airo.c
33085--- linux-2.6.32.43/drivers/net/wireless/airo.c 2011-03-27 14:31:47.000000000 -0400
33086+++ linux-2.6.32.43/drivers/net/wireless/airo.c 2011-05-16 21:46:57.000000000 -0400
33087@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
33088 BSSListElement * loop_net;
33089 BSSListElement * tmp_net;
33090
33091+ pax_track_stack();
33092+
33093 /* Blow away current list of scan results */
33094 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
33095 list_move_tail (&loop_net->list, &ai->network_free_list);
33096@@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *
33097 WepKeyRid wkr;
33098 int rc;
33099
33100+ pax_track_stack();
33101+
33102 memset( &mySsid, 0, sizeof( mySsid ) );
33103 kfree (ai->flash);
33104 ai->flash = NULL;
33105@@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct i
33106 __le32 *vals = stats.vals;
33107 int len;
33108
33109+ pax_track_stack();
33110+
33111 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
33112 return -ENOMEM;
33113 data = (struct proc_data *)file->private_data;
33114@@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct ino
33115 /* If doLoseSync is not 1, we won't do a Lose Sync */
33116 int doLoseSync = -1;
33117
33118+ pax_track_stack();
33119+
33120 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
33121 return -ENOMEM;
33122 data = (struct proc_data *)file->private_data;
33123@@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_de
33124 int i;
33125 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
33126
33127+ pax_track_stack();
33128+
33129 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
33130 if (!qual)
33131 return -ENOMEM;
33132@@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(str
33133 CapabilityRid cap_rid;
33134 __le32 *vals = stats_rid.vals;
33135
33136+ pax_track_stack();
33137+
33138 /* Get stats out of the card */
33139 clear_bit(JOB_WSTATS, &local->jobs);
33140 if (local->power.event) {
33141diff -urNp linux-2.6.32.43/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.32.43/drivers/net/wireless/ath/ath5k/debug.c
33142--- linux-2.6.32.43/drivers/net/wireless/ath/ath5k/debug.c 2011-03-27 14:31:47.000000000 -0400
33143+++ linux-2.6.32.43/drivers/net/wireless/ath/ath5k/debug.c 2011-05-16 21:46:57.000000000 -0400
33144@@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct f
33145 unsigned int v;
33146 u64 tsf;
33147
33148+ pax_track_stack();
33149+
33150 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
33151 len += snprintf(buf+len, sizeof(buf)-len,
33152 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
33153@@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct fi
33154 unsigned int len = 0;
33155 unsigned int i;
33156
33157+ pax_track_stack();
33158+
33159 len += snprintf(buf+len, sizeof(buf)-len,
33160 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
33161
33162diff -urNp linux-2.6.32.43/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.32.43/drivers/net/wireless/ath/ath9k/debug.c
33163--- linux-2.6.32.43/drivers/net/wireless/ath/ath9k/debug.c 2011-03-27 14:31:47.000000000 -0400
33164+++ linux-2.6.32.43/drivers/net/wireless/ath/ath9k/debug.c 2011-05-16 21:46:57.000000000 -0400
33165@@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struc
33166 char buf[512];
33167 unsigned int len = 0;
33168
33169+ pax_track_stack();
33170+
33171 len += snprintf(buf + len, sizeof(buf) - len,
33172 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
33173 len += snprintf(buf + len, sizeof(buf) - len,
33174@@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct fi
33175 int i;
33176 u8 addr[ETH_ALEN];
33177
33178+ pax_track_stack();
33179+
33180 len += snprintf(buf + len, sizeof(buf) - len,
33181 "primary: %s (%s chan=%d ht=%d)\n",
33182 wiphy_name(sc->pri_wiphy->hw->wiphy),
33183diff -urNp linux-2.6.32.43/drivers/net/wireless/b43/debugfs.c linux-2.6.32.43/drivers/net/wireless/b43/debugfs.c
33184--- linux-2.6.32.43/drivers/net/wireless/b43/debugfs.c 2011-03-27 14:31:47.000000000 -0400
33185+++ linux-2.6.32.43/drivers/net/wireless/b43/debugfs.c 2011-04-17 15:56:46.000000000 -0400
33186@@ -43,7 +43,7 @@ static struct dentry *rootdir;
33187 struct b43_debugfs_fops {
33188 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
33189 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
33190- struct file_operations fops;
33191+ const struct file_operations fops;
33192 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
33193 size_t file_struct_offset;
33194 };
33195diff -urNp linux-2.6.32.43/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.32.43/drivers/net/wireless/b43legacy/debugfs.c
33196--- linux-2.6.32.43/drivers/net/wireless/b43legacy/debugfs.c 2011-03-27 14:31:47.000000000 -0400
33197+++ linux-2.6.32.43/drivers/net/wireless/b43legacy/debugfs.c 2011-04-17 15:56:46.000000000 -0400
33198@@ -44,7 +44,7 @@ static struct dentry *rootdir;
33199 struct b43legacy_debugfs_fops {
33200 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
33201 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
33202- struct file_operations fops;
33203+ const struct file_operations fops;
33204 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
33205 size_t file_struct_offset;
33206 /* Take wl->irq_lock before calling read/write? */
33207diff -urNp linux-2.6.32.43/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.32.43/drivers/net/wireless/ipw2x00/ipw2100.c
33208--- linux-2.6.32.43/drivers/net/wireless/ipw2x00/ipw2100.c 2011-03-27 14:31:47.000000000 -0400
33209+++ linux-2.6.32.43/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-16 21:46:57.000000000 -0400
33210@@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2
33211 int err;
33212 DECLARE_SSID_BUF(ssid);
33213
33214+ pax_track_stack();
33215+
33216 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
33217
33218 if (ssid_len)
33219@@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw210
33220 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
33221 int err;
33222
33223+ pax_track_stack();
33224+
33225 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
33226 idx, keylen, len);
33227
33228diff -urNp linux-2.6.32.43/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.32.43/drivers/net/wireless/ipw2x00/libipw_rx.c
33229--- linux-2.6.32.43/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-03-27 14:31:47.000000000 -0400
33230+++ linux-2.6.32.43/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-16 21:46:57.000000000 -0400
33231@@ -1566,6 +1566,8 @@ static void libipw_process_probe_respons
33232 unsigned long flags;
33233 DECLARE_SSID_BUF(ssid);
33234
33235+ pax_track_stack();
33236+
33237 LIBIPW_DEBUG_SCAN("'%s' (%pM"
33238 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
33239 print_ssid(ssid, info_element->data, info_element->len),
33240diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-1000.c linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-1000.c
33241--- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-03-27 14:31:47.000000000 -0400
33242+++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-04-17 15:56:46.000000000 -0400
33243@@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib =
33244 },
33245 };
33246
33247-static struct iwl_ops iwl1000_ops = {
33248+static const struct iwl_ops iwl1000_ops = {
33249 .ucode = &iwl5000_ucode,
33250 .lib = &iwl1000_lib,
33251 .hcmd = &iwl5000_hcmd,
33252diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-3945.c
33253--- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-03-27 14:31:47.000000000 -0400
33254+++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-04-17 15:56:46.000000000 -0400
33255@@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945
33256 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
33257 };
33258
33259-static struct iwl_ops iwl3945_ops = {
33260+static const struct iwl_ops iwl3945_ops = {
33261 .ucode = &iwl3945_ucode,
33262 .lib = &iwl3945_lib,
33263 .hcmd = &iwl3945_hcmd,
33264diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-4965.c
33265--- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-03-27 14:31:47.000000000 -0400
33266+++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-04-17 15:56:46.000000000 -0400
33267@@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib =
33268 },
33269 };
33270
33271-static struct iwl_ops iwl4965_ops = {
33272+static const struct iwl_ops iwl4965_ops = {
33273 .ucode = &iwl4965_ucode,
33274 .lib = &iwl4965_lib,
33275 .hcmd = &iwl4965_hcmd,
33276diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-5000.c
33277--- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:55:34.000000000 -0400
33278+++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:56:37.000000000 -0400
33279@@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib =
33280 },
33281 };
33282
33283-struct iwl_ops iwl5000_ops = {
33284+const struct iwl_ops iwl5000_ops = {
33285 .ucode = &iwl5000_ucode,
33286 .lib = &iwl5000_lib,
33287 .hcmd = &iwl5000_hcmd,
33288 .utils = &iwl5000_hcmd_utils,
33289 };
33290
33291-static struct iwl_ops iwl5150_ops = {
33292+static const struct iwl_ops iwl5150_ops = {
33293 .ucode = &iwl5000_ucode,
33294 .lib = &iwl5150_lib,
33295 .hcmd = &iwl5000_hcmd,
33296diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-6000.c
33297--- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-03-27 14:31:47.000000000 -0400
33298+++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-04-17 15:56:46.000000000 -0400
33299@@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000
33300 .calc_rssi = iwl5000_calc_rssi,
33301 };
33302
33303-static struct iwl_ops iwl6000_ops = {
33304+static const struct iwl_ops iwl6000_ops = {
33305 .ucode = &iwl5000_ucode,
33306 .lib = &iwl6000_lib,
33307 .hcmd = &iwl5000_hcmd,
33308diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
33309--- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-03-27 14:31:47.000000000 -0400
33310+++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-16 21:46:57.000000000 -0400
33311@@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, s
33312 u8 active_index = 0;
33313 s32 tpt = 0;
33314
33315+ pax_track_stack();
33316+
33317 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
33318
33319 if (!ieee80211_is_data(hdr->frame_control) ||
33320@@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_
33321 u8 valid_tx_ant = 0;
33322 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
33323
33324+ pax_track_stack();
33325+
33326 /* Override starting rate (index 0) if needed for debug purposes */
33327 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
33328
33329diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-debugfs.c
33330--- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-03-27 14:31:47.000000000 -0400
33331+++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-16 21:46:57.000000000 -0400
33332@@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(str
33333 int pos = 0;
33334 const size_t bufsz = sizeof(buf);
33335
33336+ pax_track_stack();
33337+
33338 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
33339 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
33340 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
33341@@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
33342 const size_t bufsz = sizeof(buf);
33343 ssize_t ret;
33344
33345+ pax_track_stack();
33346+
33347 for (i = 0; i < AC_NUM; i++) {
33348 pos += scnprintf(buf + pos, bufsz - pos,
33349 "\tcw_min\tcw_max\taifsn\ttxop\n");
33350diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-debug.h
33351--- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-03-27 14:31:47.000000000 -0400
33352+++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-04-17 15:56:46.000000000 -0400
33353@@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_pri
33354 #endif
33355
33356 #else
33357-#define IWL_DEBUG(__priv, level, fmt, args...)
33358-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
33359+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
33360+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
33361 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
33362 void *p, u32 len)
33363 {}
33364diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-dev.h
33365--- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-03-27 14:31:47.000000000 -0400
33366+++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-04-17 15:56:46.000000000 -0400
33367@@ -68,7 +68,7 @@ struct iwl_tx_queue;
33368
33369 /* shared structures from iwl-5000.c */
33370 extern struct iwl_mod_params iwl50_mod_params;
33371-extern struct iwl_ops iwl5000_ops;
33372+extern const struct iwl_ops iwl5000_ops;
33373 extern struct iwl_ucode_ops iwl5000_ucode;
33374 extern struct iwl_lib_ops iwl5000_lib;
33375 extern struct iwl_hcmd_ops iwl5000_hcmd;
33376diff -urNp linux-2.6.32.43/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.32.43/drivers/net/wireless/iwmc3200wifi/debugfs.c
33377--- linux-2.6.32.43/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-03-27 14:31:47.000000000 -0400
33378+++ linux-2.6.32.43/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-16 21:46:57.000000000 -0400
33379@@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
33380 int buf_len = 512;
33381 size_t len = 0;
33382
33383+ pax_track_stack();
33384+
33385 if (*ppos != 0)
33386 return 0;
33387 if (count < sizeof(buf))
33388diff -urNp linux-2.6.32.43/drivers/net/wireless/libertas/debugfs.c linux-2.6.32.43/drivers/net/wireless/libertas/debugfs.c
33389--- linux-2.6.32.43/drivers/net/wireless/libertas/debugfs.c 2011-03-27 14:31:47.000000000 -0400
33390+++ linux-2.6.32.43/drivers/net/wireless/libertas/debugfs.c 2011-04-17 15:56:46.000000000 -0400
33391@@ -708,7 +708,7 @@ out_unlock:
33392 struct lbs_debugfs_files {
33393 const char *name;
33394 int perm;
33395- struct file_operations fops;
33396+ const struct file_operations fops;
33397 };
33398
33399 static const struct lbs_debugfs_files debugfs_files[] = {
33400diff -urNp linux-2.6.32.43/drivers/net/wireless/rndis_wlan.c linux-2.6.32.43/drivers/net/wireless/rndis_wlan.c
33401--- linux-2.6.32.43/drivers/net/wireless/rndis_wlan.c 2011-03-27 14:31:47.000000000 -0400
33402+++ linux-2.6.32.43/drivers/net/wireless/rndis_wlan.c 2011-04-17 15:56:46.000000000 -0400
33403@@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbn
33404
33405 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
33406
33407- if (rts_threshold < 0 || rts_threshold > 2347)
33408+ if (rts_threshold > 2347)
33409 rts_threshold = 2347;
33410
33411 tmp = cpu_to_le32(rts_threshold);
33412diff -urNp linux-2.6.32.43/drivers/oprofile/buffer_sync.c linux-2.6.32.43/drivers/oprofile/buffer_sync.c
33413--- linux-2.6.32.43/drivers/oprofile/buffer_sync.c 2011-03-27 14:31:47.000000000 -0400
33414+++ linux-2.6.32.43/drivers/oprofile/buffer_sync.c 2011-04-17 15:56:46.000000000 -0400
33415@@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
33416 if (cookie == NO_COOKIE)
33417 offset = pc;
33418 if (cookie == INVALID_COOKIE) {
33419- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33420+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33421 offset = pc;
33422 }
33423 if (cookie != last_cookie) {
33424@@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
33425 /* add userspace sample */
33426
33427 if (!mm) {
33428- atomic_inc(&oprofile_stats.sample_lost_no_mm);
33429+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
33430 return 0;
33431 }
33432
33433 cookie = lookup_dcookie(mm, s->eip, &offset);
33434
33435 if (cookie == INVALID_COOKIE) {
33436- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
33437+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
33438 return 0;
33439 }
33440
33441@@ -561,7 +561,7 @@ void sync_buffer(int cpu)
33442 /* ignore backtraces if failed to add a sample */
33443 if (state == sb_bt_start) {
33444 state = sb_bt_ignore;
33445- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
33446+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
33447 }
33448 }
33449 release_mm(mm);
33450diff -urNp linux-2.6.32.43/drivers/oprofile/event_buffer.c linux-2.6.32.43/drivers/oprofile/event_buffer.c
33451--- linux-2.6.32.43/drivers/oprofile/event_buffer.c 2011-03-27 14:31:47.000000000 -0400
33452+++ linux-2.6.32.43/drivers/oprofile/event_buffer.c 2011-04-17 15:56:46.000000000 -0400
33453@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
33454 }
33455
33456 if (buffer_pos == buffer_size) {
33457- atomic_inc(&oprofile_stats.event_lost_overflow);
33458+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
33459 return;
33460 }
33461
33462diff -urNp linux-2.6.32.43/drivers/oprofile/oprof.c linux-2.6.32.43/drivers/oprofile/oprof.c
33463--- linux-2.6.32.43/drivers/oprofile/oprof.c 2011-03-27 14:31:47.000000000 -0400
33464+++ linux-2.6.32.43/drivers/oprofile/oprof.c 2011-04-17 15:56:46.000000000 -0400
33465@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
33466 if (oprofile_ops.switch_events())
33467 return;
33468
33469- atomic_inc(&oprofile_stats.multiplex_counter);
33470+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
33471 start_switch_worker();
33472 }
33473
33474diff -urNp linux-2.6.32.43/drivers/oprofile/oprofilefs.c linux-2.6.32.43/drivers/oprofile/oprofilefs.c
33475--- linux-2.6.32.43/drivers/oprofile/oprofilefs.c 2011-03-27 14:31:47.000000000 -0400
33476+++ linux-2.6.32.43/drivers/oprofile/oprofilefs.c 2011-04-17 15:56:46.000000000 -0400
33477@@ -187,7 +187,7 @@ static const struct file_operations atom
33478
33479
33480 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
33481- char const *name, atomic_t *val)
33482+ char const *name, atomic_unchecked_t *val)
33483 {
33484 struct dentry *d = __oprofilefs_create_file(sb, root, name,
33485 &atomic_ro_fops, 0444);
33486diff -urNp linux-2.6.32.43/drivers/oprofile/oprofile_stats.c linux-2.6.32.43/drivers/oprofile/oprofile_stats.c
33487--- linux-2.6.32.43/drivers/oprofile/oprofile_stats.c 2011-03-27 14:31:47.000000000 -0400
33488+++ linux-2.6.32.43/drivers/oprofile/oprofile_stats.c 2011-04-17 15:56:46.000000000 -0400
33489@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
33490 cpu_buf->sample_invalid_eip = 0;
33491 }
33492
33493- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
33494- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
33495- atomic_set(&oprofile_stats.event_lost_overflow, 0);
33496- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
33497- atomic_set(&oprofile_stats.multiplex_counter, 0);
33498+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
33499+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
33500+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
33501+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
33502+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
33503 }
33504
33505
33506diff -urNp linux-2.6.32.43/drivers/oprofile/oprofile_stats.h linux-2.6.32.43/drivers/oprofile/oprofile_stats.h
33507--- linux-2.6.32.43/drivers/oprofile/oprofile_stats.h 2011-03-27 14:31:47.000000000 -0400
33508+++ linux-2.6.32.43/drivers/oprofile/oprofile_stats.h 2011-04-17 15:56:46.000000000 -0400
33509@@ -13,11 +13,11 @@
33510 #include <asm/atomic.h>
33511
33512 struct oprofile_stat_struct {
33513- atomic_t sample_lost_no_mm;
33514- atomic_t sample_lost_no_mapping;
33515- atomic_t bt_lost_no_mapping;
33516- atomic_t event_lost_overflow;
33517- atomic_t multiplex_counter;
33518+ atomic_unchecked_t sample_lost_no_mm;
33519+ atomic_unchecked_t sample_lost_no_mapping;
33520+ atomic_unchecked_t bt_lost_no_mapping;
33521+ atomic_unchecked_t event_lost_overflow;
33522+ atomic_unchecked_t multiplex_counter;
33523 };
33524
33525 extern struct oprofile_stat_struct oprofile_stats;
33526diff -urNp linux-2.6.32.43/drivers/parisc/pdc_stable.c linux-2.6.32.43/drivers/parisc/pdc_stable.c
33527--- linux-2.6.32.43/drivers/parisc/pdc_stable.c 2011-03-27 14:31:47.000000000 -0400
33528+++ linux-2.6.32.43/drivers/parisc/pdc_stable.c 2011-04-17 15:56:46.000000000 -0400
33529@@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
33530 return ret;
33531 }
33532
33533-static struct sysfs_ops pdcspath_attr_ops = {
33534+static const struct sysfs_ops pdcspath_attr_ops = {
33535 .show = pdcspath_attr_show,
33536 .store = pdcspath_attr_store,
33537 };
33538diff -urNp linux-2.6.32.43/drivers/parport/procfs.c linux-2.6.32.43/drivers/parport/procfs.c
33539--- linux-2.6.32.43/drivers/parport/procfs.c 2011-03-27 14:31:47.000000000 -0400
33540+++ linux-2.6.32.43/drivers/parport/procfs.c 2011-04-17 15:56:46.000000000 -0400
33541@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
33542
33543 *ppos += len;
33544
33545- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
33546+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
33547 }
33548
33549 #ifdef CONFIG_PARPORT_1284
33550@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
33551
33552 *ppos += len;
33553
33554- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
33555+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
33556 }
33557 #endif /* IEEE1284.3 support. */
33558
33559diff -urNp linux-2.6.32.43/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.32.43/drivers/pci/hotplug/acpiphp_glue.c
33560--- linux-2.6.32.43/drivers/pci/hotplug/acpiphp_glue.c 2011-03-27 14:31:47.000000000 -0400
33561+++ linux-2.6.32.43/drivers/pci/hotplug/acpiphp_glue.c 2011-04-17 15:56:46.000000000 -0400
33562@@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
33563 }
33564
33565
33566-static struct acpi_dock_ops acpiphp_dock_ops = {
33567+static const struct acpi_dock_ops acpiphp_dock_ops = {
33568 .handler = handle_hotplug_event_func,
33569 };
33570
33571diff -urNp linux-2.6.32.43/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.32.43/drivers/pci/hotplug/cpqphp_nvram.c
33572--- linux-2.6.32.43/drivers/pci/hotplug/cpqphp_nvram.c 2011-03-27 14:31:47.000000000 -0400
33573+++ linux-2.6.32.43/drivers/pci/hotplug/cpqphp_nvram.c 2011-04-17 15:56:46.000000000 -0400
33574@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
33575
33576 void compaq_nvram_init (void __iomem *rom_start)
33577 {
33578+
33579+#ifndef CONFIG_PAX_KERNEXEC
33580 if (rom_start) {
33581 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
33582 }
33583+#endif
33584+
33585 dbg("int15 entry = %p\n", compaq_int15_entry_point);
33586
33587 /* initialize our int15 lock */
33588diff -urNp linux-2.6.32.43/drivers/pci/hotplug/fakephp.c linux-2.6.32.43/drivers/pci/hotplug/fakephp.c
33589--- linux-2.6.32.43/drivers/pci/hotplug/fakephp.c 2011-03-27 14:31:47.000000000 -0400
33590+++ linux-2.6.32.43/drivers/pci/hotplug/fakephp.c 2011-04-17 15:56:46.000000000 -0400
33591@@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
33592 }
33593
33594 static struct kobj_type legacy_ktype = {
33595- .sysfs_ops = &(struct sysfs_ops){
33596+ .sysfs_ops = &(const struct sysfs_ops){
33597 .store = legacy_store, .show = legacy_show
33598 },
33599 .release = &legacy_release,
33600diff -urNp linux-2.6.32.43/drivers/pci/intel-iommu.c linux-2.6.32.43/drivers/pci/intel-iommu.c
33601--- linux-2.6.32.43/drivers/pci/intel-iommu.c 2011-05-10 22:12:01.000000000 -0400
33602+++ linux-2.6.32.43/drivers/pci/intel-iommu.c 2011-05-10 22:12:33.000000000 -0400
33603@@ -2643,7 +2643,7 @@ error:
33604 return 0;
33605 }
33606
33607-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
33608+dma_addr_t intel_map_page(struct device *dev, struct page *page,
33609 unsigned long offset, size_t size,
33610 enum dma_data_direction dir,
33611 struct dma_attrs *attrs)
33612@@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain
33613 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
33614 }
33615
33616-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
33617+void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
33618 size_t size, enum dma_data_direction dir,
33619 struct dma_attrs *attrs)
33620 {
33621@@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct devi
33622 }
33623 }
33624
33625-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
33626+void *intel_alloc_coherent(struct device *hwdev, size_t size,
33627 dma_addr_t *dma_handle, gfp_t flags)
33628 {
33629 void *vaddr;
33630@@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct
33631 return NULL;
33632 }
33633
33634-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
33635+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
33636 dma_addr_t dma_handle)
33637 {
33638 int order;
33639@@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct d
33640 free_pages((unsigned long)vaddr, order);
33641 }
33642
33643-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
33644+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
33645 int nelems, enum dma_data_direction dir,
33646 struct dma_attrs *attrs)
33647 {
33648@@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(str
33649 return nelems;
33650 }
33651
33652-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
33653+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
33654 enum dma_data_direction dir, struct dma_attrs *attrs)
33655 {
33656 int i;
33657@@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *h
33658 return nelems;
33659 }
33660
33661-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
33662+int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
33663 {
33664 return !dma_addr;
33665 }
33666
33667-struct dma_map_ops intel_dma_ops = {
33668+const struct dma_map_ops intel_dma_ops = {
33669 .alloc_coherent = intel_alloc_coherent,
33670 .free_coherent = intel_free_coherent,
33671 .map_sg = intel_map_sg,
33672diff -urNp linux-2.6.32.43/drivers/pci/pcie/aspm.c linux-2.6.32.43/drivers/pci/pcie/aspm.c
33673--- linux-2.6.32.43/drivers/pci/pcie/aspm.c 2011-03-27 14:31:47.000000000 -0400
33674+++ linux-2.6.32.43/drivers/pci/pcie/aspm.c 2011-04-17 15:56:46.000000000 -0400
33675@@ -27,9 +27,9 @@
33676 #define MODULE_PARAM_PREFIX "pcie_aspm."
33677
33678 /* Note: those are not register definitions */
33679-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
33680-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
33681-#define ASPM_STATE_L1 (4) /* L1 state */
33682+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
33683+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
33684+#define ASPM_STATE_L1 (4U) /* L1 state */
33685 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
33686 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
33687
33688diff -urNp linux-2.6.32.43/drivers/pci/probe.c linux-2.6.32.43/drivers/pci/probe.c
33689--- linux-2.6.32.43/drivers/pci/probe.c 2011-03-27 14:31:47.000000000 -0400
33690+++ linux-2.6.32.43/drivers/pci/probe.c 2011-04-17 15:56:46.000000000 -0400
33691@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
33692 return ret;
33693 }
33694
33695-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
33696+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
33697 struct device_attribute *attr,
33698 char *buf)
33699 {
33700 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
33701 }
33702
33703-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
33704+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
33705 struct device_attribute *attr,
33706 char *buf)
33707 {
33708diff -urNp linux-2.6.32.43/drivers/pci/proc.c linux-2.6.32.43/drivers/pci/proc.c
33709--- linux-2.6.32.43/drivers/pci/proc.c 2011-03-27 14:31:47.000000000 -0400
33710+++ linux-2.6.32.43/drivers/pci/proc.c 2011-04-17 15:56:46.000000000 -0400
33711@@ -480,7 +480,16 @@ static const struct file_operations proc
33712 static int __init pci_proc_init(void)
33713 {
33714 struct pci_dev *dev = NULL;
33715+
33716+#ifdef CONFIG_GRKERNSEC_PROC_ADD
33717+#ifdef CONFIG_GRKERNSEC_PROC_USER
33718+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
33719+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
33720+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
33721+#endif
33722+#else
33723 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
33724+#endif
33725 proc_create("devices", 0, proc_bus_pci_dir,
33726 &proc_bus_pci_dev_operations);
33727 proc_initialized = 1;
33728diff -urNp linux-2.6.32.43/drivers/pci/slot.c linux-2.6.32.43/drivers/pci/slot.c
33729--- linux-2.6.32.43/drivers/pci/slot.c 2011-03-27 14:31:47.000000000 -0400
33730+++ linux-2.6.32.43/drivers/pci/slot.c 2011-04-17 15:56:46.000000000 -0400
33731@@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
33732 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
33733 }
33734
33735-static struct sysfs_ops pci_slot_sysfs_ops = {
33736+static const struct sysfs_ops pci_slot_sysfs_ops = {
33737 .show = pci_slot_attr_show,
33738 .store = pci_slot_attr_store,
33739 };
33740diff -urNp linux-2.6.32.43/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.32.43/drivers/pcmcia/pcmcia_ioctl.c
33741--- linux-2.6.32.43/drivers/pcmcia/pcmcia_ioctl.c 2011-03-27 14:31:47.000000000 -0400
33742+++ linux-2.6.32.43/drivers/pcmcia/pcmcia_ioctl.c 2011-04-17 15:56:46.000000000 -0400
33743@@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode
33744 return -EFAULT;
33745 }
33746 }
33747- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
33748+ buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
33749 if (!buf)
33750 return -ENOMEM;
33751
33752diff -urNp linux-2.6.32.43/drivers/platform/x86/acer-wmi.c linux-2.6.32.43/drivers/platform/x86/acer-wmi.c
33753--- linux-2.6.32.43/drivers/platform/x86/acer-wmi.c 2011-03-27 14:31:47.000000000 -0400
33754+++ linux-2.6.32.43/drivers/platform/x86/acer-wmi.c 2011-04-17 15:56:46.000000000 -0400
33755@@ -918,7 +918,7 @@ static int update_bl_status(struct backl
33756 return 0;
33757 }
33758
33759-static struct backlight_ops acer_bl_ops = {
33760+static const struct backlight_ops acer_bl_ops = {
33761 .get_brightness = read_brightness,
33762 .update_status = update_bl_status,
33763 };
33764diff -urNp linux-2.6.32.43/drivers/platform/x86/asus_acpi.c linux-2.6.32.43/drivers/platform/x86/asus_acpi.c
33765--- linux-2.6.32.43/drivers/platform/x86/asus_acpi.c 2011-03-27 14:31:47.000000000 -0400
33766+++ linux-2.6.32.43/drivers/platform/x86/asus_acpi.c 2011-04-17 15:56:46.000000000 -0400
33767@@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_
33768 return 0;
33769 }
33770
33771-static struct backlight_ops asus_backlight_data = {
33772+static const struct backlight_ops asus_backlight_data = {
33773 .get_brightness = read_brightness,
33774 .update_status = set_brightness_status,
33775 };
33776diff -urNp linux-2.6.32.43/drivers/platform/x86/asus-laptop.c linux-2.6.32.43/drivers/platform/x86/asus-laptop.c
33777--- linux-2.6.32.43/drivers/platform/x86/asus-laptop.c 2011-03-27 14:31:47.000000000 -0400
33778+++ linux-2.6.32.43/drivers/platform/x86/asus-laptop.c 2011-04-17 15:56:46.000000000 -0400
33779@@ -250,7 +250,7 @@ static struct backlight_device *asus_bac
33780 */
33781 static int read_brightness(struct backlight_device *bd);
33782 static int update_bl_status(struct backlight_device *bd);
33783-static struct backlight_ops asusbl_ops = {
33784+static const struct backlight_ops asusbl_ops = {
33785 .get_brightness = read_brightness,
33786 .update_status = update_bl_status,
33787 };
33788diff -urNp linux-2.6.32.43/drivers/platform/x86/compal-laptop.c linux-2.6.32.43/drivers/platform/x86/compal-laptop.c
33789--- linux-2.6.32.43/drivers/platform/x86/compal-laptop.c 2011-03-27 14:31:47.000000000 -0400
33790+++ linux-2.6.32.43/drivers/platform/x86/compal-laptop.c 2011-04-17 15:56:46.000000000 -0400
33791@@ -163,7 +163,7 @@ static int bl_update_status(struct backl
33792 return set_lcd_level(b->props.brightness);
33793 }
33794
33795-static struct backlight_ops compalbl_ops = {
33796+static const struct backlight_ops compalbl_ops = {
33797 .get_brightness = bl_get_brightness,
33798 .update_status = bl_update_status,
33799 };
33800diff -urNp linux-2.6.32.43/drivers/platform/x86/dell-laptop.c linux-2.6.32.43/drivers/platform/x86/dell-laptop.c
33801--- linux-2.6.32.43/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:01.000000000 -0400
33802+++ linux-2.6.32.43/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:33.000000000 -0400
33803@@ -318,7 +318,7 @@ static int dell_get_intensity(struct bac
33804 return buffer.output[1];
33805 }
33806
33807-static struct backlight_ops dell_ops = {
33808+static const struct backlight_ops dell_ops = {
33809 .get_brightness = dell_get_intensity,
33810 .update_status = dell_send_intensity,
33811 };
33812diff -urNp linux-2.6.32.43/drivers/platform/x86/eeepc-laptop.c linux-2.6.32.43/drivers/platform/x86/eeepc-laptop.c
33813--- linux-2.6.32.43/drivers/platform/x86/eeepc-laptop.c 2011-03-27 14:31:47.000000000 -0400
33814+++ linux-2.6.32.43/drivers/platform/x86/eeepc-laptop.c 2011-04-17 15:56:46.000000000 -0400
33815@@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device
33816 */
33817 static int read_brightness(struct backlight_device *bd);
33818 static int update_bl_status(struct backlight_device *bd);
33819-static struct backlight_ops eeepcbl_ops = {
33820+static const struct backlight_ops eeepcbl_ops = {
33821 .get_brightness = read_brightness,
33822 .update_status = update_bl_status,
33823 };
33824diff -urNp linux-2.6.32.43/drivers/platform/x86/fujitsu-laptop.c linux-2.6.32.43/drivers/platform/x86/fujitsu-laptop.c
33825--- linux-2.6.32.43/drivers/platform/x86/fujitsu-laptop.c 2011-03-27 14:31:47.000000000 -0400
33826+++ linux-2.6.32.43/drivers/platform/x86/fujitsu-laptop.c 2011-04-17 15:56:46.000000000 -0400
33827@@ -436,7 +436,7 @@ static int bl_update_status(struct backl
33828 return ret;
33829 }
33830
33831-static struct backlight_ops fujitsubl_ops = {
33832+static const struct backlight_ops fujitsubl_ops = {
33833 .get_brightness = bl_get_brightness,
33834 .update_status = bl_update_status,
33835 };
33836diff -urNp linux-2.6.32.43/drivers/platform/x86/msi-laptop.c linux-2.6.32.43/drivers/platform/x86/msi-laptop.c
33837--- linux-2.6.32.43/drivers/platform/x86/msi-laptop.c 2011-03-27 14:31:47.000000000 -0400
33838+++ linux-2.6.32.43/drivers/platform/x86/msi-laptop.c 2011-04-17 15:56:46.000000000 -0400
33839@@ -161,7 +161,7 @@ static int bl_update_status(struct backl
33840 return set_lcd_level(b->props.brightness);
33841 }
33842
33843-static struct backlight_ops msibl_ops = {
33844+static const struct backlight_ops msibl_ops = {
33845 .get_brightness = bl_get_brightness,
33846 .update_status = bl_update_status,
33847 };
33848diff -urNp linux-2.6.32.43/drivers/platform/x86/panasonic-laptop.c linux-2.6.32.43/drivers/platform/x86/panasonic-laptop.c
33849--- linux-2.6.32.43/drivers/platform/x86/panasonic-laptop.c 2011-03-27 14:31:47.000000000 -0400
33850+++ linux-2.6.32.43/drivers/platform/x86/panasonic-laptop.c 2011-04-17 15:56:46.000000000 -0400
33851@@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
33852 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
33853 }
33854
33855-static struct backlight_ops pcc_backlight_ops = {
33856+static const struct backlight_ops pcc_backlight_ops = {
33857 .get_brightness = bl_get,
33858 .update_status = bl_set_status,
33859 };
33860diff -urNp linux-2.6.32.43/drivers/platform/x86/sony-laptop.c linux-2.6.32.43/drivers/platform/x86/sony-laptop.c
33861--- linux-2.6.32.43/drivers/platform/x86/sony-laptop.c 2011-03-27 14:31:47.000000000 -0400
33862+++ linux-2.6.32.43/drivers/platform/x86/sony-laptop.c 2011-04-17 15:56:46.000000000 -0400
33863@@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
33864 }
33865
33866 static struct backlight_device *sony_backlight_device;
33867-static struct backlight_ops sony_backlight_ops = {
33868+static const struct backlight_ops sony_backlight_ops = {
33869 .update_status = sony_backlight_update_status,
33870 .get_brightness = sony_backlight_get_brightness,
33871 };
33872diff -urNp linux-2.6.32.43/drivers/platform/x86/thinkpad_acpi.c linux-2.6.32.43/drivers/platform/x86/thinkpad_acpi.c
33873--- linux-2.6.32.43/drivers/platform/x86/thinkpad_acpi.c 2011-03-27 14:31:47.000000000 -0400
33874+++ linux-2.6.32.43/drivers/platform/x86/thinkpad_acpi.c 2011-04-17 15:56:46.000000000 -0400
33875@@ -6122,7 +6122,7 @@ static void tpacpi_brightness_notify_cha
33876 BACKLIGHT_UPDATE_HOTKEY);
33877 }
33878
33879-static struct backlight_ops ibm_backlight_data = {
33880+static const struct backlight_ops ibm_backlight_data = {
33881 .get_brightness = brightness_get,
33882 .update_status = brightness_update_status,
33883 };
33884diff -urNp linux-2.6.32.43/drivers/platform/x86/toshiba_acpi.c linux-2.6.32.43/drivers/platform/x86/toshiba_acpi.c
33885--- linux-2.6.32.43/drivers/platform/x86/toshiba_acpi.c 2011-03-27 14:31:47.000000000 -0400
33886+++ linux-2.6.32.43/drivers/platform/x86/toshiba_acpi.c 2011-04-17 15:56:46.000000000 -0400
33887@@ -671,7 +671,7 @@ static acpi_status remove_device(void)
33888 return AE_OK;
33889 }
33890
33891-static struct backlight_ops toshiba_backlight_data = {
33892+static const struct backlight_ops toshiba_backlight_data = {
33893 .get_brightness = get_lcd,
33894 .update_status = set_lcd_status,
33895 };
33896diff -urNp linux-2.6.32.43/drivers/pnp/pnpbios/bioscalls.c linux-2.6.32.43/drivers/pnp/pnpbios/bioscalls.c
33897--- linux-2.6.32.43/drivers/pnp/pnpbios/bioscalls.c 2011-03-27 14:31:47.000000000 -0400
33898+++ linux-2.6.32.43/drivers/pnp/pnpbios/bioscalls.c 2011-04-17 15:56:46.000000000 -0400
33899@@ -60,7 +60,7 @@ do { \
33900 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
33901 } while(0)
33902
33903-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
33904+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
33905 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
33906
33907 /*
33908@@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func
33909
33910 cpu = get_cpu();
33911 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
33912+
33913+ pax_open_kernel();
33914 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
33915+ pax_close_kernel();
33916
33917 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
33918 spin_lock_irqsave(&pnp_bios_lock, flags);
33919@@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func
33920 :"memory");
33921 spin_unlock_irqrestore(&pnp_bios_lock, flags);
33922
33923+ pax_open_kernel();
33924 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
33925+ pax_close_kernel();
33926+
33927 put_cpu();
33928
33929 /* If we get here and this is set then the PnP BIOS faulted on us. */
33930@@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 n
33931 return status;
33932 }
33933
33934-void pnpbios_calls_init(union pnp_bios_install_struct *header)
33935+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
33936 {
33937 int i;
33938
33939@@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_i
33940 pnp_bios_callpoint.offset = header->fields.pm16offset;
33941 pnp_bios_callpoint.segment = PNP_CS16;
33942
33943+ pax_open_kernel();
33944+
33945 for_each_possible_cpu(i) {
33946 struct desc_struct *gdt = get_cpu_gdt_table(i);
33947 if (!gdt)
33948@@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_i
33949 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
33950 (unsigned long)__va(header->fields.pm16dseg));
33951 }
33952+
33953+ pax_close_kernel();
33954 }
33955diff -urNp linux-2.6.32.43/drivers/pnp/resource.c linux-2.6.32.43/drivers/pnp/resource.c
33956--- linux-2.6.32.43/drivers/pnp/resource.c 2011-03-27 14:31:47.000000000 -0400
33957+++ linux-2.6.32.43/drivers/pnp/resource.c 2011-04-17 15:56:46.000000000 -0400
33958@@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
33959 return 1;
33960
33961 /* check if the resource is valid */
33962- if (*irq < 0 || *irq > 15)
33963+ if (*irq > 15)
33964 return 0;
33965
33966 /* check if the resource is reserved */
33967@@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
33968 return 1;
33969
33970 /* check if the resource is valid */
33971- if (*dma < 0 || *dma == 4 || *dma > 7)
33972+ if (*dma == 4 || *dma > 7)
33973 return 0;
33974
33975 /* check if the resource is reserved */
33976diff -urNp linux-2.6.32.43/drivers/rtc/rtc-dev.c linux-2.6.32.43/drivers/rtc/rtc-dev.c
33977--- linux-2.6.32.43/drivers/rtc/rtc-dev.c 2011-03-27 14:31:47.000000000 -0400
33978+++ linux-2.6.32.43/drivers/rtc/rtc-dev.c 2011-04-17 15:56:46.000000000 -0400
33979@@ -14,6 +14,7 @@
33980 #include <linux/module.h>
33981 #include <linux/rtc.h>
33982 #include <linux/sched.h>
33983+#include <linux/grsecurity.h>
33984 #include "rtc-core.h"
33985
33986 static dev_t rtc_devt;
33987@@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *f
33988 if (copy_from_user(&tm, uarg, sizeof(tm)))
33989 return -EFAULT;
33990
33991+ gr_log_timechange();
33992+
33993 return rtc_set_time(rtc, &tm);
33994
33995 case RTC_PIE_ON:
33996diff -urNp linux-2.6.32.43/drivers/s390/cio/qdio_perf.c linux-2.6.32.43/drivers/s390/cio/qdio_perf.c
33997--- linux-2.6.32.43/drivers/s390/cio/qdio_perf.c 2011-03-27 14:31:47.000000000 -0400
33998+++ linux-2.6.32.43/drivers/s390/cio/qdio_perf.c 2011-04-17 15:56:46.000000000 -0400
33999@@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_
34000 static int qdio_perf_proc_show(struct seq_file *m, void *v)
34001 {
34002 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
34003- (long)atomic_long_read(&perf_stats.qdio_int));
34004+ (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
34005 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
34006- (long)atomic_long_read(&perf_stats.pci_int));
34007+ (long)atomic_long_read_unchecked(&perf_stats.pci_int));
34008 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
34009- (long)atomic_long_read(&perf_stats.thin_int));
34010+ (long)atomic_long_read_unchecked(&perf_stats.thin_int));
34011 seq_printf(m, "\n");
34012 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
34013- (long)atomic_long_read(&perf_stats.tasklet_inbound));
34014+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
34015 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
34016- (long)atomic_long_read(&perf_stats.tasklet_outbound));
34017+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
34018 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
34019- (long)atomic_long_read(&perf_stats.tasklet_thinint),
34020- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
34021+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
34022+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
34023 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
34024- (long)atomic_long_read(&perf_stats.thinint_inbound),
34025- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
34026+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
34027+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
34028 seq_printf(m, "\n");
34029 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
34030- (long)atomic_long_read(&perf_stats.siga_in));
34031+ (long)atomic_long_read_unchecked(&perf_stats.siga_in));
34032 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
34033- (long)atomic_long_read(&perf_stats.siga_out));
34034+ (long)atomic_long_read_unchecked(&perf_stats.siga_out));
34035 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
34036- (long)atomic_long_read(&perf_stats.siga_sync));
34037+ (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
34038 seq_printf(m, "\n");
34039 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
34040- (long)atomic_long_read(&perf_stats.inbound_handler));
34041+ (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
34042 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
34043- (long)atomic_long_read(&perf_stats.outbound_handler));
34044+ (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
34045 seq_printf(m, "\n");
34046 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
34047- (long)atomic_long_read(&perf_stats.fast_requeue));
34048+ (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
34049 seq_printf(m, "Number of outbound target full condition\t: %li\n",
34050- (long)atomic_long_read(&perf_stats.outbound_target_full));
34051+ (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
34052 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
34053- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
34054+ (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
34055 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
34056- (long)atomic_long_read(&perf_stats.debug_stop_polling));
34057+ (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
34058 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
34059- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
34060+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
34061 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
34062- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
34063- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
34064+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
34065+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
34066 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
34067- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
34068- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
34069+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
34070+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
34071 seq_printf(m, "\n");
34072 return 0;
34073 }
34074diff -urNp linux-2.6.32.43/drivers/s390/cio/qdio_perf.h linux-2.6.32.43/drivers/s390/cio/qdio_perf.h
34075--- linux-2.6.32.43/drivers/s390/cio/qdio_perf.h 2011-03-27 14:31:47.000000000 -0400
34076+++ linux-2.6.32.43/drivers/s390/cio/qdio_perf.h 2011-04-17 15:56:46.000000000 -0400
34077@@ -13,46 +13,46 @@
34078
34079 struct qdio_perf_stats {
34080 /* interrupt handler calls */
34081- atomic_long_t qdio_int;
34082- atomic_long_t pci_int;
34083- atomic_long_t thin_int;
34084+ atomic_long_unchecked_t qdio_int;
34085+ atomic_long_unchecked_t pci_int;
34086+ atomic_long_unchecked_t thin_int;
34087
34088 /* tasklet runs */
34089- atomic_long_t tasklet_inbound;
34090- atomic_long_t tasklet_outbound;
34091- atomic_long_t tasklet_thinint;
34092- atomic_long_t tasklet_thinint_loop;
34093- atomic_long_t thinint_inbound;
34094- atomic_long_t thinint_inbound_loop;
34095- atomic_long_t thinint_inbound_loop2;
34096+ atomic_long_unchecked_t tasklet_inbound;
34097+ atomic_long_unchecked_t tasklet_outbound;
34098+ atomic_long_unchecked_t tasklet_thinint;
34099+ atomic_long_unchecked_t tasklet_thinint_loop;
34100+ atomic_long_unchecked_t thinint_inbound;
34101+ atomic_long_unchecked_t thinint_inbound_loop;
34102+ atomic_long_unchecked_t thinint_inbound_loop2;
34103
34104 /* signal adapter calls */
34105- atomic_long_t siga_out;
34106- atomic_long_t siga_in;
34107- atomic_long_t siga_sync;
34108+ atomic_long_unchecked_t siga_out;
34109+ atomic_long_unchecked_t siga_in;
34110+ atomic_long_unchecked_t siga_sync;
34111
34112 /* misc */
34113- atomic_long_t inbound_handler;
34114- atomic_long_t outbound_handler;
34115- atomic_long_t fast_requeue;
34116- atomic_long_t outbound_target_full;
34117+ atomic_long_unchecked_t inbound_handler;
34118+ atomic_long_unchecked_t outbound_handler;
34119+ atomic_long_unchecked_t fast_requeue;
34120+ atomic_long_unchecked_t outbound_target_full;
34121
34122 /* for debugging */
34123- atomic_long_t debug_tl_out_timer;
34124- atomic_long_t debug_stop_polling;
34125- atomic_long_t debug_eqbs_all;
34126- atomic_long_t debug_eqbs_incomplete;
34127- atomic_long_t debug_sqbs_all;
34128- atomic_long_t debug_sqbs_incomplete;
34129+ atomic_long_unchecked_t debug_tl_out_timer;
34130+ atomic_long_unchecked_t debug_stop_polling;
34131+ atomic_long_unchecked_t debug_eqbs_all;
34132+ atomic_long_unchecked_t debug_eqbs_incomplete;
34133+ atomic_long_unchecked_t debug_sqbs_all;
34134+ atomic_long_unchecked_t debug_sqbs_incomplete;
34135 };
34136
34137 extern struct qdio_perf_stats perf_stats;
34138 extern int qdio_performance_stats;
34139
34140-static inline void qdio_perf_stat_inc(atomic_long_t *count)
34141+static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
34142 {
34143 if (qdio_performance_stats)
34144- atomic_long_inc(count);
34145+ atomic_long_inc_unchecked(count);
34146 }
34147
34148 int qdio_setup_perf_stats(void);
34149diff -urNp linux-2.6.32.43/drivers/scsi/aacraid/commctrl.c linux-2.6.32.43/drivers/scsi/aacraid/commctrl.c
34150--- linux-2.6.32.43/drivers/scsi/aacraid/commctrl.c 2011-03-27 14:31:47.000000000 -0400
34151+++ linux-2.6.32.43/drivers/scsi/aacraid/commctrl.c 2011-05-16 21:46:57.000000000 -0400
34152@@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_d
34153 u32 actual_fibsize64, actual_fibsize = 0;
34154 int i;
34155
34156+ pax_track_stack();
34157
34158 if (dev->in_reset) {
34159 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
34160diff -urNp linux-2.6.32.43/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.32.43/drivers/scsi/aic94xx/aic94xx_init.c
34161--- linux-2.6.32.43/drivers/scsi/aic94xx/aic94xx_init.c 2011-03-27 14:31:47.000000000 -0400
34162+++ linux-2.6.32.43/drivers/scsi/aic94xx/aic94xx_init.c 2011-04-17 15:56:46.000000000 -0400
34163@@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(stru
34164 flash_error_table[i].reason);
34165 }
34166
34167-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
34168+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
34169 asd_show_update_bios, asd_store_update_bios);
34170
34171 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
34172diff -urNp linux-2.6.32.43/drivers/scsi/BusLogic.c linux-2.6.32.43/drivers/scsi/BusLogic.c
34173--- linux-2.6.32.43/drivers/scsi/BusLogic.c 2011-03-27 14:31:47.000000000 -0400
34174+++ linux-2.6.32.43/drivers/scsi/BusLogic.c 2011-05-16 21:46:57.000000000 -0400
34175@@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFla
34176 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
34177 *PrototypeHostAdapter)
34178 {
34179+ pax_track_stack();
34180+
34181 /*
34182 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
34183 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
34184diff -urNp linux-2.6.32.43/drivers/scsi/dpt_i2o.c linux-2.6.32.43/drivers/scsi/dpt_i2o.c
34185--- linux-2.6.32.43/drivers/scsi/dpt_i2o.c 2011-03-27 14:31:47.000000000 -0400
34186+++ linux-2.6.32.43/drivers/scsi/dpt_i2o.c 2011-05-16 21:46:57.000000000 -0400
34187@@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* p
34188 dma_addr_t addr;
34189 ulong flags = 0;
34190
34191+ pax_track_stack();
34192+
34193 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
34194 // get user msg size in u32s
34195 if(get_user(size, &user_msg[0])){
34196@@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
34197 s32 rcode;
34198 dma_addr_t addr;
34199
34200+ pax_track_stack();
34201+
34202 memset(msg, 0 , sizeof(msg));
34203 len = scsi_bufflen(cmd);
34204 direction = 0x00000000;
34205diff -urNp linux-2.6.32.43/drivers/scsi/eata.c linux-2.6.32.43/drivers/scsi/eata.c
34206--- linux-2.6.32.43/drivers/scsi/eata.c 2011-03-27 14:31:47.000000000 -0400
34207+++ linux-2.6.32.43/drivers/scsi/eata.c 2011-05-16 21:46:57.000000000 -0400
34208@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
34209 struct hostdata *ha;
34210 char name[16];
34211
34212+ pax_track_stack();
34213+
34214 sprintf(name, "%s%d", driver_name, j);
34215
34216 if (!request_region(port_base, REGION_SIZE, driver_name)) {
34217diff -urNp linux-2.6.32.43/drivers/scsi/fcoe/libfcoe.c linux-2.6.32.43/drivers/scsi/fcoe/libfcoe.c
34218--- linux-2.6.32.43/drivers/scsi/fcoe/libfcoe.c 2011-03-27 14:31:47.000000000 -0400
34219+++ linux-2.6.32.43/drivers/scsi/fcoe/libfcoe.c 2011-05-16 21:46:57.000000000 -0400
34220@@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fc
34221 size_t rlen;
34222 size_t dlen;
34223
34224+ pax_track_stack();
34225+
34226 fiph = (struct fip_header *)skb->data;
34227 sub = fiph->fip_subcode;
34228 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
34229diff -urNp linux-2.6.32.43/drivers/scsi/gdth.c linux-2.6.32.43/drivers/scsi/gdth.c
34230--- linux-2.6.32.43/drivers/scsi/gdth.c 2011-03-27 14:31:47.000000000 -0400
34231+++ linux-2.6.32.43/drivers/scsi/gdth.c 2011-05-16 21:46:57.000000000 -0400
34232@@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
34233 ulong flags;
34234 gdth_ha_str *ha;
34235
34236+ pax_track_stack();
34237+
34238 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
34239 return -EFAULT;
34240 ha = gdth_find_ha(ldrv.ionode);
34241@@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg
34242 gdth_ha_str *ha;
34243 int rval;
34244
34245+ pax_track_stack();
34246+
34247 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
34248 res.number >= MAX_HDRIVES)
34249 return -EFAULT;
34250@@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg,
34251 gdth_ha_str *ha;
34252 int rval;
34253
34254+ pax_track_stack();
34255+
34256 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
34257 return -EFAULT;
34258 ha = gdth_find_ha(gen.ionode);
34259@@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
34260 int i;
34261 gdth_cmd_str gdtcmd;
34262 char cmnd[MAX_COMMAND_SIZE];
34263+
34264+ pax_track_stack();
34265+
34266 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
34267
34268 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
34269diff -urNp linux-2.6.32.43/drivers/scsi/gdth_proc.c linux-2.6.32.43/drivers/scsi/gdth_proc.c
34270--- linux-2.6.32.43/drivers/scsi/gdth_proc.c 2011-03-27 14:31:47.000000000 -0400
34271+++ linux-2.6.32.43/drivers/scsi/gdth_proc.c 2011-05-16 21:46:57.000000000 -0400
34272@@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi
34273 ulong64 paddr;
34274
34275 char cmnd[MAX_COMMAND_SIZE];
34276+
34277+ pax_track_stack();
34278+
34279 memset(cmnd, 0xff, 12);
34280 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
34281
34282@@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,ch
34283 gdth_hget_str *phg;
34284 char cmnd[MAX_COMMAND_SIZE];
34285
34286+ pax_track_stack();
34287+
34288 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
34289 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
34290 if (!gdtcmd || !estr)
34291diff -urNp linux-2.6.32.43/drivers/scsi/hosts.c linux-2.6.32.43/drivers/scsi/hosts.c
34292--- linux-2.6.32.43/drivers/scsi/hosts.c 2011-03-27 14:31:47.000000000 -0400
34293+++ linux-2.6.32.43/drivers/scsi/hosts.c 2011-05-04 17:56:28.000000000 -0400
34294@@ -40,7 +40,7 @@
34295 #include "scsi_logging.h"
34296
34297
34298-static atomic_t scsi_host_next_hn; /* host_no for next new host */
34299+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
34300
34301
34302 static void scsi_host_cls_release(struct device *dev)
34303@@ -344,7 +344,7 @@ struct Scsi_Host *scsi_host_alloc(struct
34304 * subtract one because we increment first then return, but we need to
34305 * know what the next host number was before increment
34306 */
34307- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
34308+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
34309 shost->dma_channel = 0xff;
34310
34311 /* These three are default values which can be overridden */
34312diff -urNp linux-2.6.32.43/drivers/scsi/ipr.c linux-2.6.32.43/drivers/scsi/ipr.c
34313--- linux-2.6.32.43/drivers/scsi/ipr.c 2011-03-27 14:31:47.000000000 -0400
34314+++ linux-2.6.32.43/drivers/scsi/ipr.c 2011-04-17 15:56:46.000000000 -0400
34315@@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
34316 return true;
34317 }
34318
34319-static struct ata_port_operations ipr_sata_ops = {
34320+static const struct ata_port_operations ipr_sata_ops = {
34321 .phy_reset = ipr_ata_phy_reset,
34322 .hardreset = ipr_sata_reset,
34323 .post_internal_cmd = ipr_ata_post_internal,
34324diff -urNp linux-2.6.32.43/drivers/scsi/libfc/fc_exch.c linux-2.6.32.43/drivers/scsi/libfc/fc_exch.c
34325--- linux-2.6.32.43/drivers/scsi/libfc/fc_exch.c 2011-03-27 14:31:47.000000000 -0400
34326+++ linux-2.6.32.43/drivers/scsi/libfc/fc_exch.c 2011-04-17 15:56:46.000000000 -0400
34327@@ -86,12 +86,12 @@ struct fc_exch_mgr {
34328 * all together if not used XXX
34329 */
34330 struct {
34331- atomic_t no_free_exch;
34332- atomic_t no_free_exch_xid;
34333- atomic_t xid_not_found;
34334- atomic_t xid_busy;
34335- atomic_t seq_not_found;
34336- atomic_t non_bls_resp;
34337+ atomic_unchecked_t no_free_exch;
34338+ atomic_unchecked_t no_free_exch_xid;
34339+ atomic_unchecked_t xid_not_found;
34340+ atomic_unchecked_t xid_busy;
34341+ atomic_unchecked_t seq_not_found;
34342+ atomic_unchecked_t non_bls_resp;
34343 } stats;
34344 };
34345 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
34346@@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(
34347 /* allocate memory for exchange */
34348 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
34349 if (!ep) {
34350- atomic_inc(&mp->stats.no_free_exch);
34351+ atomic_inc_unchecked(&mp->stats.no_free_exch);
34352 goto out;
34353 }
34354 memset(ep, 0, sizeof(*ep));
34355@@ -557,7 +557,7 @@ out:
34356 return ep;
34357 err:
34358 spin_unlock_bh(&pool->lock);
34359- atomic_inc(&mp->stats.no_free_exch_xid);
34360+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
34361 mempool_free(ep, mp->ep_pool);
34362 return NULL;
34363 }
34364@@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34365 xid = ntohs(fh->fh_ox_id); /* we originated exch */
34366 ep = fc_exch_find(mp, xid);
34367 if (!ep) {
34368- atomic_inc(&mp->stats.xid_not_found);
34369+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34370 reject = FC_RJT_OX_ID;
34371 goto out;
34372 }
34373@@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34374 ep = fc_exch_find(mp, xid);
34375 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
34376 if (ep) {
34377- atomic_inc(&mp->stats.xid_busy);
34378+ atomic_inc_unchecked(&mp->stats.xid_busy);
34379 reject = FC_RJT_RX_ID;
34380 goto rel;
34381 }
34382@@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34383 }
34384 xid = ep->xid; /* get our XID */
34385 } else if (!ep) {
34386- atomic_inc(&mp->stats.xid_not_found);
34387+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34388 reject = FC_RJT_RX_ID; /* XID not found */
34389 goto out;
34390 }
34391@@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look
34392 } else {
34393 sp = &ep->seq;
34394 if (sp->id != fh->fh_seq_id) {
34395- atomic_inc(&mp->stats.seq_not_found);
34396+ atomic_inc_unchecked(&mp->stats.seq_not_found);
34397 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
34398 goto rel;
34399 }
34400@@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct
34401
34402 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
34403 if (!ep) {
34404- atomic_inc(&mp->stats.xid_not_found);
34405+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34406 goto out;
34407 }
34408 if (ep->esb_stat & ESB_ST_COMPLETE) {
34409- atomic_inc(&mp->stats.xid_not_found);
34410+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34411 goto out;
34412 }
34413 if (ep->rxid == FC_XID_UNKNOWN)
34414 ep->rxid = ntohs(fh->fh_rx_id);
34415 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
34416- atomic_inc(&mp->stats.xid_not_found);
34417+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34418 goto rel;
34419 }
34420 if (ep->did != ntoh24(fh->fh_s_id) &&
34421 ep->did != FC_FID_FLOGI) {
34422- atomic_inc(&mp->stats.xid_not_found);
34423+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34424 goto rel;
34425 }
34426 sof = fr_sof(fp);
34427@@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct
34428 } else {
34429 sp = &ep->seq;
34430 if (sp->id != fh->fh_seq_id) {
34431- atomic_inc(&mp->stats.seq_not_found);
34432+ atomic_inc_unchecked(&mp->stats.seq_not_found);
34433 goto rel;
34434 }
34435 }
34436@@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_
34437 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
34438
34439 if (!sp)
34440- atomic_inc(&mp->stats.xid_not_found);
34441+ atomic_inc_unchecked(&mp->stats.xid_not_found);
34442 else
34443- atomic_inc(&mp->stats.non_bls_resp);
34444+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
34445
34446 fc_frame_free(fp);
34447 }
34448diff -urNp linux-2.6.32.43/drivers/scsi/libsas/sas_ata.c linux-2.6.32.43/drivers/scsi/libsas/sas_ata.c
34449--- linux-2.6.32.43/drivers/scsi/libsas/sas_ata.c 2011-03-27 14:31:47.000000000 -0400
34450+++ linux-2.6.32.43/drivers/scsi/libsas/sas_ata.c 2011-04-23 12:56:11.000000000 -0400
34451@@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
34452 }
34453 }
34454
34455-static struct ata_port_operations sas_sata_ops = {
34456+static const struct ata_port_operations sas_sata_ops = {
34457 .phy_reset = sas_ata_phy_reset,
34458 .post_internal_cmd = sas_ata_post_internal,
34459 .qc_defer = ata_std_qc_defer,
34460diff -urNp linux-2.6.32.43/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.32.43/drivers/scsi/lpfc/lpfc_debugfs.c
34461--- linux-2.6.32.43/drivers/scsi/lpfc/lpfc_debugfs.c 2011-03-27 14:31:47.000000000 -0400
34462+++ linux-2.6.32.43/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-16 21:46:57.000000000 -0400
34463@@ -124,7 +124,7 @@ struct lpfc_debug {
34464 int len;
34465 };
34466
34467-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34468+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
34469 static unsigned long lpfc_debugfs_start_time = 0L;
34470
34471 /**
34472@@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
34473 lpfc_debugfs_enable = 0;
34474
34475 len = 0;
34476- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
34477+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
34478 (lpfc_debugfs_max_disc_trc - 1);
34479 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
34480 dtp = vport->disc_trc + i;
34481@@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
34482 lpfc_debugfs_enable = 0;
34483
34484 len = 0;
34485- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
34486+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
34487 (lpfc_debugfs_max_slow_ring_trc - 1);
34488 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
34489 dtp = phba->slow_ring_trc + i;
34490@@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
34491 uint32_t *ptr;
34492 char buffer[1024];
34493
34494+ pax_track_stack();
34495+
34496 off = 0;
34497 spin_lock_irq(&phba->hbalock);
34498
34499@@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
34500 !vport || !vport->disc_trc)
34501 return;
34502
34503- index = atomic_inc_return(&vport->disc_trc_cnt) &
34504+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
34505 (lpfc_debugfs_max_disc_trc - 1);
34506 dtp = vport->disc_trc + index;
34507 dtp->fmt = fmt;
34508 dtp->data1 = data1;
34509 dtp->data2 = data2;
34510 dtp->data3 = data3;
34511- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34512+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34513 dtp->jif = jiffies;
34514 #endif
34515 return;
34516@@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
34517 !phba || !phba->slow_ring_trc)
34518 return;
34519
34520- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
34521+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
34522 (lpfc_debugfs_max_slow_ring_trc - 1);
34523 dtp = phba->slow_ring_trc + index;
34524 dtp->fmt = fmt;
34525 dtp->data1 = data1;
34526 dtp->data2 = data2;
34527 dtp->data3 = data3;
34528- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
34529+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
34530 dtp->jif = jiffies;
34531 #endif
34532 return;
34533@@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
34534 "slow_ring buffer\n");
34535 goto debug_failed;
34536 }
34537- atomic_set(&phba->slow_ring_trc_cnt, 0);
34538+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
34539 memset(phba->slow_ring_trc, 0,
34540 (sizeof(struct lpfc_debugfs_trc) *
34541 lpfc_debugfs_max_slow_ring_trc));
34542@@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
34543 "buffer\n");
34544 goto debug_failed;
34545 }
34546- atomic_set(&vport->disc_trc_cnt, 0);
34547+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
34548
34549 snprintf(name, sizeof(name), "discovery_trace");
34550 vport->debug_disc_trc =
34551diff -urNp linux-2.6.32.43/drivers/scsi/lpfc/lpfc.h linux-2.6.32.43/drivers/scsi/lpfc/lpfc.h
34552--- linux-2.6.32.43/drivers/scsi/lpfc/lpfc.h 2011-03-27 14:31:47.000000000 -0400
34553+++ linux-2.6.32.43/drivers/scsi/lpfc/lpfc.h 2011-05-04 17:56:28.000000000 -0400
34554@@ -400,7 +400,7 @@ struct lpfc_vport {
34555 struct dentry *debug_nodelist;
34556 struct dentry *vport_debugfs_root;
34557 struct lpfc_debugfs_trc *disc_trc;
34558- atomic_t disc_trc_cnt;
34559+ atomic_unchecked_t disc_trc_cnt;
34560 #endif
34561 uint8_t stat_data_enabled;
34562 uint8_t stat_data_blocked;
34563@@ -725,8 +725,8 @@ struct lpfc_hba {
34564 struct timer_list fabric_block_timer;
34565 unsigned long bit_flags;
34566 #define FABRIC_COMANDS_BLOCKED 0
34567- atomic_t num_rsrc_err;
34568- atomic_t num_cmd_success;
34569+ atomic_unchecked_t num_rsrc_err;
34570+ atomic_unchecked_t num_cmd_success;
34571 unsigned long last_rsrc_error_time;
34572 unsigned long last_ramp_down_time;
34573 unsigned long last_ramp_up_time;
34574@@ -740,7 +740,7 @@ struct lpfc_hba {
34575 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
34576 struct dentry *debug_slow_ring_trc;
34577 struct lpfc_debugfs_trc *slow_ring_trc;
34578- atomic_t slow_ring_trc_cnt;
34579+ atomic_unchecked_t slow_ring_trc_cnt;
34580 #endif
34581
34582 /* Used for deferred freeing of ELS data buffers */
34583diff -urNp linux-2.6.32.43/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.32.43/drivers/scsi/lpfc/lpfc_scsi.c
34584--- linux-2.6.32.43/drivers/scsi/lpfc/lpfc_scsi.c 2011-03-27 14:31:47.000000000 -0400
34585+++ linux-2.6.32.43/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-04 17:56:28.000000000 -0400
34586@@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
34587 uint32_t evt_posted;
34588
34589 spin_lock_irqsave(&phba->hbalock, flags);
34590- atomic_inc(&phba->num_rsrc_err);
34591+ atomic_inc_unchecked(&phba->num_rsrc_err);
34592 phba->last_rsrc_error_time = jiffies;
34593
34594 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
34595@@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
34596 unsigned long flags;
34597 struct lpfc_hba *phba = vport->phba;
34598 uint32_t evt_posted;
34599- atomic_inc(&phba->num_cmd_success);
34600+ atomic_inc_unchecked(&phba->num_cmd_success);
34601
34602 if (vport->cfg_lun_queue_depth <= queue_depth)
34603 return;
34604@@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
34605 int i;
34606 struct lpfc_rport_data *rdata;
34607
34608- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
34609- num_cmd_success = atomic_read(&phba->num_cmd_success);
34610+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
34611+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
34612
34613 vports = lpfc_create_vport_work_array(phba);
34614 if (vports != NULL)
34615@@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
34616 }
34617 }
34618 lpfc_destroy_vport_work_array(phba, vports);
34619- atomic_set(&phba->num_rsrc_err, 0);
34620- atomic_set(&phba->num_cmd_success, 0);
34621+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
34622+ atomic_set_unchecked(&phba->num_cmd_success, 0);
34623 }
34624
34625 /**
34626@@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
34627 }
34628 }
34629 lpfc_destroy_vport_work_array(phba, vports);
34630- atomic_set(&phba->num_rsrc_err, 0);
34631- atomic_set(&phba->num_cmd_success, 0);
34632+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
34633+ atomic_set_unchecked(&phba->num_cmd_success, 0);
34634 }
34635
34636 /**
34637diff -urNp linux-2.6.32.43/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.32.43/drivers/scsi/megaraid/megaraid_mbox.c
34638--- linux-2.6.32.43/drivers/scsi/megaraid/megaraid_mbox.c 2011-03-27 14:31:47.000000000 -0400
34639+++ linux-2.6.32.43/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-16 21:46:57.000000000 -0400
34640@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
34641 int rval;
34642 int i;
34643
34644+ pax_track_stack();
34645+
34646 // Allocate memory for the base list of scb for management module.
34647 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
34648
34649diff -urNp linux-2.6.32.43/drivers/scsi/osd/osd_initiator.c linux-2.6.32.43/drivers/scsi/osd/osd_initiator.c
34650--- linux-2.6.32.43/drivers/scsi/osd/osd_initiator.c 2011-03-27 14:31:47.000000000 -0400
34651+++ linux-2.6.32.43/drivers/scsi/osd/osd_initiator.c 2011-05-16 21:46:57.000000000 -0400
34652@@ -94,6 +94,8 @@ static int _osd_print_system_info(struct
34653 int nelem = ARRAY_SIZE(get_attrs), a = 0;
34654 int ret;
34655
34656+ pax_track_stack();
34657+
34658 or = osd_start_request(od, GFP_KERNEL);
34659 if (!or)
34660 return -ENOMEM;
34661diff -urNp linux-2.6.32.43/drivers/scsi/pmcraid.c linux-2.6.32.43/drivers/scsi/pmcraid.c
34662--- linux-2.6.32.43/drivers/scsi/pmcraid.c 2011-05-10 22:12:01.000000000 -0400
34663+++ linux-2.6.32.43/drivers/scsi/pmcraid.c 2011-05-10 22:12:33.000000000 -0400
34664@@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc
34665 res->scsi_dev = scsi_dev;
34666 scsi_dev->hostdata = res;
34667 res->change_detected = 0;
34668- atomic_set(&res->read_failures, 0);
34669- atomic_set(&res->write_failures, 0);
34670+ atomic_set_unchecked(&res->read_failures, 0);
34671+ atomic_set_unchecked(&res->write_failures, 0);
34672 rc = 0;
34673 }
34674 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
34675@@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct
34676
34677 /* If this was a SCSI read/write command keep count of errors */
34678 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
34679- atomic_inc(&res->read_failures);
34680+ atomic_inc_unchecked(&res->read_failures);
34681 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
34682- atomic_inc(&res->write_failures);
34683+ atomic_inc_unchecked(&res->write_failures);
34684
34685 if (!RES_IS_GSCSI(res->cfg_entry) &&
34686 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
34687@@ -4113,7 +4113,7 @@ static void pmcraid_worker_function(stru
34688
34689 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
34690 /* add resources only after host is added into system */
34691- if (!atomic_read(&pinstance->expose_resources))
34692+ if (!atomic_read_unchecked(&pinstance->expose_resources))
34693 return;
34694
34695 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
34696@@ -4847,7 +4847,7 @@ static int __devinit pmcraid_init_instan
34697 init_waitqueue_head(&pinstance->reset_wait_q);
34698
34699 atomic_set(&pinstance->outstanding_cmds, 0);
34700- atomic_set(&pinstance->expose_resources, 0);
34701+ atomic_set_unchecked(&pinstance->expose_resources, 0);
34702
34703 INIT_LIST_HEAD(&pinstance->free_res_q);
34704 INIT_LIST_HEAD(&pinstance->used_res_q);
34705@@ -5499,7 +5499,7 @@ static int __devinit pmcraid_probe(
34706 /* Schedule worker thread to handle CCN and take care of adding and
34707 * removing devices to OS
34708 */
34709- atomic_set(&pinstance->expose_resources, 1);
34710+ atomic_set_unchecked(&pinstance->expose_resources, 1);
34711 schedule_work(&pinstance->worker_q);
34712 return rc;
34713
34714diff -urNp linux-2.6.32.43/drivers/scsi/pmcraid.h linux-2.6.32.43/drivers/scsi/pmcraid.h
34715--- linux-2.6.32.43/drivers/scsi/pmcraid.h 2011-03-27 14:31:47.000000000 -0400
34716+++ linux-2.6.32.43/drivers/scsi/pmcraid.h 2011-05-04 17:56:28.000000000 -0400
34717@@ -690,7 +690,7 @@ struct pmcraid_instance {
34718 atomic_t outstanding_cmds;
34719
34720 /* should add/delete resources to mid-layer now ?*/
34721- atomic_t expose_resources;
34722+ atomic_unchecked_t expose_resources;
34723
34724 /* Tasklet to handle deferred processing */
34725 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
34726@@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
34727 struct list_head queue; /* link to "to be exposed" resources */
34728 struct pmcraid_config_table_entry cfg_entry;
34729 struct scsi_device *scsi_dev; /* Link scsi_device structure */
34730- atomic_t read_failures; /* count of failed READ commands */
34731- atomic_t write_failures; /* count of failed WRITE commands */
34732+ atomic_unchecked_t read_failures; /* count of failed READ commands */
34733+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
34734
34735 /* To indicate add/delete/modify during CCN */
34736 u8 change_detected;
34737diff -urNp linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_def.h
34738--- linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_def.h 2011-03-27 14:31:47.000000000 -0400
34739+++ linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_def.h 2011-05-04 17:56:28.000000000 -0400
34740@@ -240,7 +240,7 @@ struct ddb_entry {
34741 atomic_t retry_relogin_timer; /* Min Time between relogins
34742 * (4000 only) */
34743 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
34744- atomic_t relogin_retry_count; /* Num of times relogin has been
34745+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
34746 * retried */
34747
34748 uint16_t port;
34749diff -urNp linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_init.c
34750--- linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_init.c 2011-03-27 14:31:47.000000000 -0400
34751+++ linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_init.c 2011-05-04 17:56:28.000000000 -0400
34752@@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_
34753 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
34754 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
34755 atomic_set(&ddb_entry->relogin_timer, 0);
34756- atomic_set(&ddb_entry->relogin_retry_count, 0);
34757+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34758 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34759 list_add_tail(&ddb_entry->list, &ha->ddb_list);
34760 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
34761@@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s
34762 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
34763 atomic_set(&ddb_entry->port_down_timer,
34764 ha->port_down_retry_count);
34765- atomic_set(&ddb_entry->relogin_retry_count, 0);
34766+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
34767 atomic_set(&ddb_entry->relogin_timer, 0);
34768 clear_bit(DF_RELOGIN, &ddb_entry->flags);
34769 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
34770diff -urNp linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_os.c
34771--- linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_os.c 2011-03-27 14:31:47.000000000 -0400
34772+++ linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_os.c 2011-05-04 17:56:28.000000000 -0400
34773@@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql
34774 ddb_entry->fw_ddb_device_state ==
34775 DDB_DS_SESSION_FAILED) {
34776 /* Reset retry relogin timer */
34777- atomic_inc(&ddb_entry->relogin_retry_count);
34778+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
34779 DEBUG2(printk("scsi%ld: index[%d] relogin"
34780 " timed out-retrying"
34781 " relogin (%d)\n",
34782 ha->host_no,
34783 ddb_entry->fw_ddb_index,
34784- atomic_read(&ddb_entry->
34785+ atomic_read_unchecked(&ddb_entry->
34786 relogin_retry_count))
34787 );
34788 start_dpc++;
34789diff -urNp linux-2.6.32.43/drivers/scsi/scsi.c linux-2.6.32.43/drivers/scsi/scsi.c
34790--- linux-2.6.32.43/drivers/scsi/scsi.c 2011-03-27 14:31:47.000000000 -0400
34791+++ linux-2.6.32.43/drivers/scsi/scsi.c 2011-05-04 17:56:28.000000000 -0400
34792@@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
34793 unsigned long timeout;
34794 int rtn = 0;
34795
34796- atomic_inc(&cmd->device->iorequest_cnt);
34797+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34798
34799 /* check if the device is still usable */
34800 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
34801diff -urNp linux-2.6.32.43/drivers/scsi/scsi_debug.c linux-2.6.32.43/drivers/scsi/scsi_debug.c
34802--- linux-2.6.32.43/drivers/scsi/scsi_debug.c 2011-03-27 14:31:47.000000000 -0400
34803+++ linux-2.6.32.43/drivers/scsi/scsi_debug.c 2011-05-16 21:46:57.000000000 -0400
34804@@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_
34805 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
34806 unsigned char *cmd = (unsigned char *)scp->cmnd;
34807
34808+ pax_track_stack();
34809+
34810 if ((errsts = check_readiness(scp, 1, devip)))
34811 return errsts;
34812 memset(arr, 0, sizeof(arr));
34813@@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cm
34814 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
34815 unsigned char *cmd = (unsigned char *)scp->cmnd;
34816
34817+ pax_track_stack();
34818+
34819 if ((errsts = check_readiness(scp, 1, devip)))
34820 return errsts;
34821 memset(arr, 0, sizeof(arr));
34822diff -urNp linux-2.6.32.43/drivers/scsi/scsi_lib.c linux-2.6.32.43/drivers/scsi/scsi_lib.c
34823--- linux-2.6.32.43/drivers/scsi/scsi_lib.c 2011-05-10 22:12:01.000000000 -0400
34824+++ linux-2.6.32.43/drivers/scsi/scsi_lib.c 2011-05-10 22:12:33.000000000 -0400
34825@@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req
34826
34827 scsi_init_cmd_errh(cmd);
34828 cmd->result = DID_NO_CONNECT << 16;
34829- atomic_inc(&cmd->device->iorequest_cnt);
34830+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
34831
34832 /*
34833 * SCSI request completion path will do scsi_device_unbusy(),
34834@@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req
34835 */
34836 cmd->serial_number = 0;
34837
34838- atomic_inc(&cmd->device->iodone_cnt);
34839+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
34840 if (cmd->result)
34841- atomic_inc(&cmd->device->ioerr_cnt);
34842+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
34843
34844 disposition = scsi_decide_disposition(cmd);
34845 if (disposition != SUCCESS &&
34846diff -urNp linux-2.6.32.43/drivers/scsi/scsi_sysfs.c linux-2.6.32.43/drivers/scsi/scsi_sysfs.c
34847--- linux-2.6.32.43/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:34.000000000 -0400
34848+++ linux-2.6.32.43/drivers/scsi/scsi_sysfs.c 2011-06-25 12:56:37.000000000 -0400
34849@@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev,
34850 char *buf) \
34851 { \
34852 struct scsi_device *sdev = to_scsi_device(dev); \
34853- unsigned long long count = atomic_read(&sdev->field); \
34854+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
34855 return snprintf(buf, 20, "0x%llx\n", count); \
34856 } \
34857 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
34858diff -urNp linux-2.6.32.43/drivers/scsi/scsi_transport_fc.c linux-2.6.32.43/drivers/scsi/scsi_transport_fc.c
34859--- linux-2.6.32.43/drivers/scsi/scsi_transport_fc.c 2011-03-27 14:31:47.000000000 -0400
34860+++ linux-2.6.32.43/drivers/scsi/scsi_transport_fc.c 2011-05-04 17:56:28.000000000 -0400
34861@@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
34862 * Netlink Infrastructure
34863 */
34864
34865-static atomic_t fc_event_seq;
34866+static atomic_unchecked_t fc_event_seq;
34867
34868 /**
34869 * fc_get_event_number - Obtain the next sequential FC event number
34870@@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
34871 u32
34872 fc_get_event_number(void)
34873 {
34874- return atomic_add_return(1, &fc_event_seq);
34875+ return atomic_add_return_unchecked(1, &fc_event_seq);
34876 }
34877 EXPORT_SYMBOL(fc_get_event_number);
34878
34879@@ -641,7 +641,7 @@ static __init int fc_transport_init(void
34880 {
34881 int error;
34882
34883- atomic_set(&fc_event_seq, 0);
34884+ atomic_set_unchecked(&fc_event_seq, 0);
34885
34886 error = transport_class_register(&fc_host_class);
34887 if (error)
34888diff -urNp linux-2.6.32.43/drivers/scsi/scsi_transport_iscsi.c linux-2.6.32.43/drivers/scsi/scsi_transport_iscsi.c
34889--- linux-2.6.32.43/drivers/scsi/scsi_transport_iscsi.c 2011-03-27 14:31:47.000000000 -0400
34890+++ linux-2.6.32.43/drivers/scsi/scsi_transport_iscsi.c 2011-05-04 17:56:28.000000000 -0400
34891@@ -81,7 +81,7 @@ struct iscsi_internal {
34892 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
34893 };
34894
34895-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
34896+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
34897 static struct workqueue_struct *iscsi_eh_timer_workq;
34898
34899 /*
34900@@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s
34901 int err;
34902
34903 ihost = shost->shost_data;
34904- session->sid = atomic_add_return(1, &iscsi_session_nr);
34905+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
34906
34907 if (id == ISCSI_MAX_TARGET) {
34908 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
34909@@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v
34910 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
34911 ISCSI_TRANSPORT_VERSION);
34912
34913- atomic_set(&iscsi_session_nr, 0);
34914+ atomic_set_unchecked(&iscsi_session_nr, 0);
34915
34916 err = class_register(&iscsi_transport_class);
34917 if (err)
34918diff -urNp linux-2.6.32.43/drivers/scsi/scsi_transport_srp.c linux-2.6.32.43/drivers/scsi/scsi_transport_srp.c
34919--- linux-2.6.32.43/drivers/scsi/scsi_transport_srp.c 2011-03-27 14:31:47.000000000 -0400
34920+++ linux-2.6.32.43/drivers/scsi/scsi_transport_srp.c 2011-05-04 17:56:28.000000000 -0400
34921@@ -33,7 +33,7 @@
34922 #include "scsi_transport_srp_internal.h"
34923
34924 struct srp_host_attrs {
34925- atomic_t next_port_id;
34926+ atomic_unchecked_t next_port_id;
34927 };
34928 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
34929
34930@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
34931 struct Scsi_Host *shost = dev_to_shost(dev);
34932 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
34933
34934- atomic_set(&srp_host->next_port_id, 0);
34935+ atomic_set_unchecked(&srp_host->next_port_id, 0);
34936 return 0;
34937 }
34938
34939@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
34940 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
34941 rport->roles = ids->roles;
34942
34943- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
34944+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
34945 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
34946
34947 transport_setup_device(&rport->dev);
34948diff -urNp linux-2.6.32.43/drivers/scsi/sg.c linux-2.6.32.43/drivers/scsi/sg.c
34949--- linux-2.6.32.43/drivers/scsi/sg.c 2011-03-27 14:31:47.000000000 -0400
34950+++ linux-2.6.32.43/drivers/scsi/sg.c 2011-04-17 15:56:46.000000000 -0400
34951@@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
34952 const struct file_operations * fops;
34953 };
34954
34955-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
34956+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
34957 {"allow_dio", &adio_fops},
34958 {"debug", &debug_fops},
34959 {"def_reserved_size", &dressz_fops},
34960@@ -2307,7 +2307,7 @@ sg_proc_init(void)
34961 {
34962 int k, mask;
34963 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
34964- struct sg_proc_leaf * leaf;
34965+ const struct sg_proc_leaf * leaf;
34966
34967 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
34968 if (!sg_proc_sgp)
34969diff -urNp linux-2.6.32.43/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.32.43/drivers/scsi/sym53c8xx_2/sym_glue.c
34970--- linux-2.6.32.43/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-03-27 14:31:47.000000000 -0400
34971+++ linux-2.6.32.43/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-16 21:46:57.000000000 -0400
34972@@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct p
34973 int do_iounmap = 0;
34974 int do_disable_device = 1;
34975
34976+ pax_track_stack();
34977+
34978 memset(&sym_dev, 0, sizeof(sym_dev));
34979 memset(&nvram, 0, sizeof(nvram));
34980 sym_dev.pdev = pdev;
34981diff -urNp linux-2.6.32.43/drivers/serial/kgdboc.c linux-2.6.32.43/drivers/serial/kgdboc.c
34982--- linux-2.6.32.43/drivers/serial/kgdboc.c 2011-03-27 14:31:47.000000000 -0400
34983+++ linux-2.6.32.43/drivers/serial/kgdboc.c 2011-04-17 15:56:46.000000000 -0400
34984@@ -18,7 +18,7 @@
34985
34986 #define MAX_CONFIG_LEN 40
34987
34988-static struct kgdb_io kgdboc_io_ops;
34989+static const struct kgdb_io kgdboc_io_ops;
34990
34991 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
34992 static int configured = -1;
34993@@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
34994 module_put(THIS_MODULE);
34995 }
34996
34997-static struct kgdb_io kgdboc_io_ops = {
34998+static const struct kgdb_io kgdboc_io_ops = {
34999 .name = "kgdboc",
35000 .read_char = kgdboc_get_char,
35001 .write_char = kgdboc_put_char,
35002diff -urNp linux-2.6.32.43/drivers/spi/spi.c linux-2.6.32.43/drivers/spi/spi.c
35003--- linux-2.6.32.43/drivers/spi/spi.c 2011-03-27 14:31:47.000000000 -0400
35004+++ linux-2.6.32.43/drivers/spi/spi.c 2011-05-04 17:56:28.000000000 -0400
35005@@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, str
35006 EXPORT_SYMBOL_GPL(spi_sync);
35007
35008 /* portable code must never pass more than 32 bytes */
35009-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
35010+#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
35011
35012 static u8 *buf;
35013
35014diff -urNp linux-2.6.32.43/drivers/staging/android/binder.c linux-2.6.32.43/drivers/staging/android/binder.c
35015--- linux-2.6.32.43/drivers/staging/android/binder.c 2011-03-27 14:31:47.000000000 -0400
35016+++ linux-2.6.32.43/drivers/staging/android/binder.c 2011-04-17 15:56:46.000000000 -0400
35017@@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_a
35018 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
35019 }
35020
35021-static struct vm_operations_struct binder_vm_ops = {
35022+static const struct vm_operations_struct binder_vm_ops = {
35023 .open = binder_vma_open,
35024 .close = binder_vma_close,
35025 };
35026diff -urNp linux-2.6.32.43/drivers/staging/b3dfg/b3dfg.c linux-2.6.32.43/drivers/staging/b3dfg/b3dfg.c
35027--- linux-2.6.32.43/drivers/staging/b3dfg/b3dfg.c 2011-03-27 14:31:47.000000000 -0400
35028+++ linux-2.6.32.43/drivers/staging/b3dfg/b3dfg.c 2011-04-17 15:56:46.000000000 -0400
35029@@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_are
35030 return VM_FAULT_NOPAGE;
35031 }
35032
35033-static struct vm_operations_struct b3dfg_vm_ops = {
35034+static const struct vm_operations_struct b3dfg_vm_ops = {
35035 .fault = b3dfg_vma_fault,
35036 };
35037
35038@@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp,
35039 return r;
35040 }
35041
35042-static struct file_operations b3dfg_fops = {
35043+static const struct file_operations b3dfg_fops = {
35044 .owner = THIS_MODULE,
35045 .open = b3dfg_open,
35046 .release = b3dfg_release,
35047diff -urNp linux-2.6.32.43/drivers/staging/comedi/comedi_fops.c linux-2.6.32.43/drivers/staging/comedi/comedi_fops.c
35048--- linux-2.6.32.43/drivers/staging/comedi/comedi_fops.c 2011-03-27 14:31:47.000000000 -0400
35049+++ linux-2.6.32.43/drivers/staging/comedi/comedi_fops.c 2011-04-17 15:56:46.000000000 -0400
35050@@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct
35051 mutex_unlock(&dev->mutex);
35052 }
35053
35054-static struct vm_operations_struct comedi_vm_ops = {
35055+static const struct vm_operations_struct comedi_vm_ops = {
35056 .close = comedi_unmap,
35057 };
35058
35059diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.32.43/drivers/staging/dream/qdsp5/adsp_driver.c
35060--- linux-2.6.32.43/drivers/staging/dream/qdsp5/adsp_driver.c 2011-03-27 14:31:47.000000000 -0400
35061+++ linux-2.6.32.43/drivers/staging/dream/qdsp5/adsp_driver.c 2011-04-17 15:56:46.000000000 -0400
35062@@ -576,7 +576,7 @@ static struct adsp_device *inode_to_devi
35063 static dev_t adsp_devno;
35064 static struct class *adsp_class;
35065
35066-static struct file_operations adsp_fops = {
35067+static const struct file_operations adsp_fops = {
35068 .owner = THIS_MODULE,
35069 .open = adsp_open,
35070 .unlocked_ioctl = adsp_ioctl,
35071diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_aac.c
35072--- linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_aac.c 2011-03-27 14:31:47.000000000 -0400
35073+++ linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_aac.c 2011-04-17 15:56:46.000000000 -0400
35074@@ -1022,7 +1022,7 @@ done:
35075 return rc;
35076 }
35077
35078-static struct file_operations audio_aac_fops = {
35079+static const struct file_operations audio_aac_fops = {
35080 .owner = THIS_MODULE,
35081 .open = audio_open,
35082 .release = audio_release,
35083diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_amrnb.c
35084--- linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-03-27 14:31:47.000000000 -0400
35085+++ linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-04-17 15:56:46.000000000 -0400
35086@@ -833,7 +833,7 @@ done:
35087 return rc;
35088 }
35089
35090-static struct file_operations audio_amrnb_fops = {
35091+static const struct file_operations audio_amrnb_fops = {
35092 .owner = THIS_MODULE,
35093 .open = audamrnb_open,
35094 .release = audamrnb_release,
35095diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_evrc.c
35096--- linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_evrc.c 2011-03-27 14:31:47.000000000 -0400
35097+++ linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_evrc.c 2011-04-17 15:56:46.000000000 -0400
35098@@ -805,7 +805,7 @@ dma_fail:
35099 return rc;
35100 }
35101
35102-static struct file_operations audio_evrc_fops = {
35103+static const struct file_operations audio_evrc_fops = {
35104 .owner = THIS_MODULE,
35105 .open = audevrc_open,
35106 .release = audevrc_release,
35107diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_in.c
35108--- linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_in.c 2011-03-27 14:31:47.000000000 -0400
35109+++ linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_in.c 2011-04-17 15:56:46.000000000 -0400
35110@@ -913,7 +913,7 @@ static int audpre_open(struct inode *ino
35111 return 0;
35112 }
35113
35114-static struct file_operations audio_fops = {
35115+static const struct file_operations audio_fops = {
35116 .owner = THIS_MODULE,
35117 .open = audio_in_open,
35118 .release = audio_in_release,
35119@@ -922,7 +922,7 @@ static struct file_operations audio_fops
35120 .unlocked_ioctl = audio_in_ioctl,
35121 };
35122
35123-static struct file_operations audpre_fops = {
35124+static const struct file_operations audpre_fops = {
35125 .owner = THIS_MODULE,
35126 .open = audpre_open,
35127 .unlocked_ioctl = audpre_ioctl,
35128diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_mp3.c
35129--- linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_mp3.c 2011-03-27 14:31:47.000000000 -0400
35130+++ linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_mp3.c 2011-04-17 15:56:46.000000000 -0400
35131@@ -941,7 +941,7 @@ done:
35132 return rc;
35133 }
35134
35135-static struct file_operations audio_mp3_fops = {
35136+static const struct file_operations audio_mp3_fops = {
35137 .owner = THIS_MODULE,
35138 .open = audio_open,
35139 .release = audio_release,
35140diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_out.c
35141--- linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_out.c 2011-03-27 14:31:47.000000000 -0400
35142+++ linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_out.c 2011-04-17 15:56:46.000000000 -0400
35143@@ -810,7 +810,7 @@ static int audpp_open(struct inode *inod
35144 return 0;
35145 }
35146
35147-static struct file_operations audio_fops = {
35148+static const struct file_operations audio_fops = {
35149 .owner = THIS_MODULE,
35150 .open = audio_open,
35151 .release = audio_release,
35152@@ -819,7 +819,7 @@ static struct file_operations audio_fops
35153 .unlocked_ioctl = audio_ioctl,
35154 };
35155
35156-static struct file_operations audpp_fops = {
35157+static const struct file_operations audpp_fops = {
35158 .owner = THIS_MODULE,
35159 .open = audpp_open,
35160 .unlocked_ioctl = audpp_ioctl,
35161diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_qcelp.c
35162--- linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-03-27 14:31:47.000000000 -0400
35163+++ linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-04-17 15:56:46.000000000 -0400
35164@@ -816,7 +816,7 @@ err:
35165 return rc;
35166 }
35167
35168-static struct file_operations audio_qcelp_fops = {
35169+static const struct file_operations audio_qcelp_fops = {
35170 .owner = THIS_MODULE,
35171 .open = audqcelp_open,
35172 .release = audqcelp_release,
35173diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/snd.c linux-2.6.32.43/drivers/staging/dream/qdsp5/snd.c
35174--- linux-2.6.32.43/drivers/staging/dream/qdsp5/snd.c 2011-03-27 14:31:47.000000000 -0400
35175+++ linux-2.6.32.43/drivers/staging/dream/qdsp5/snd.c 2011-04-17 15:56:46.000000000 -0400
35176@@ -242,7 +242,7 @@ err:
35177 return rc;
35178 }
35179
35180-static struct file_operations snd_fops = {
35181+static const struct file_operations snd_fops = {
35182 .owner = THIS_MODULE,
35183 .open = snd_open,
35184 .release = snd_release,
35185diff -urNp linux-2.6.32.43/drivers/staging/dream/smd/smd_qmi.c linux-2.6.32.43/drivers/staging/dream/smd/smd_qmi.c
35186--- linux-2.6.32.43/drivers/staging/dream/smd/smd_qmi.c 2011-03-27 14:31:47.000000000 -0400
35187+++ linux-2.6.32.43/drivers/staging/dream/smd/smd_qmi.c 2011-04-17 15:56:46.000000000 -0400
35188@@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip,
35189 return 0;
35190 }
35191
35192-static struct file_operations qmi_fops = {
35193+static const struct file_operations qmi_fops = {
35194 .owner = THIS_MODULE,
35195 .read = qmi_read,
35196 .write = qmi_write,
35197diff -urNp linux-2.6.32.43/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.32.43/drivers/staging/dream/smd/smd_rpcrouter_device.c
35198--- linux-2.6.32.43/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-03-27 14:31:47.000000000 -0400
35199+++ linux-2.6.32.43/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-04-17 15:56:46.000000000 -0400
35200@@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file
35201 return rc;
35202 }
35203
35204-static struct file_operations rpcrouter_server_fops = {
35205+static const struct file_operations rpcrouter_server_fops = {
35206 .owner = THIS_MODULE,
35207 .open = rpcrouter_open,
35208 .release = rpcrouter_release,
35209@@ -224,7 +224,7 @@ static struct file_operations rpcrouter_
35210 .unlocked_ioctl = rpcrouter_ioctl,
35211 };
35212
35213-static struct file_operations rpcrouter_router_fops = {
35214+static const struct file_operations rpcrouter_router_fops = {
35215 .owner = THIS_MODULE,
35216 .open = rpcrouter_open,
35217 .release = rpcrouter_release,
35218diff -urNp linux-2.6.32.43/drivers/staging/dst/dcore.c linux-2.6.32.43/drivers/staging/dst/dcore.c
35219--- linux-2.6.32.43/drivers/staging/dst/dcore.c 2011-03-27 14:31:47.000000000 -0400
35220+++ linux-2.6.32.43/drivers/staging/dst/dcore.c 2011-04-17 15:56:46.000000000 -0400
35221@@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendi
35222 return 0;
35223 }
35224
35225-static struct block_device_operations dst_blk_ops = {
35226+static const struct block_device_operations dst_blk_ops = {
35227 .open = dst_bdev_open,
35228 .release = dst_bdev_release,
35229 .owner = THIS_MODULE,
35230@@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s
35231 n->size = ctl->size;
35232
35233 atomic_set(&n->refcnt, 1);
35234- atomic_long_set(&n->gen, 0);
35235+ atomic_long_set_unchecked(&n->gen, 0);
35236 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
35237
35238 err = dst_node_sysfs_init(n);
35239diff -urNp linux-2.6.32.43/drivers/staging/dst/trans.c linux-2.6.32.43/drivers/staging/dst/trans.c
35240--- linux-2.6.32.43/drivers/staging/dst/trans.c 2011-03-27 14:31:47.000000000 -0400
35241+++ linux-2.6.32.43/drivers/staging/dst/trans.c 2011-04-17 15:56:46.000000000 -0400
35242@@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n,
35243 t->error = 0;
35244 t->retries = 0;
35245 atomic_set(&t->refcnt, 1);
35246- t->gen = atomic_long_inc_return(&n->gen);
35247+ t->gen = atomic_long_inc_return_unchecked(&n->gen);
35248
35249 t->enc = bio_data_dir(bio);
35250 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
35251diff -urNp linux-2.6.32.43/drivers/staging/et131x/et1310_tx.c linux-2.6.32.43/drivers/staging/et131x/et1310_tx.c
35252--- linux-2.6.32.43/drivers/staging/et131x/et1310_tx.c 2011-03-27 14:31:47.000000000 -0400
35253+++ linux-2.6.32.43/drivers/staging/et131x/et1310_tx.c 2011-05-04 17:56:28.000000000 -0400
35254@@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru
35255 struct net_device_stats *stats = &etdev->net_stats;
35256
35257 if (pMpTcb->Flags & fMP_DEST_BROAD)
35258- atomic_inc(&etdev->Stats.brdcstxmt);
35259+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
35260 else if (pMpTcb->Flags & fMP_DEST_MULTI)
35261- atomic_inc(&etdev->Stats.multixmt);
35262+ atomic_inc_unchecked(&etdev->Stats.multixmt);
35263 else
35264- atomic_inc(&etdev->Stats.unixmt);
35265+ atomic_inc_unchecked(&etdev->Stats.unixmt);
35266
35267 if (pMpTcb->Packet) {
35268 stats->tx_bytes += pMpTcb->Packet->len;
35269diff -urNp linux-2.6.32.43/drivers/staging/et131x/et131x_adapter.h linux-2.6.32.43/drivers/staging/et131x/et131x_adapter.h
35270--- linux-2.6.32.43/drivers/staging/et131x/et131x_adapter.h 2011-03-27 14:31:47.000000000 -0400
35271+++ linux-2.6.32.43/drivers/staging/et131x/et131x_adapter.h 2011-05-04 17:56:28.000000000 -0400
35272@@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
35273 * operations
35274 */
35275 u32 unircv; /* # multicast packets received */
35276- atomic_t unixmt; /* # multicast packets for Tx */
35277+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
35278 u32 multircv; /* # multicast packets received */
35279- atomic_t multixmt; /* # multicast packets for Tx */
35280+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
35281 u32 brdcstrcv; /* # broadcast packets received */
35282- atomic_t brdcstxmt; /* # broadcast packets for Tx */
35283+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
35284 u32 norcvbuf; /* # Rx packets discarded */
35285 u32 noxmtbuf; /* # Tx packets discarded */
35286
35287diff -urNp linux-2.6.32.43/drivers/staging/go7007/go7007-v4l2.c linux-2.6.32.43/drivers/staging/go7007/go7007-v4l2.c
35288--- linux-2.6.32.43/drivers/staging/go7007/go7007-v4l2.c 2011-03-27 14:31:47.000000000 -0400
35289+++ linux-2.6.32.43/drivers/staging/go7007/go7007-v4l2.c 2011-04-17 15:56:46.000000000 -0400
35290@@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_are
35291 return 0;
35292 }
35293
35294-static struct vm_operations_struct go7007_vm_ops = {
35295+static const struct vm_operations_struct go7007_vm_ops = {
35296 .open = go7007_vm_open,
35297 .close = go7007_vm_close,
35298 .fault = go7007_vm_fault,
35299diff -urNp linux-2.6.32.43/drivers/staging/hv/blkvsc_drv.c linux-2.6.32.43/drivers/staging/hv/blkvsc_drv.c
35300--- linux-2.6.32.43/drivers/staging/hv/blkvsc_drv.c 2011-03-27 14:31:47.000000000 -0400
35301+++ linux-2.6.32.43/drivers/staging/hv/blkvsc_drv.c 2011-04-17 15:56:46.000000000 -0400
35302@@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKV
35303 /* The one and only one */
35304 static struct blkvsc_driver_context g_blkvsc_drv;
35305
35306-static struct block_device_operations block_ops = {
35307+static const struct block_device_operations block_ops = {
35308 .owner = THIS_MODULE,
35309 .open = blkvsc_open,
35310 .release = blkvsc_release,
35311diff -urNp linux-2.6.32.43/drivers/staging/hv/Channel.c linux-2.6.32.43/drivers/staging/hv/Channel.c
35312--- linux-2.6.32.43/drivers/staging/hv/Channel.c 2011-04-17 17:00:52.000000000 -0400
35313+++ linux-2.6.32.43/drivers/staging/hv/Channel.c 2011-05-04 17:56:28.000000000 -0400
35314@@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm
35315
35316 DPRINT_ENTER(VMBUS);
35317
35318- nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
35319- atomic_inc(&gVmbusConnection.NextGpadlHandle);
35320+ nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
35321+ atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
35322
35323 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
35324 ASSERT(msgInfo != NULL);
35325diff -urNp linux-2.6.32.43/drivers/staging/hv/Hv.c linux-2.6.32.43/drivers/staging/hv/Hv.c
35326--- linux-2.6.32.43/drivers/staging/hv/Hv.c 2011-03-27 14:31:47.000000000 -0400
35327+++ linux-2.6.32.43/drivers/staging/hv/Hv.c 2011-04-17 15:56:46.000000000 -0400
35328@@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, vo
35329 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
35330 u32 outputAddressHi = outputAddress >> 32;
35331 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
35332- volatile void *hypercallPage = gHvContext.HypercallPage;
35333+ volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
35334
35335 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
35336 Control, Input, Output);
35337diff -urNp linux-2.6.32.43/drivers/staging/hv/vmbus_drv.c linux-2.6.32.43/drivers/staging/hv/vmbus_drv.c
35338--- linux-2.6.32.43/drivers/staging/hv/vmbus_drv.c 2011-03-27 14:31:47.000000000 -0400
35339+++ linux-2.6.32.43/drivers/staging/hv/vmbus_drv.c 2011-05-04 17:56:28.000000000 -0400
35340@@ -532,7 +532,7 @@ static int vmbus_child_device_register(s
35341 to_device_context(root_device_obj);
35342 struct device_context *child_device_ctx =
35343 to_device_context(child_device_obj);
35344- static atomic_t device_num = ATOMIC_INIT(0);
35345+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
35346
35347 DPRINT_ENTER(VMBUS_DRV);
35348
35349@@ -541,7 +541,7 @@ static int vmbus_child_device_register(s
35350
35351 /* Set the device name. Otherwise, device_register() will fail. */
35352 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
35353- atomic_inc_return(&device_num));
35354+ atomic_inc_return_unchecked(&device_num));
35355
35356 /* The new device belongs to this bus */
35357 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
35358diff -urNp linux-2.6.32.43/drivers/staging/hv/VmbusPrivate.h linux-2.6.32.43/drivers/staging/hv/VmbusPrivate.h
35359--- linux-2.6.32.43/drivers/staging/hv/VmbusPrivate.h 2011-04-17 17:00:52.000000000 -0400
35360+++ linux-2.6.32.43/drivers/staging/hv/VmbusPrivate.h 2011-05-04 17:56:28.000000000 -0400
35361@@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
35362 struct VMBUS_CONNECTION {
35363 enum VMBUS_CONNECT_STATE ConnectState;
35364
35365- atomic_t NextGpadlHandle;
35366+ atomic_unchecked_t NextGpadlHandle;
35367
35368 /*
35369 * Represents channel interrupts. Each bit position represents a
35370diff -urNp linux-2.6.32.43/drivers/staging/octeon/ethernet.c linux-2.6.32.43/drivers/staging/octeon/ethernet.c
35371--- linux-2.6.32.43/drivers/staging/octeon/ethernet.c 2011-03-27 14:31:47.000000000 -0400
35372+++ linux-2.6.32.43/drivers/staging/octeon/ethernet.c 2011-05-04 17:56:28.000000000 -0400
35373@@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_
35374 * since the RX tasklet also increments it.
35375 */
35376 #ifdef CONFIG_64BIT
35377- atomic64_add(rx_status.dropped_packets,
35378- (atomic64_t *)&priv->stats.rx_dropped);
35379+ atomic64_add_unchecked(rx_status.dropped_packets,
35380+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
35381 #else
35382- atomic_add(rx_status.dropped_packets,
35383- (atomic_t *)&priv->stats.rx_dropped);
35384+ atomic_add_unchecked(rx_status.dropped_packets,
35385+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
35386 #endif
35387 }
35388
35389diff -urNp linux-2.6.32.43/drivers/staging/octeon/ethernet-rx.c linux-2.6.32.43/drivers/staging/octeon/ethernet-rx.c
35390--- linux-2.6.32.43/drivers/staging/octeon/ethernet-rx.c 2011-03-27 14:31:47.000000000 -0400
35391+++ linux-2.6.32.43/drivers/staging/octeon/ethernet-rx.c 2011-05-04 17:56:28.000000000 -0400
35392@@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un
35393 /* Increment RX stats for virtual ports */
35394 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
35395 #ifdef CONFIG_64BIT
35396- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
35397- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
35398+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
35399+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
35400 #else
35401- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
35402- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
35403+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
35404+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
35405 #endif
35406 }
35407 netif_receive_skb(skb);
35408@@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un
35409 dev->name);
35410 */
35411 #ifdef CONFIG_64BIT
35412- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
35413+ atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
35414 #else
35415- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
35416+ atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
35417 #endif
35418 dev_kfree_skb_irq(skb);
35419 }
35420diff -urNp linux-2.6.32.43/drivers/staging/panel/panel.c linux-2.6.32.43/drivers/staging/panel/panel.c
35421--- linux-2.6.32.43/drivers/staging/panel/panel.c 2011-03-27 14:31:47.000000000 -0400
35422+++ linux-2.6.32.43/drivers/staging/panel/panel.c 2011-04-17 15:56:46.000000000 -0400
35423@@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *ino
35424 return 0;
35425 }
35426
35427-static struct file_operations lcd_fops = {
35428+static const struct file_operations lcd_fops = {
35429 .write = lcd_write,
35430 .open = lcd_open,
35431 .release = lcd_release,
35432@@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *
35433 return 0;
35434 }
35435
35436-static struct file_operations keypad_fops = {
35437+static const struct file_operations keypad_fops = {
35438 .read = keypad_read, /* read */
35439 .open = keypad_open, /* open */
35440 .release = keypad_release, /* close */
35441diff -urNp linux-2.6.32.43/drivers/staging/phison/phison.c linux-2.6.32.43/drivers/staging/phison/phison.c
35442--- linux-2.6.32.43/drivers/staging/phison/phison.c 2011-03-27 14:31:47.000000000 -0400
35443+++ linux-2.6.32.43/drivers/staging/phison/phison.c 2011-04-17 15:56:46.000000000 -0400
35444@@ -43,7 +43,7 @@ static struct scsi_host_template phison_
35445 ATA_BMDMA_SHT(DRV_NAME),
35446 };
35447
35448-static struct ata_port_operations phison_ops = {
35449+static const struct ata_port_operations phison_ops = {
35450 .inherits = &ata_bmdma_port_ops,
35451 .prereset = phison_pre_reset,
35452 };
35453diff -urNp linux-2.6.32.43/drivers/staging/poch/poch.c linux-2.6.32.43/drivers/staging/poch/poch.c
35454--- linux-2.6.32.43/drivers/staging/poch/poch.c 2011-03-27 14:31:47.000000000 -0400
35455+++ linux-2.6.32.43/drivers/staging/poch/poch.c 2011-04-17 15:56:46.000000000 -0400
35456@@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inod
35457 return 0;
35458 }
35459
35460-static struct file_operations poch_fops = {
35461+static const struct file_operations poch_fops = {
35462 .owner = THIS_MODULE,
35463 .open = poch_open,
35464 .release = poch_release,
35465diff -urNp linux-2.6.32.43/drivers/staging/pohmelfs/inode.c linux-2.6.32.43/drivers/staging/pohmelfs/inode.c
35466--- linux-2.6.32.43/drivers/staging/pohmelfs/inode.c 2011-03-27 14:31:47.000000000 -0400
35467+++ linux-2.6.32.43/drivers/staging/pohmelfs/inode.c 2011-05-04 17:56:20.000000000 -0400
35468@@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct su
35469 mutex_init(&psb->mcache_lock);
35470 psb->mcache_root = RB_ROOT;
35471 psb->mcache_timeout = msecs_to_jiffies(5000);
35472- atomic_long_set(&psb->mcache_gen, 0);
35473+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
35474
35475 psb->trans_max_pages = 100;
35476
35477@@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct su
35478 INIT_LIST_HEAD(&psb->crypto_ready_list);
35479 INIT_LIST_HEAD(&psb->crypto_active_list);
35480
35481- atomic_set(&psb->trans_gen, 1);
35482+ atomic_set_unchecked(&psb->trans_gen, 1);
35483 atomic_long_set(&psb->total_inodes, 0);
35484
35485 mutex_init(&psb->state_lock);
35486diff -urNp linux-2.6.32.43/drivers/staging/pohmelfs/mcache.c linux-2.6.32.43/drivers/staging/pohmelfs/mcache.c
35487--- linux-2.6.32.43/drivers/staging/pohmelfs/mcache.c 2011-03-27 14:31:47.000000000 -0400
35488+++ linux-2.6.32.43/drivers/staging/pohmelfs/mcache.c 2011-04-17 15:56:46.000000000 -0400
35489@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
35490 m->data = data;
35491 m->start = start;
35492 m->size = size;
35493- m->gen = atomic_long_inc_return(&psb->mcache_gen);
35494+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
35495
35496 mutex_lock(&psb->mcache_lock);
35497 err = pohmelfs_mcache_insert(psb, m);
35498diff -urNp linux-2.6.32.43/drivers/staging/pohmelfs/netfs.h linux-2.6.32.43/drivers/staging/pohmelfs/netfs.h
35499--- linux-2.6.32.43/drivers/staging/pohmelfs/netfs.h 2011-03-27 14:31:47.000000000 -0400
35500+++ linux-2.6.32.43/drivers/staging/pohmelfs/netfs.h 2011-05-04 17:56:20.000000000 -0400
35501@@ -570,14 +570,14 @@ struct pohmelfs_config;
35502 struct pohmelfs_sb {
35503 struct rb_root mcache_root;
35504 struct mutex mcache_lock;
35505- atomic_long_t mcache_gen;
35506+ atomic_long_unchecked_t mcache_gen;
35507 unsigned long mcache_timeout;
35508
35509 unsigned int idx;
35510
35511 unsigned int trans_retries;
35512
35513- atomic_t trans_gen;
35514+ atomic_unchecked_t trans_gen;
35515
35516 unsigned int crypto_attached_size;
35517 unsigned int crypto_align_size;
35518diff -urNp linux-2.6.32.43/drivers/staging/pohmelfs/trans.c linux-2.6.32.43/drivers/staging/pohmelfs/trans.c
35519--- linux-2.6.32.43/drivers/staging/pohmelfs/trans.c 2011-03-27 14:31:47.000000000 -0400
35520+++ linux-2.6.32.43/drivers/staging/pohmelfs/trans.c 2011-05-04 17:56:28.000000000 -0400
35521@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
35522 int err;
35523 struct netfs_cmd *cmd = t->iovec.iov_base;
35524
35525- t->gen = atomic_inc_return(&psb->trans_gen);
35526+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
35527
35528 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
35529 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
35530diff -urNp linux-2.6.32.43/drivers/staging/sep/sep_driver.c linux-2.6.32.43/drivers/staging/sep/sep_driver.c
35531--- linux-2.6.32.43/drivers/staging/sep/sep_driver.c 2011-03-27 14:31:47.000000000 -0400
35532+++ linux-2.6.32.43/drivers/staging/sep/sep_driver.c 2011-04-17 15:56:46.000000000 -0400
35533@@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver
35534 static dev_t sep_devno;
35535
35536 /* the files operations structure of the driver */
35537-static struct file_operations sep_file_operations = {
35538+static const struct file_operations sep_file_operations = {
35539 .owner = THIS_MODULE,
35540 .ioctl = sep_ioctl,
35541 .poll = sep_poll,
35542diff -urNp linux-2.6.32.43/drivers/staging/usbip/vhci.h linux-2.6.32.43/drivers/staging/usbip/vhci.h
35543--- linux-2.6.32.43/drivers/staging/usbip/vhci.h 2011-03-27 14:31:47.000000000 -0400
35544+++ linux-2.6.32.43/drivers/staging/usbip/vhci.h 2011-05-04 17:56:28.000000000 -0400
35545@@ -92,7 +92,7 @@ struct vhci_hcd {
35546 unsigned resuming:1;
35547 unsigned long re_timeout;
35548
35549- atomic_t seqnum;
35550+ atomic_unchecked_t seqnum;
35551
35552 /*
35553 * NOTE:
35554diff -urNp linux-2.6.32.43/drivers/staging/usbip/vhci_hcd.c linux-2.6.32.43/drivers/staging/usbip/vhci_hcd.c
35555--- linux-2.6.32.43/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:01.000000000 -0400
35556+++ linux-2.6.32.43/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:33.000000000 -0400
35557@@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
35558 return;
35559 }
35560
35561- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
35562+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35563 if (priv->seqnum == 0xffff)
35564 usbip_uinfo("seqnum max\n");
35565
35566@@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h
35567 return -ENOMEM;
35568 }
35569
35570- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
35571+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
35572 if (unlink->seqnum == 0xffff)
35573 usbip_uinfo("seqnum max\n");
35574
35575@@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc
35576 vdev->rhport = rhport;
35577 }
35578
35579- atomic_set(&vhci->seqnum, 0);
35580+ atomic_set_unchecked(&vhci->seqnum, 0);
35581 spin_lock_init(&vhci->lock);
35582
35583
35584diff -urNp linux-2.6.32.43/drivers/staging/usbip/vhci_rx.c linux-2.6.32.43/drivers/staging/usbip/vhci_rx.c
35585--- linux-2.6.32.43/drivers/staging/usbip/vhci_rx.c 2011-04-17 17:00:52.000000000 -0400
35586+++ linux-2.6.32.43/drivers/staging/usbip/vhci_rx.c 2011-05-04 17:56:28.000000000 -0400
35587@@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct
35588 usbip_uerr("cannot find a urb of seqnum %u\n",
35589 pdu->base.seqnum);
35590 usbip_uinfo("max seqnum %d\n",
35591- atomic_read(&the_controller->seqnum));
35592+ atomic_read_unchecked(&the_controller->seqnum));
35593 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
35594 return;
35595 }
35596diff -urNp linux-2.6.32.43/drivers/staging/vme/devices/vme_user.c linux-2.6.32.43/drivers/staging/vme/devices/vme_user.c
35597--- linux-2.6.32.43/drivers/staging/vme/devices/vme_user.c 2011-03-27 14:31:47.000000000 -0400
35598+++ linux-2.6.32.43/drivers/staging/vme/devices/vme_user.c 2011-04-17 15:56:46.000000000 -0400
35599@@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
35600 static int __init vme_user_probe(struct device *, int, int);
35601 static int __exit vme_user_remove(struct device *, int, int);
35602
35603-static struct file_operations vme_user_fops = {
35604+static const struct file_operations vme_user_fops = {
35605 .open = vme_user_open,
35606 .release = vme_user_release,
35607 .read = vme_user_read,
35608diff -urNp linux-2.6.32.43/drivers/telephony/ixj.c linux-2.6.32.43/drivers/telephony/ixj.c
35609--- linux-2.6.32.43/drivers/telephony/ixj.c 2011-03-27 14:31:47.000000000 -0400
35610+++ linux-2.6.32.43/drivers/telephony/ixj.c 2011-05-16 21:46:57.000000000 -0400
35611@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
35612 bool mContinue;
35613 char *pIn, *pOut;
35614
35615+ pax_track_stack();
35616+
35617 if (!SCI_Prepare(j))
35618 return 0;
35619
35620diff -urNp linux-2.6.32.43/drivers/uio/uio.c linux-2.6.32.43/drivers/uio/uio.c
35621--- linux-2.6.32.43/drivers/uio/uio.c 2011-03-27 14:31:47.000000000 -0400
35622+++ linux-2.6.32.43/drivers/uio/uio.c 2011-05-04 17:56:20.000000000 -0400
35623@@ -23,6 +23,7 @@
35624 #include <linux/string.h>
35625 #include <linux/kobject.h>
35626 #include <linux/uio_driver.h>
35627+#include <asm/local.h>
35628
35629 #define UIO_MAX_DEVICES 255
35630
35631@@ -30,10 +31,10 @@ struct uio_device {
35632 struct module *owner;
35633 struct device *dev;
35634 int minor;
35635- atomic_t event;
35636+ atomic_unchecked_t event;
35637 struct fasync_struct *async_queue;
35638 wait_queue_head_t wait;
35639- int vma_count;
35640+ local_t vma_count;
35641 struct uio_info *info;
35642 struct kobject *map_dir;
35643 struct kobject *portio_dir;
35644@@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobj
35645 return entry->show(mem, buf);
35646 }
35647
35648-static struct sysfs_ops map_sysfs_ops = {
35649+static const struct sysfs_ops map_sysfs_ops = {
35650 .show = map_type_show,
35651 };
35652
35653@@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct k
35654 return entry->show(port, buf);
35655 }
35656
35657-static struct sysfs_ops portio_sysfs_ops = {
35658+static const struct sysfs_ops portio_sysfs_ops = {
35659 .show = portio_type_show,
35660 };
35661
35662@@ -255,7 +256,7 @@ static ssize_t show_event(struct device
35663 struct uio_device *idev = dev_get_drvdata(dev);
35664 if (idev)
35665 return sprintf(buf, "%u\n",
35666- (unsigned int)atomic_read(&idev->event));
35667+ (unsigned int)atomic_read_unchecked(&idev->event));
35668 else
35669 return -ENODEV;
35670 }
35671@@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *i
35672 {
35673 struct uio_device *idev = info->uio_dev;
35674
35675- atomic_inc(&idev->event);
35676+ atomic_inc_unchecked(&idev->event);
35677 wake_up_interruptible(&idev->wait);
35678 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
35679 }
35680@@ -477,7 +478,7 @@ static int uio_open(struct inode *inode,
35681 }
35682
35683 listener->dev = idev;
35684- listener->event_count = atomic_read(&idev->event);
35685+ listener->event_count = atomic_read_unchecked(&idev->event);
35686 filep->private_data = listener;
35687
35688 if (idev->info->open) {
35689@@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file
35690 return -EIO;
35691
35692 poll_wait(filep, &idev->wait, wait);
35693- if (listener->event_count != atomic_read(&idev->event))
35694+ if (listener->event_count != atomic_read_unchecked(&idev->event))
35695 return POLLIN | POLLRDNORM;
35696 return 0;
35697 }
35698@@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *fil
35699 do {
35700 set_current_state(TASK_INTERRUPTIBLE);
35701
35702- event_count = atomic_read(&idev->event);
35703+ event_count = atomic_read_unchecked(&idev->event);
35704 if (event_count != listener->event_count) {
35705 if (copy_to_user(buf, &event_count, count))
35706 retval = -EFAULT;
35707@@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_
35708 static void uio_vma_open(struct vm_area_struct *vma)
35709 {
35710 struct uio_device *idev = vma->vm_private_data;
35711- idev->vma_count++;
35712+ local_inc(&idev->vma_count);
35713 }
35714
35715 static void uio_vma_close(struct vm_area_struct *vma)
35716 {
35717 struct uio_device *idev = vma->vm_private_data;
35718- idev->vma_count--;
35719+ local_dec(&idev->vma_count);
35720 }
35721
35722 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35723@@ -840,7 +841,7 @@ int __uio_register_device(struct module
35724 idev->owner = owner;
35725 idev->info = info;
35726 init_waitqueue_head(&idev->wait);
35727- atomic_set(&idev->event, 0);
35728+ atomic_set_unchecked(&idev->event, 0);
35729
35730 ret = uio_get_minor(idev);
35731 if (ret)
35732diff -urNp linux-2.6.32.43/drivers/usb/atm/usbatm.c linux-2.6.32.43/drivers/usb/atm/usbatm.c
35733--- linux-2.6.32.43/drivers/usb/atm/usbatm.c 2011-03-27 14:31:47.000000000 -0400
35734+++ linux-2.6.32.43/drivers/usb/atm/usbatm.c 2011-04-17 15:56:46.000000000 -0400
35735@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
35736 if (printk_ratelimit())
35737 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
35738 __func__, vpi, vci);
35739- atomic_inc(&vcc->stats->rx_err);
35740+ atomic_inc_unchecked(&vcc->stats->rx_err);
35741 return;
35742 }
35743
35744@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
35745 if (length > ATM_MAX_AAL5_PDU) {
35746 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
35747 __func__, length, vcc);
35748- atomic_inc(&vcc->stats->rx_err);
35749+ atomic_inc_unchecked(&vcc->stats->rx_err);
35750 goto out;
35751 }
35752
35753@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
35754 if (sarb->len < pdu_length) {
35755 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
35756 __func__, pdu_length, sarb->len, vcc);
35757- atomic_inc(&vcc->stats->rx_err);
35758+ atomic_inc_unchecked(&vcc->stats->rx_err);
35759 goto out;
35760 }
35761
35762 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
35763 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
35764 __func__, vcc);
35765- atomic_inc(&vcc->stats->rx_err);
35766+ atomic_inc_unchecked(&vcc->stats->rx_err);
35767 goto out;
35768 }
35769
35770@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
35771 if (printk_ratelimit())
35772 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
35773 __func__, length);
35774- atomic_inc(&vcc->stats->rx_drop);
35775+ atomic_inc_unchecked(&vcc->stats->rx_drop);
35776 goto out;
35777 }
35778
35779@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
35780
35781 vcc->push(vcc, skb);
35782
35783- atomic_inc(&vcc->stats->rx);
35784+ atomic_inc_unchecked(&vcc->stats->rx);
35785 out:
35786 skb_trim(sarb, 0);
35787 }
35788@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
35789 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
35790
35791 usbatm_pop(vcc, skb);
35792- atomic_inc(&vcc->stats->tx);
35793+ atomic_inc_unchecked(&vcc->stats->tx);
35794
35795 skb = skb_dequeue(&instance->sndqueue);
35796 }
35797@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
35798 if (!left--)
35799 return sprintf(page,
35800 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
35801- atomic_read(&atm_dev->stats.aal5.tx),
35802- atomic_read(&atm_dev->stats.aal5.tx_err),
35803- atomic_read(&atm_dev->stats.aal5.rx),
35804- atomic_read(&atm_dev->stats.aal5.rx_err),
35805- atomic_read(&atm_dev->stats.aal5.rx_drop));
35806+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
35807+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
35808+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
35809+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
35810+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
35811
35812 if (!left--) {
35813 if (instance->disconnected)
35814diff -urNp linux-2.6.32.43/drivers/usb/class/cdc-wdm.c linux-2.6.32.43/drivers/usb/class/cdc-wdm.c
35815--- linux-2.6.32.43/drivers/usb/class/cdc-wdm.c 2011-03-27 14:31:47.000000000 -0400
35816+++ linux-2.6.32.43/drivers/usb/class/cdc-wdm.c 2011-04-17 15:56:46.000000000 -0400
35817@@ -314,7 +314,7 @@ static ssize_t wdm_write
35818 if (r < 0)
35819 goto outnp;
35820
35821- if (!file->f_flags && O_NONBLOCK)
35822+ if (!(file->f_flags & O_NONBLOCK))
35823 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
35824 &desc->flags));
35825 else
35826diff -urNp linux-2.6.32.43/drivers/usb/core/hcd.c linux-2.6.32.43/drivers/usb/core/hcd.c
35827--- linux-2.6.32.43/drivers/usb/core/hcd.c 2011-03-27 14:31:47.000000000 -0400
35828+++ linux-2.6.32.43/drivers/usb/core/hcd.c 2011-04-17 15:56:46.000000000 -0400
35829@@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
35830
35831 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
35832
35833-struct usb_mon_operations *mon_ops;
35834+const struct usb_mon_operations *mon_ops;
35835
35836 /*
35837 * The registration is unlocked.
35838@@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
35839 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
35840 */
35841
35842-int usb_mon_register (struct usb_mon_operations *ops)
35843+int usb_mon_register (const struct usb_mon_operations *ops)
35844 {
35845
35846 if (mon_ops)
35847diff -urNp linux-2.6.32.43/drivers/usb/core/hcd.h linux-2.6.32.43/drivers/usb/core/hcd.h
35848--- linux-2.6.32.43/drivers/usb/core/hcd.h 2011-03-27 14:31:47.000000000 -0400
35849+++ linux-2.6.32.43/drivers/usb/core/hcd.h 2011-04-17 15:56:46.000000000 -0400
35850@@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) {
35851 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
35852
35853 struct usb_mon_operations {
35854- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
35855- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
35856- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
35857+ void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
35858+ void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
35859+ void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
35860 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
35861 };
35862
35863-extern struct usb_mon_operations *mon_ops;
35864+extern const struct usb_mon_operations *mon_ops;
35865
35866 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
35867 {
35868@@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(s
35869 (*mon_ops->urb_complete)(bus, urb, status);
35870 }
35871
35872-int usb_mon_register(struct usb_mon_operations *ops);
35873+int usb_mon_register(const struct usb_mon_operations *ops);
35874 void usb_mon_deregister(void);
35875
35876 #else
35877diff -urNp linux-2.6.32.43/drivers/usb/core/message.c linux-2.6.32.43/drivers/usb/core/message.c
35878--- linux-2.6.32.43/drivers/usb/core/message.c 2011-03-27 14:31:47.000000000 -0400
35879+++ linux-2.6.32.43/drivers/usb/core/message.c 2011-04-17 15:56:46.000000000 -0400
35880@@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device
35881 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
35882 if (buf) {
35883 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
35884- if (len > 0) {
35885- smallbuf = kmalloc(++len, GFP_NOIO);
35886+ if (len++ > 0) {
35887+ smallbuf = kmalloc(len, GFP_NOIO);
35888 if (!smallbuf)
35889 return buf;
35890 memcpy(smallbuf, buf, len);
35891diff -urNp linux-2.6.32.43/drivers/usb/misc/appledisplay.c linux-2.6.32.43/drivers/usb/misc/appledisplay.c
35892--- linux-2.6.32.43/drivers/usb/misc/appledisplay.c 2011-03-27 14:31:47.000000000 -0400
35893+++ linux-2.6.32.43/drivers/usb/misc/appledisplay.c 2011-04-17 15:56:46.000000000 -0400
35894@@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
35895 return pdata->msgdata[1];
35896 }
35897
35898-static struct backlight_ops appledisplay_bl_data = {
35899+static const struct backlight_ops appledisplay_bl_data = {
35900 .get_brightness = appledisplay_bl_get_brightness,
35901 .update_status = appledisplay_bl_update_status,
35902 };
35903diff -urNp linux-2.6.32.43/drivers/usb/mon/mon_main.c linux-2.6.32.43/drivers/usb/mon/mon_main.c
35904--- linux-2.6.32.43/drivers/usb/mon/mon_main.c 2011-03-27 14:31:47.000000000 -0400
35905+++ linux-2.6.32.43/drivers/usb/mon/mon_main.c 2011-04-17 15:56:46.000000000 -0400
35906@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
35907 /*
35908 * Ops
35909 */
35910-static struct usb_mon_operations mon_ops_0 = {
35911+static const struct usb_mon_operations mon_ops_0 = {
35912 .urb_submit = mon_submit,
35913 .urb_submit_error = mon_submit_error,
35914 .urb_complete = mon_complete,
35915diff -urNp linux-2.6.32.43/drivers/usb/wusbcore/wa-hc.h linux-2.6.32.43/drivers/usb/wusbcore/wa-hc.h
35916--- linux-2.6.32.43/drivers/usb/wusbcore/wa-hc.h 2011-03-27 14:31:47.000000000 -0400
35917+++ linux-2.6.32.43/drivers/usb/wusbcore/wa-hc.h 2011-05-04 17:56:28.000000000 -0400
35918@@ -192,7 +192,7 @@ struct wahc {
35919 struct list_head xfer_delayed_list;
35920 spinlock_t xfer_list_lock;
35921 struct work_struct xfer_work;
35922- atomic_t xfer_id_count;
35923+ atomic_unchecked_t xfer_id_count;
35924 };
35925
35926
35927@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
35928 INIT_LIST_HEAD(&wa->xfer_delayed_list);
35929 spin_lock_init(&wa->xfer_list_lock);
35930 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
35931- atomic_set(&wa->xfer_id_count, 1);
35932+ atomic_set_unchecked(&wa->xfer_id_count, 1);
35933 }
35934
35935 /**
35936diff -urNp linux-2.6.32.43/drivers/usb/wusbcore/wa-xfer.c linux-2.6.32.43/drivers/usb/wusbcore/wa-xfer.c
35937--- linux-2.6.32.43/drivers/usb/wusbcore/wa-xfer.c 2011-03-27 14:31:47.000000000 -0400
35938+++ linux-2.6.32.43/drivers/usb/wusbcore/wa-xfer.c 2011-05-04 17:56:28.000000000 -0400
35939@@ -293,7 +293,7 @@ out:
35940 */
35941 static void wa_xfer_id_init(struct wa_xfer *xfer)
35942 {
35943- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
35944+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
35945 }
35946
35947 /*
35948diff -urNp linux-2.6.32.43/drivers/uwb/wlp/messages.c linux-2.6.32.43/drivers/uwb/wlp/messages.c
35949--- linux-2.6.32.43/drivers/uwb/wlp/messages.c 2011-03-27 14:31:47.000000000 -0400
35950+++ linux-2.6.32.43/drivers/uwb/wlp/messages.c 2011-04-17 15:56:46.000000000 -0400
35951@@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
35952 size_t len = skb->len;
35953 size_t used;
35954 ssize_t result;
35955- struct wlp_nonce enonce, rnonce;
35956+ struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
35957 enum wlp_assc_error assc_err;
35958 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
35959 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
35960diff -urNp linux-2.6.32.43/drivers/uwb/wlp/sysfs.c linux-2.6.32.43/drivers/uwb/wlp/sysfs.c
35961--- linux-2.6.32.43/drivers/uwb/wlp/sysfs.c 2011-03-27 14:31:47.000000000 -0400
35962+++ linux-2.6.32.43/drivers/uwb/wlp/sysfs.c 2011-04-17 15:56:46.000000000 -0400
35963@@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
35964 return ret;
35965 }
35966
35967-static
35968-struct sysfs_ops wss_sysfs_ops = {
35969+static const struct sysfs_ops wss_sysfs_ops = {
35970 .show = wlp_wss_attr_show,
35971 .store = wlp_wss_attr_store,
35972 };
35973diff -urNp linux-2.6.32.43/drivers/video/atmel_lcdfb.c linux-2.6.32.43/drivers/video/atmel_lcdfb.c
35974--- linux-2.6.32.43/drivers/video/atmel_lcdfb.c 2011-03-27 14:31:47.000000000 -0400
35975+++ linux-2.6.32.43/drivers/video/atmel_lcdfb.c 2011-04-17 15:56:46.000000000 -0400
35976@@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
35977 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
35978 }
35979
35980-static struct backlight_ops atmel_lcdc_bl_ops = {
35981+static const struct backlight_ops atmel_lcdc_bl_ops = {
35982 .update_status = atmel_bl_update_status,
35983 .get_brightness = atmel_bl_get_brightness,
35984 };
35985diff -urNp linux-2.6.32.43/drivers/video/aty/aty128fb.c linux-2.6.32.43/drivers/video/aty/aty128fb.c
35986--- linux-2.6.32.43/drivers/video/aty/aty128fb.c 2011-03-27 14:31:47.000000000 -0400
35987+++ linux-2.6.32.43/drivers/video/aty/aty128fb.c 2011-04-17 15:56:46.000000000 -0400
35988@@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
35989 return bd->props.brightness;
35990 }
35991
35992-static struct backlight_ops aty128_bl_data = {
35993+static const struct backlight_ops aty128_bl_data = {
35994 .get_brightness = aty128_bl_get_brightness,
35995 .update_status = aty128_bl_update_status,
35996 };
35997diff -urNp linux-2.6.32.43/drivers/video/aty/atyfb_base.c linux-2.6.32.43/drivers/video/aty/atyfb_base.c
35998--- linux-2.6.32.43/drivers/video/aty/atyfb_base.c 2011-03-27 14:31:47.000000000 -0400
35999+++ linux-2.6.32.43/drivers/video/aty/atyfb_base.c 2011-04-17 15:56:46.000000000 -0400
36000@@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
36001 return bd->props.brightness;
36002 }
36003
36004-static struct backlight_ops aty_bl_data = {
36005+static const struct backlight_ops aty_bl_data = {
36006 .get_brightness = aty_bl_get_brightness,
36007 .update_status = aty_bl_update_status,
36008 };
36009diff -urNp linux-2.6.32.43/drivers/video/aty/radeon_backlight.c linux-2.6.32.43/drivers/video/aty/radeon_backlight.c
36010--- linux-2.6.32.43/drivers/video/aty/radeon_backlight.c 2011-03-27 14:31:47.000000000 -0400
36011+++ linux-2.6.32.43/drivers/video/aty/radeon_backlight.c 2011-04-17 15:56:46.000000000 -0400
36012@@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
36013 return bd->props.brightness;
36014 }
36015
36016-static struct backlight_ops radeon_bl_data = {
36017+static const struct backlight_ops radeon_bl_data = {
36018 .get_brightness = radeon_bl_get_brightness,
36019 .update_status = radeon_bl_update_status,
36020 };
36021diff -urNp linux-2.6.32.43/drivers/video/backlight/adp5520_bl.c linux-2.6.32.43/drivers/video/backlight/adp5520_bl.c
36022--- linux-2.6.32.43/drivers/video/backlight/adp5520_bl.c 2011-03-27 14:31:47.000000000 -0400
36023+++ linux-2.6.32.43/drivers/video/backlight/adp5520_bl.c 2011-04-17 15:56:46.000000000 -0400
36024@@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(str
36025 return error ? data->current_brightness : reg_val;
36026 }
36027
36028-static struct backlight_ops adp5520_bl_ops = {
36029+static const struct backlight_ops adp5520_bl_ops = {
36030 .update_status = adp5520_bl_update_status,
36031 .get_brightness = adp5520_bl_get_brightness,
36032 };
36033diff -urNp linux-2.6.32.43/drivers/video/backlight/adx_bl.c linux-2.6.32.43/drivers/video/backlight/adx_bl.c
36034--- linux-2.6.32.43/drivers/video/backlight/adx_bl.c 2011-03-27 14:31:47.000000000 -0400
36035+++ linux-2.6.32.43/drivers/video/backlight/adx_bl.c 2011-04-17 15:56:46.000000000 -0400
36036@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct
36037 return 1;
36038 }
36039
36040-static struct backlight_ops adx_backlight_ops = {
36041+static const struct backlight_ops adx_backlight_ops = {
36042 .options = 0,
36043 .update_status = adx_backlight_update_status,
36044 .get_brightness = adx_backlight_get_brightness,
36045diff -urNp linux-2.6.32.43/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.32.43/drivers/video/backlight/atmel-pwm-bl.c
36046--- linux-2.6.32.43/drivers/video/backlight/atmel-pwm-bl.c 2011-03-27 14:31:47.000000000 -0400
36047+++ linux-2.6.32.43/drivers/video/backlight/atmel-pwm-bl.c 2011-04-17 15:56:46.000000000 -0400
36048@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
36049 return pwm_channel_enable(&pwmbl->pwmc);
36050 }
36051
36052-static struct backlight_ops atmel_pwm_bl_ops = {
36053+static const struct backlight_ops atmel_pwm_bl_ops = {
36054 .get_brightness = atmel_pwm_bl_get_intensity,
36055 .update_status = atmel_pwm_bl_set_intensity,
36056 };
36057diff -urNp linux-2.6.32.43/drivers/video/backlight/backlight.c linux-2.6.32.43/drivers/video/backlight/backlight.c
36058--- linux-2.6.32.43/drivers/video/backlight/backlight.c 2011-03-27 14:31:47.000000000 -0400
36059+++ linux-2.6.32.43/drivers/video/backlight/backlight.c 2011-04-17 15:56:46.000000000 -0400
36060@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
36061 * ERR_PTR() or a pointer to the newly allocated device.
36062 */
36063 struct backlight_device *backlight_device_register(const char *name,
36064- struct device *parent, void *devdata, struct backlight_ops *ops)
36065+ struct device *parent, void *devdata, const struct backlight_ops *ops)
36066 {
36067 struct backlight_device *new_bd;
36068 int rc;
36069diff -urNp linux-2.6.32.43/drivers/video/backlight/corgi_lcd.c linux-2.6.32.43/drivers/video/backlight/corgi_lcd.c
36070--- linux-2.6.32.43/drivers/video/backlight/corgi_lcd.c 2011-03-27 14:31:47.000000000 -0400
36071+++ linux-2.6.32.43/drivers/video/backlight/corgi_lcd.c 2011-04-17 15:56:46.000000000 -0400
36072@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit
36073 }
36074 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
36075
36076-static struct backlight_ops corgi_bl_ops = {
36077+static const struct backlight_ops corgi_bl_ops = {
36078 .get_brightness = corgi_bl_get_intensity,
36079 .update_status = corgi_bl_update_status,
36080 };
36081diff -urNp linux-2.6.32.43/drivers/video/backlight/cr_bllcd.c linux-2.6.32.43/drivers/video/backlight/cr_bllcd.c
36082--- linux-2.6.32.43/drivers/video/backlight/cr_bllcd.c 2011-03-27 14:31:47.000000000 -0400
36083+++ linux-2.6.32.43/drivers/video/backlight/cr_bllcd.c 2011-04-17 15:56:46.000000000 -0400
36084@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
36085 return intensity;
36086 }
36087
36088-static struct backlight_ops cr_backlight_ops = {
36089+static const struct backlight_ops cr_backlight_ops = {
36090 .get_brightness = cr_backlight_get_intensity,
36091 .update_status = cr_backlight_set_intensity,
36092 };
36093diff -urNp linux-2.6.32.43/drivers/video/backlight/da903x_bl.c linux-2.6.32.43/drivers/video/backlight/da903x_bl.c
36094--- linux-2.6.32.43/drivers/video/backlight/da903x_bl.c 2011-03-27 14:31:47.000000000 -0400
36095+++ linux-2.6.32.43/drivers/video/backlight/da903x_bl.c 2011-04-17 15:56:46.000000000 -0400
36096@@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
36097 return data->current_brightness;
36098 }
36099
36100-static struct backlight_ops da903x_backlight_ops = {
36101+static const struct backlight_ops da903x_backlight_ops = {
36102 .update_status = da903x_backlight_update_status,
36103 .get_brightness = da903x_backlight_get_brightness,
36104 };
36105diff -urNp linux-2.6.32.43/drivers/video/backlight/generic_bl.c linux-2.6.32.43/drivers/video/backlight/generic_bl.c
36106--- linux-2.6.32.43/drivers/video/backlight/generic_bl.c 2011-03-27 14:31:47.000000000 -0400
36107+++ linux-2.6.32.43/drivers/video/backlight/generic_bl.c 2011-04-17 15:56:46.000000000 -0400
36108@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
36109 }
36110 EXPORT_SYMBOL(corgibl_limit_intensity);
36111
36112-static struct backlight_ops genericbl_ops = {
36113+static const struct backlight_ops genericbl_ops = {
36114 .options = BL_CORE_SUSPENDRESUME,
36115 .get_brightness = genericbl_get_intensity,
36116 .update_status = genericbl_send_intensity,
36117diff -urNp linux-2.6.32.43/drivers/video/backlight/hp680_bl.c linux-2.6.32.43/drivers/video/backlight/hp680_bl.c
36118--- linux-2.6.32.43/drivers/video/backlight/hp680_bl.c 2011-03-27 14:31:47.000000000 -0400
36119+++ linux-2.6.32.43/drivers/video/backlight/hp680_bl.c 2011-04-17 15:56:46.000000000 -0400
36120@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
36121 return current_intensity;
36122 }
36123
36124-static struct backlight_ops hp680bl_ops = {
36125+static const struct backlight_ops hp680bl_ops = {
36126 .get_brightness = hp680bl_get_intensity,
36127 .update_status = hp680bl_set_intensity,
36128 };
36129diff -urNp linux-2.6.32.43/drivers/video/backlight/jornada720_bl.c linux-2.6.32.43/drivers/video/backlight/jornada720_bl.c
36130--- linux-2.6.32.43/drivers/video/backlight/jornada720_bl.c 2011-03-27 14:31:47.000000000 -0400
36131+++ linux-2.6.32.43/drivers/video/backlight/jornada720_bl.c 2011-04-17 15:56:46.000000000 -0400
36132@@ -93,7 +93,7 @@ out:
36133 return ret;
36134 }
36135
36136-static struct backlight_ops jornada_bl_ops = {
36137+static const struct backlight_ops jornada_bl_ops = {
36138 .get_brightness = jornada_bl_get_brightness,
36139 .update_status = jornada_bl_update_status,
36140 .options = BL_CORE_SUSPENDRESUME,
36141diff -urNp linux-2.6.32.43/drivers/video/backlight/kb3886_bl.c linux-2.6.32.43/drivers/video/backlight/kb3886_bl.c
36142--- linux-2.6.32.43/drivers/video/backlight/kb3886_bl.c 2011-03-27 14:31:47.000000000 -0400
36143+++ linux-2.6.32.43/drivers/video/backlight/kb3886_bl.c 2011-04-17 15:56:46.000000000 -0400
36144@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
36145 return kb3886bl_intensity;
36146 }
36147
36148-static struct backlight_ops kb3886bl_ops = {
36149+static const struct backlight_ops kb3886bl_ops = {
36150 .get_brightness = kb3886bl_get_intensity,
36151 .update_status = kb3886bl_send_intensity,
36152 };
36153diff -urNp linux-2.6.32.43/drivers/video/backlight/locomolcd.c linux-2.6.32.43/drivers/video/backlight/locomolcd.c
36154--- linux-2.6.32.43/drivers/video/backlight/locomolcd.c 2011-03-27 14:31:47.000000000 -0400
36155+++ linux-2.6.32.43/drivers/video/backlight/locomolcd.c 2011-04-17 15:56:46.000000000 -0400
36156@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
36157 return current_intensity;
36158 }
36159
36160-static struct backlight_ops locomobl_data = {
36161+static const struct backlight_ops locomobl_data = {
36162 .get_brightness = locomolcd_get_intensity,
36163 .update_status = locomolcd_set_intensity,
36164 };
36165diff -urNp linux-2.6.32.43/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.32.43/drivers/video/backlight/mbp_nvidia_bl.c
36166--- linux-2.6.32.43/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:01.000000000 -0400
36167+++ linux-2.6.32.43/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:33.000000000 -0400
36168@@ -33,7 +33,7 @@ struct dmi_match_data {
36169 unsigned long iostart;
36170 unsigned long iolen;
36171 /* Backlight operations structure. */
36172- struct backlight_ops backlight_ops;
36173+ const struct backlight_ops backlight_ops;
36174 };
36175
36176 /* Module parameters. */
36177diff -urNp linux-2.6.32.43/drivers/video/backlight/omap1_bl.c linux-2.6.32.43/drivers/video/backlight/omap1_bl.c
36178--- linux-2.6.32.43/drivers/video/backlight/omap1_bl.c 2011-03-27 14:31:47.000000000 -0400
36179+++ linux-2.6.32.43/drivers/video/backlight/omap1_bl.c 2011-04-17 15:56:46.000000000 -0400
36180@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
36181 return bl->current_intensity;
36182 }
36183
36184-static struct backlight_ops omapbl_ops = {
36185+static const struct backlight_ops omapbl_ops = {
36186 .get_brightness = omapbl_get_intensity,
36187 .update_status = omapbl_update_status,
36188 };
36189diff -urNp linux-2.6.32.43/drivers/video/backlight/progear_bl.c linux-2.6.32.43/drivers/video/backlight/progear_bl.c
36190--- linux-2.6.32.43/drivers/video/backlight/progear_bl.c 2011-03-27 14:31:47.000000000 -0400
36191+++ linux-2.6.32.43/drivers/video/backlight/progear_bl.c 2011-04-17 15:56:46.000000000 -0400
36192@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
36193 return intensity - HW_LEVEL_MIN;
36194 }
36195
36196-static struct backlight_ops progearbl_ops = {
36197+static const struct backlight_ops progearbl_ops = {
36198 .get_brightness = progearbl_get_intensity,
36199 .update_status = progearbl_set_intensity,
36200 };
36201diff -urNp linux-2.6.32.43/drivers/video/backlight/pwm_bl.c linux-2.6.32.43/drivers/video/backlight/pwm_bl.c
36202--- linux-2.6.32.43/drivers/video/backlight/pwm_bl.c 2011-03-27 14:31:47.000000000 -0400
36203+++ linux-2.6.32.43/drivers/video/backlight/pwm_bl.c 2011-04-17 15:56:46.000000000 -0400
36204@@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
36205 return bl->props.brightness;
36206 }
36207
36208-static struct backlight_ops pwm_backlight_ops = {
36209+static const struct backlight_ops pwm_backlight_ops = {
36210 .update_status = pwm_backlight_update_status,
36211 .get_brightness = pwm_backlight_get_brightness,
36212 };
36213diff -urNp linux-2.6.32.43/drivers/video/backlight/tosa_bl.c linux-2.6.32.43/drivers/video/backlight/tosa_bl.c
36214--- linux-2.6.32.43/drivers/video/backlight/tosa_bl.c 2011-03-27 14:31:47.000000000 -0400
36215+++ linux-2.6.32.43/drivers/video/backlight/tosa_bl.c 2011-04-17 15:56:46.000000000 -0400
36216@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
36217 return props->brightness;
36218 }
36219
36220-static struct backlight_ops bl_ops = {
36221+static const struct backlight_ops bl_ops = {
36222 .get_brightness = tosa_bl_get_brightness,
36223 .update_status = tosa_bl_update_status,
36224 };
36225diff -urNp linux-2.6.32.43/drivers/video/backlight/wm831x_bl.c linux-2.6.32.43/drivers/video/backlight/wm831x_bl.c
36226--- linux-2.6.32.43/drivers/video/backlight/wm831x_bl.c 2011-03-27 14:31:47.000000000 -0400
36227+++ linux-2.6.32.43/drivers/video/backlight/wm831x_bl.c 2011-04-17 15:56:46.000000000 -0400
36228@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightne
36229 return data->current_brightness;
36230 }
36231
36232-static struct backlight_ops wm831x_backlight_ops = {
36233+static const struct backlight_ops wm831x_backlight_ops = {
36234 .options = BL_CORE_SUSPENDRESUME,
36235 .update_status = wm831x_backlight_update_status,
36236 .get_brightness = wm831x_backlight_get_brightness,
36237diff -urNp linux-2.6.32.43/drivers/video/bf54x-lq043fb.c linux-2.6.32.43/drivers/video/bf54x-lq043fb.c
36238--- linux-2.6.32.43/drivers/video/bf54x-lq043fb.c 2011-03-27 14:31:47.000000000 -0400
36239+++ linux-2.6.32.43/drivers/video/bf54x-lq043fb.c 2011-04-17 15:56:46.000000000 -0400
36240@@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
36241 return 0;
36242 }
36243
36244-static struct backlight_ops bfin_lq043fb_bl_ops = {
36245+static const struct backlight_ops bfin_lq043fb_bl_ops = {
36246 .get_brightness = bl_get_brightness,
36247 };
36248
36249diff -urNp linux-2.6.32.43/drivers/video/bfin-t350mcqb-fb.c linux-2.6.32.43/drivers/video/bfin-t350mcqb-fb.c
36250--- linux-2.6.32.43/drivers/video/bfin-t350mcqb-fb.c 2011-03-27 14:31:47.000000000 -0400
36251+++ linux-2.6.32.43/drivers/video/bfin-t350mcqb-fb.c 2011-04-17 15:56:46.000000000 -0400
36252@@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
36253 return 0;
36254 }
36255
36256-static struct backlight_ops bfin_lq043fb_bl_ops = {
36257+static const struct backlight_ops bfin_lq043fb_bl_ops = {
36258 .get_brightness = bl_get_brightness,
36259 };
36260
36261diff -urNp linux-2.6.32.43/drivers/video/fbcmap.c linux-2.6.32.43/drivers/video/fbcmap.c
36262--- linux-2.6.32.43/drivers/video/fbcmap.c 2011-03-27 14:31:47.000000000 -0400
36263+++ linux-2.6.32.43/drivers/video/fbcmap.c 2011-04-17 15:56:46.000000000 -0400
36264@@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
36265 rc = -ENODEV;
36266 goto out;
36267 }
36268- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
36269- !info->fbops->fb_setcmap)) {
36270+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
36271 rc = -EINVAL;
36272 goto out1;
36273 }
36274diff -urNp linux-2.6.32.43/drivers/video/fbmem.c linux-2.6.32.43/drivers/video/fbmem.c
36275--- linux-2.6.32.43/drivers/video/fbmem.c 2011-03-27 14:31:47.000000000 -0400
36276+++ linux-2.6.32.43/drivers/video/fbmem.c 2011-05-16 21:46:57.000000000 -0400
36277@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
36278 image->dx += image->width + 8;
36279 }
36280 } else if (rotate == FB_ROTATE_UD) {
36281- for (x = 0; x < num && image->dx >= 0; x++) {
36282+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
36283 info->fbops->fb_imageblit(info, image);
36284 image->dx -= image->width + 8;
36285 }
36286@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
36287 image->dy += image->height + 8;
36288 }
36289 } else if (rotate == FB_ROTATE_CCW) {
36290- for (x = 0; x < num && image->dy >= 0; x++) {
36291+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
36292 info->fbops->fb_imageblit(info, image);
36293 image->dy -= image->height + 8;
36294 }
36295@@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct
36296 int flags = info->flags;
36297 int ret = 0;
36298
36299+ pax_track_stack();
36300+
36301 if (var->activate & FB_ACTIVATE_INV_MODE) {
36302 struct fb_videomode mode1, mode2;
36303
36304@@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *
36305 void __user *argp = (void __user *)arg;
36306 long ret = 0;
36307
36308+ pax_track_stack();
36309+
36310 switch (cmd) {
36311 case FBIOGET_VSCREENINFO:
36312 if (!lock_fb_info(info))
36313@@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *
36314 return -EFAULT;
36315 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
36316 return -EINVAL;
36317- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
36318+ if (con2fb.framebuffer >= FB_MAX)
36319 return -EINVAL;
36320 if (!registered_fb[con2fb.framebuffer])
36321 request_module("fb%d", con2fb.framebuffer);
36322diff -urNp linux-2.6.32.43/drivers/video/i810/i810_accel.c linux-2.6.32.43/drivers/video/i810/i810_accel.c
36323--- linux-2.6.32.43/drivers/video/i810/i810_accel.c 2011-03-27 14:31:47.000000000 -0400
36324+++ linux-2.6.32.43/drivers/video/i810/i810_accel.c 2011-04-17 15:56:46.000000000 -0400
36325@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
36326 }
36327 }
36328 printk("ringbuffer lockup!!!\n");
36329+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
36330 i810_report_error(mmio);
36331 par->dev_flags |= LOCKUP;
36332 info->pixmap.scan_align = 1;
36333diff -urNp linux-2.6.32.43/drivers/video/nvidia/nv_backlight.c linux-2.6.32.43/drivers/video/nvidia/nv_backlight.c
36334--- linux-2.6.32.43/drivers/video/nvidia/nv_backlight.c 2011-03-27 14:31:47.000000000 -0400
36335+++ linux-2.6.32.43/drivers/video/nvidia/nv_backlight.c 2011-04-17 15:56:46.000000000 -0400
36336@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
36337 return bd->props.brightness;
36338 }
36339
36340-static struct backlight_ops nvidia_bl_ops = {
36341+static const struct backlight_ops nvidia_bl_ops = {
36342 .get_brightness = nvidia_bl_get_brightness,
36343 .update_status = nvidia_bl_update_status,
36344 };
36345diff -urNp linux-2.6.32.43/drivers/video/riva/fbdev.c linux-2.6.32.43/drivers/video/riva/fbdev.c
36346--- linux-2.6.32.43/drivers/video/riva/fbdev.c 2011-03-27 14:31:47.000000000 -0400
36347+++ linux-2.6.32.43/drivers/video/riva/fbdev.c 2011-04-17 15:56:46.000000000 -0400
36348@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
36349 return bd->props.brightness;
36350 }
36351
36352-static struct backlight_ops riva_bl_ops = {
36353+static const struct backlight_ops riva_bl_ops = {
36354 .get_brightness = riva_bl_get_brightness,
36355 .update_status = riva_bl_update_status,
36356 };
36357diff -urNp linux-2.6.32.43/drivers/video/uvesafb.c linux-2.6.32.43/drivers/video/uvesafb.c
36358--- linux-2.6.32.43/drivers/video/uvesafb.c 2011-03-27 14:31:47.000000000 -0400
36359+++ linux-2.6.32.43/drivers/video/uvesafb.c 2011-04-17 15:56:46.000000000 -0400
36360@@ -18,6 +18,7 @@
36361 #include <linux/fb.h>
36362 #include <linux/io.h>
36363 #include <linux/mutex.h>
36364+#include <linux/moduleloader.h>
36365 #include <video/edid.h>
36366 #include <video/uvesafb.h>
36367 #ifdef CONFIG_X86
36368@@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
36369 NULL,
36370 };
36371
36372- return call_usermodehelper(v86d_path, argv, envp, 1);
36373+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
36374 }
36375
36376 /*
36377@@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
36378 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
36379 par->pmi_setpal = par->ypan = 0;
36380 } else {
36381+
36382+#ifdef CONFIG_PAX_KERNEXEC
36383+#ifdef CONFIG_MODULES
36384+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
36385+#endif
36386+ if (!par->pmi_code) {
36387+ par->pmi_setpal = par->ypan = 0;
36388+ return 0;
36389+ }
36390+#endif
36391+
36392 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
36393 + task->t.regs.edi);
36394+
36395+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36396+ pax_open_kernel();
36397+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
36398+ pax_close_kernel();
36399+
36400+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
36401+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
36402+#else
36403 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
36404 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
36405+#endif
36406+
36407 printk(KERN_INFO "uvesafb: protected mode interface info at "
36408 "%04x:%04x\n",
36409 (u16)task->t.regs.es, (u16)task->t.regs.edi);
36410@@ -1799,6 +1822,11 @@ out:
36411 if (par->vbe_modes)
36412 kfree(par->vbe_modes);
36413
36414+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36415+ if (par->pmi_code)
36416+ module_free_exec(NULL, par->pmi_code);
36417+#endif
36418+
36419 framebuffer_release(info);
36420 return err;
36421 }
36422@@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platfor
36423 kfree(par->vbe_state_orig);
36424 if (par->vbe_state_saved)
36425 kfree(par->vbe_state_saved);
36426+
36427+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36428+ if (par->pmi_code)
36429+ module_free_exec(NULL, par->pmi_code);
36430+#endif
36431+
36432 }
36433
36434 framebuffer_release(info);
36435diff -urNp linux-2.6.32.43/drivers/video/vesafb.c linux-2.6.32.43/drivers/video/vesafb.c
36436--- linux-2.6.32.43/drivers/video/vesafb.c 2011-03-27 14:31:47.000000000 -0400
36437+++ linux-2.6.32.43/drivers/video/vesafb.c 2011-04-17 15:56:46.000000000 -0400
36438@@ -9,6 +9,7 @@
36439 */
36440
36441 #include <linux/module.h>
36442+#include <linux/moduleloader.h>
36443 #include <linux/kernel.h>
36444 #include <linux/errno.h>
36445 #include <linux/string.h>
36446@@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
36447 static int vram_total __initdata; /* Set total amount of memory */
36448 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
36449 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
36450-static void (*pmi_start)(void) __read_mostly;
36451-static void (*pmi_pal) (void) __read_mostly;
36452+static void (*pmi_start)(void) __read_only;
36453+static void (*pmi_pal) (void) __read_only;
36454 static int depth __read_mostly;
36455 static int vga_compat __read_mostly;
36456 /* --------------------------------------------------------------------- */
36457@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
36458 unsigned int size_vmode;
36459 unsigned int size_remap;
36460 unsigned int size_total;
36461+ void *pmi_code = NULL;
36462
36463 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
36464 return -ENODEV;
36465@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
36466 size_remap = size_total;
36467 vesafb_fix.smem_len = size_remap;
36468
36469-#ifndef __i386__
36470- screen_info.vesapm_seg = 0;
36471-#endif
36472-
36473 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
36474 printk(KERN_WARNING
36475 "vesafb: cannot reserve video memory at 0x%lx\n",
36476@@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
36477 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
36478 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
36479
36480+#ifdef __i386__
36481+
36482+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36483+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
36484+ if (!pmi_code)
36485+#elif !defined(CONFIG_PAX_KERNEXEC)
36486+ if (0)
36487+#endif
36488+
36489+#endif
36490+ screen_info.vesapm_seg = 0;
36491+
36492 if (screen_info.vesapm_seg) {
36493- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
36494- screen_info.vesapm_seg,screen_info.vesapm_off);
36495+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
36496+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
36497 }
36498
36499 if (screen_info.vesapm_seg < 0xc000)
36500@@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
36501
36502 if (ypan || pmi_setpal) {
36503 unsigned short *pmi_base;
36504- pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
36505- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
36506- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
36507+
36508+ pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
36509+
36510+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36511+ pax_open_kernel();
36512+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
36513+#else
36514+ pmi_code = pmi_base;
36515+#endif
36516+
36517+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
36518+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
36519+
36520+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36521+ pmi_start = ktva_ktla(pmi_start);
36522+ pmi_pal = ktva_ktla(pmi_pal);
36523+ pax_close_kernel();
36524+#endif
36525+
36526 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
36527 if (pmi_base[3]) {
36528 printk(KERN_INFO "vesafb: pmi: ports = ");
36529@@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
36530 info->node, info->fix.id);
36531 return 0;
36532 err:
36533+
36534+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
36535+ module_free_exec(NULL, pmi_code);
36536+#endif
36537+
36538 if (info->screen_base)
36539 iounmap(info->screen_base);
36540 framebuffer_release(info);
36541diff -urNp linux-2.6.32.43/drivers/xen/sys-hypervisor.c linux-2.6.32.43/drivers/xen/sys-hypervisor.c
36542--- linux-2.6.32.43/drivers/xen/sys-hypervisor.c 2011-03-27 14:31:47.000000000 -0400
36543+++ linux-2.6.32.43/drivers/xen/sys-hypervisor.c 2011-04-17 15:56:46.000000000 -0400
36544@@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
36545 return 0;
36546 }
36547
36548-static struct sysfs_ops hyp_sysfs_ops = {
36549+static const struct sysfs_ops hyp_sysfs_ops = {
36550 .show = hyp_sysfs_show,
36551 .store = hyp_sysfs_store,
36552 };
36553diff -urNp linux-2.6.32.43/fs/9p/vfs_inode.c linux-2.6.32.43/fs/9p/vfs_inode.c
36554--- linux-2.6.32.43/fs/9p/vfs_inode.c 2011-03-27 14:31:47.000000000 -0400
36555+++ linux-2.6.32.43/fs/9p/vfs_inode.c 2011-04-17 15:56:46.000000000 -0400
36556@@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct
36557 static void
36558 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
36559 {
36560- char *s = nd_get_link(nd);
36561+ const char *s = nd_get_link(nd);
36562
36563 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
36564 IS_ERR(s) ? "<error>" : s);
36565diff -urNp linux-2.6.32.43/fs/aio.c linux-2.6.32.43/fs/aio.c
36566--- linux-2.6.32.43/fs/aio.c 2011-03-27 14:31:47.000000000 -0400
36567+++ linux-2.6.32.43/fs/aio.c 2011-06-04 20:40:21.000000000 -0400
36568@@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx
36569 size += sizeof(struct io_event) * nr_events;
36570 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
36571
36572- if (nr_pages < 0)
36573+ if (nr_pages <= 0)
36574 return -EINVAL;
36575
36576 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
36577@@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ct
36578 struct aio_timeout to;
36579 int retry = 0;
36580
36581+ pax_track_stack();
36582+
36583 /* needed to zero any padding within an entry (there shouldn't be
36584 * any, but C is fun!
36585 */
36586@@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *i
36587 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
36588 {
36589 ssize_t ret;
36590+ struct iovec iovstack;
36591
36592 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
36593 kiocb->ki_nbytes, 1,
36594- &kiocb->ki_inline_vec, &kiocb->ki_iovec);
36595+ &iovstack, &kiocb->ki_iovec);
36596 if (ret < 0)
36597 goto out;
36598
36599+ if (kiocb->ki_iovec == &iovstack) {
36600+ kiocb->ki_inline_vec = iovstack;
36601+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
36602+ }
36603 kiocb->ki_nr_segs = kiocb->ki_nbytes;
36604 kiocb->ki_cur_seg = 0;
36605 /* ki_nbytes/left now reflect bytes instead of segs */
36606diff -urNp linux-2.6.32.43/fs/attr.c linux-2.6.32.43/fs/attr.c
36607--- linux-2.6.32.43/fs/attr.c 2011-03-27 14:31:47.000000000 -0400
36608+++ linux-2.6.32.43/fs/attr.c 2011-04-17 15:56:46.000000000 -0400
36609@@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode
36610 unsigned long limit;
36611
36612 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
36613+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
36614 if (limit != RLIM_INFINITY && offset > limit)
36615 goto out_sig;
36616 if (offset > inode->i_sb->s_maxbytes)
36617diff -urNp linux-2.6.32.43/fs/autofs/root.c linux-2.6.32.43/fs/autofs/root.c
36618--- linux-2.6.32.43/fs/autofs/root.c 2011-03-27 14:31:47.000000000 -0400
36619+++ linux-2.6.32.43/fs/autofs/root.c 2011-04-17 15:56:46.000000000 -0400
36620@@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
36621 set_bit(n,sbi->symlink_bitmap);
36622 sl = &sbi->symlink[n];
36623 sl->len = strlen(symname);
36624- sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
36625+ slsize = sl->len+1;
36626+ sl->data = kmalloc(slsize, GFP_KERNEL);
36627 if (!sl->data) {
36628 clear_bit(n,sbi->symlink_bitmap);
36629 unlock_kernel();
36630diff -urNp linux-2.6.32.43/fs/autofs4/symlink.c linux-2.6.32.43/fs/autofs4/symlink.c
36631--- linux-2.6.32.43/fs/autofs4/symlink.c 2011-03-27 14:31:47.000000000 -0400
36632+++ linux-2.6.32.43/fs/autofs4/symlink.c 2011-04-17 15:56:46.000000000 -0400
36633@@ -15,7 +15,7 @@
36634 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
36635 {
36636 struct autofs_info *ino = autofs4_dentry_ino(dentry);
36637- nd_set_link(nd, (char *)ino->u.symlink);
36638+ nd_set_link(nd, ino->u.symlink);
36639 return NULL;
36640 }
36641
36642diff -urNp linux-2.6.32.43/fs/befs/linuxvfs.c linux-2.6.32.43/fs/befs/linuxvfs.c
36643--- linux-2.6.32.43/fs/befs/linuxvfs.c 2011-03-27 14:31:47.000000000 -0400
36644+++ linux-2.6.32.43/fs/befs/linuxvfs.c 2011-04-17 15:56:46.000000000 -0400
36645@@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
36646 {
36647 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
36648 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
36649- char *link = nd_get_link(nd);
36650+ const char *link = nd_get_link(nd);
36651 if (!IS_ERR(link))
36652 kfree(link);
36653 }
36654diff -urNp linux-2.6.32.43/fs/binfmt_aout.c linux-2.6.32.43/fs/binfmt_aout.c
36655--- linux-2.6.32.43/fs/binfmt_aout.c 2011-03-27 14:31:47.000000000 -0400
36656+++ linux-2.6.32.43/fs/binfmt_aout.c 2011-04-17 15:56:46.000000000 -0400
36657@@ -16,6 +16,7 @@
36658 #include <linux/string.h>
36659 #include <linux/fs.h>
36660 #include <linux/file.h>
36661+#include <linux/security.h>
36662 #include <linux/stat.h>
36663 #include <linux/fcntl.h>
36664 #include <linux/ptrace.h>
36665@@ -102,6 +103,8 @@ static int aout_core_dump(long signr, st
36666 #endif
36667 # define START_STACK(u) (u.start_stack)
36668
36669+ memset(&dump, 0, sizeof(dump));
36670+
36671 fs = get_fs();
36672 set_fs(KERNEL_DS);
36673 has_dumped = 1;
36674@@ -113,10 +116,12 @@ static int aout_core_dump(long signr, st
36675
36676 /* If the size of the dump file exceeds the rlimit, then see what would happen
36677 if we wrote the stack, but not the data area. */
36678+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
36679 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
36680 dump.u_dsize = 0;
36681
36682 /* Make sure we have enough room to write the stack and data areas. */
36683+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
36684 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
36685 dump.u_ssize = 0;
36686
36687@@ -146,9 +151,7 @@ static int aout_core_dump(long signr, st
36688 dump_size = dump.u_ssize << PAGE_SHIFT;
36689 DUMP_WRITE(dump_start,dump_size);
36690 }
36691-/* Finally dump the task struct. Not be used by gdb, but could be useful */
36692- set_fs(KERNEL_DS);
36693- DUMP_WRITE(current,sizeof(*current));
36694+/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
36695 end_coredump:
36696 set_fs(fs);
36697 return has_dumped;
36698@@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
36699 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
36700 if (rlim >= RLIM_INFINITY)
36701 rlim = ~0;
36702+
36703+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
36704 if (ex.a_data + ex.a_bss > rlim)
36705 return -ENOMEM;
36706
36707@@ -277,6 +282,27 @@ static int load_aout_binary(struct linux
36708 install_exec_creds(bprm);
36709 current->flags &= ~PF_FORKNOEXEC;
36710
36711+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
36712+ current->mm->pax_flags = 0UL;
36713+#endif
36714+
36715+#ifdef CONFIG_PAX_PAGEEXEC
36716+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
36717+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
36718+
36719+#ifdef CONFIG_PAX_EMUTRAMP
36720+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
36721+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
36722+#endif
36723+
36724+#ifdef CONFIG_PAX_MPROTECT
36725+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
36726+ current->mm->pax_flags |= MF_PAX_MPROTECT;
36727+#endif
36728+
36729+ }
36730+#endif
36731+
36732 if (N_MAGIC(ex) == OMAGIC) {
36733 unsigned long text_addr, map_size;
36734 loff_t pos;
36735@@ -349,7 +375,7 @@ static int load_aout_binary(struct linux
36736
36737 down_write(&current->mm->mmap_sem);
36738 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
36739- PROT_READ | PROT_WRITE | PROT_EXEC,
36740+ PROT_READ | PROT_WRITE,
36741 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
36742 fd_offset + ex.a_text);
36743 up_write(&current->mm->mmap_sem);
36744diff -urNp linux-2.6.32.43/fs/binfmt_elf.c linux-2.6.32.43/fs/binfmt_elf.c
36745--- linux-2.6.32.43/fs/binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
36746+++ linux-2.6.32.43/fs/binfmt_elf.c 2011-05-16 21:46:57.000000000 -0400
36747@@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
36748 #define elf_core_dump NULL
36749 #endif
36750
36751+#ifdef CONFIG_PAX_MPROTECT
36752+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
36753+#endif
36754+
36755 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
36756 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
36757 #else
36758@@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
36759 .load_binary = load_elf_binary,
36760 .load_shlib = load_elf_library,
36761 .core_dump = elf_core_dump,
36762+
36763+#ifdef CONFIG_PAX_MPROTECT
36764+ .handle_mprotect= elf_handle_mprotect,
36765+#endif
36766+
36767 .min_coredump = ELF_EXEC_PAGESIZE,
36768 .hasvdso = 1
36769 };
36770@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
36771
36772 static int set_brk(unsigned long start, unsigned long end)
36773 {
36774+ unsigned long e = end;
36775+
36776 start = ELF_PAGEALIGN(start);
36777 end = ELF_PAGEALIGN(end);
36778 if (end > start) {
36779@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
36780 if (BAD_ADDR(addr))
36781 return addr;
36782 }
36783- current->mm->start_brk = current->mm->brk = end;
36784+ current->mm->start_brk = current->mm->brk = e;
36785 return 0;
36786 }
36787
36788@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
36789 elf_addr_t __user *u_rand_bytes;
36790 const char *k_platform = ELF_PLATFORM;
36791 const char *k_base_platform = ELF_BASE_PLATFORM;
36792- unsigned char k_rand_bytes[16];
36793+ u32 k_rand_bytes[4];
36794 int items;
36795 elf_addr_t *elf_info;
36796 int ei_index = 0;
36797 const struct cred *cred = current_cred();
36798 struct vm_area_struct *vma;
36799+ unsigned long saved_auxv[AT_VECTOR_SIZE];
36800+
36801+ pax_track_stack();
36802
36803 /*
36804 * In some cases (e.g. Hyper-Threading), we want to avoid L1
36805@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
36806 * Generate 16 random bytes for userspace PRNG seeding.
36807 */
36808 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
36809- u_rand_bytes = (elf_addr_t __user *)
36810- STACK_ALLOC(p, sizeof(k_rand_bytes));
36811+ srandom32(k_rand_bytes[0] ^ random32());
36812+ srandom32(k_rand_bytes[1] ^ random32());
36813+ srandom32(k_rand_bytes[2] ^ random32());
36814+ srandom32(k_rand_bytes[3] ^ random32());
36815+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
36816+ u_rand_bytes = (elf_addr_t __user *) p;
36817 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
36818 return -EFAULT;
36819
36820@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
36821 return -EFAULT;
36822 current->mm->env_end = p;
36823
36824+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
36825+
36826 /* Put the elf_info on the stack in the right place. */
36827 sp = (elf_addr_t __user *)envp + 1;
36828- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
36829+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
36830 return -EFAULT;
36831 return 0;
36832 }
36833@@ -385,10 +405,10 @@ static unsigned long load_elf_interp(str
36834 {
36835 struct elf_phdr *elf_phdata;
36836 struct elf_phdr *eppnt;
36837- unsigned long load_addr = 0;
36838+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
36839 int load_addr_set = 0;
36840 unsigned long last_bss = 0, elf_bss = 0;
36841- unsigned long error = ~0UL;
36842+ unsigned long error = -EINVAL;
36843 unsigned long total_size;
36844 int retval, i, size;
36845
36846@@ -434,6 +454,11 @@ static unsigned long load_elf_interp(str
36847 goto out_close;
36848 }
36849
36850+#ifdef CONFIG_PAX_SEGMEXEC
36851+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
36852+ pax_task_size = SEGMEXEC_TASK_SIZE;
36853+#endif
36854+
36855 eppnt = elf_phdata;
36856 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
36857 if (eppnt->p_type == PT_LOAD) {
36858@@ -477,8 +502,8 @@ static unsigned long load_elf_interp(str
36859 k = load_addr + eppnt->p_vaddr;
36860 if (BAD_ADDR(k) ||
36861 eppnt->p_filesz > eppnt->p_memsz ||
36862- eppnt->p_memsz > TASK_SIZE ||
36863- TASK_SIZE - eppnt->p_memsz < k) {
36864+ eppnt->p_memsz > pax_task_size ||
36865+ pax_task_size - eppnt->p_memsz < k) {
36866 error = -ENOMEM;
36867 goto out_close;
36868 }
36869@@ -532,6 +557,194 @@ out:
36870 return error;
36871 }
36872
36873+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
36874+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
36875+{
36876+ unsigned long pax_flags = 0UL;
36877+
36878+#ifdef CONFIG_PAX_PAGEEXEC
36879+ if (elf_phdata->p_flags & PF_PAGEEXEC)
36880+ pax_flags |= MF_PAX_PAGEEXEC;
36881+#endif
36882+
36883+#ifdef CONFIG_PAX_SEGMEXEC
36884+ if (elf_phdata->p_flags & PF_SEGMEXEC)
36885+ pax_flags |= MF_PAX_SEGMEXEC;
36886+#endif
36887+
36888+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36889+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36890+ if (nx_enabled)
36891+ pax_flags &= ~MF_PAX_SEGMEXEC;
36892+ else
36893+ pax_flags &= ~MF_PAX_PAGEEXEC;
36894+ }
36895+#endif
36896+
36897+#ifdef CONFIG_PAX_EMUTRAMP
36898+ if (elf_phdata->p_flags & PF_EMUTRAMP)
36899+ pax_flags |= MF_PAX_EMUTRAMP;
36900+#endif
36901+
36902+#ifdef CONFIG_PAX_MPROTECT
36903+ if (elf_phdata->p_flags & PF_MPROTECT)
36904+ pax_flags |= MF_PAX_MPROTECT;
36905+#endif
36906+
36907+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36908+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
36909+ pax_flags |= MF_PAX_RANDMMAP;
36910+#endif
36911+
36912+ return pax_flags;
36913+}
36914+#endif
36915+
36916+#ifdef CONFIG_PAX_PT_PAX_FLAGS
36917+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
36918+{
36919+ unsigned long pax_flags = 0UL;
36920+
36921+#ifdef CONFIG_PAX_PAGEEXEC
36922+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
36923+ pax_flags |= MF_PAX_PAGEEXEC;
36924+#endif
36925+
36926+#ifdef CONFIG_PAX_SEGMEXEC
36927+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
36928+ pax_flags |= MF_PAX_SEGMEXEC;
36929+#endif
36930+
36931+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36932+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36933+ if (nx_enabled)
36934+ pax_flags &= ~MF_PAX_SEGMEXEC;
36935+ else
36936+ pax_flags &= ~MF_PAX_PAGEEXEC;
36937+ }
36938+#endif
36939+
36940+#ifdef CONFIG_PAX_EMUTRAMP
36941+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
36942+ pax_flags |= MF_PAX_EMUTRAMP;
36943+#endif
36944+
36945+#ifdef CONFIG_PAX_MPROTECT
36946+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
36947+ pax_flags |= MF_PAX_MPROTECT;
36948+#endif
36949+
36950+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
36951+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
36952+ pax_flags |= MF_PAX_RANDMMAP;
36953+#endif
36954+
36955+ return pax_flags;
36956+}
36957+#endif
36958+
36959+#ifdef CONFIG_PAX_EI_PAX
36960+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
36961+{
36962+ unsigned long pax_flags = 0UL;
36963+
36964+#ifdef CONFIG_PAX_PAGEEXEC
36965+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
36966+ pax_flags |= MF_PAX_PAGEEXEC;
36967+#endif
36968+
36969+#ifdef CONFIG_PAX_SEGMEXEC
36970+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
36971+ pax_flags |= MF_PAX_SEGMEXEC;
36972+#endif
36973+
36974+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
36975+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
36976+ if (nx_enabled)
36977+ pax_flags &= ~MF_PAX_SEGMEXEC;
36978+ else
36979+ pax_flags &= ~MF_PAX_PAGEEXEC;
36980+ }
36981+#endif
36982+
36983+#ifdef CONFIG_PAX_EMUTRAMP
36984+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
36985+ pax_flags |= MF_PAX_EMUTRAMP;
36986+#endif
36987+
36988+#ifdef CONFIG_PAX_MPROTECT
36989+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
36990+ pax_flags |= MF_PAX_MPROTECT;
36991+#endif
36992+
36993+#ifdef CONFIG_PAX_ASLR
36994+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
36995+ pax_flags |= MF_PAX_RANDMMAP;
36996+#endif
36997+
36998+ return pax_flags;
36999+}
37000+#endif
37001+
37002+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
37003+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
37004+{
37005+ unsigned long pax_flags = 0UL;
37006+
37007+#ifdef CONFIG_PAX_PT_PAX_FLAGS
37008+ unsigned long i;
37009+ int found_flags = 0;
37010+#endif
37011+
37012+#ifdef CONFIG_PAX_EI_PAX
37013+ pax_flags = pax_parse_ei_pax(elf_ex);
37014+#endif
37015+
37016+#ifdef CONFIG_PAX_PT_PAX_FLAGS
37017+ for (i = 0UL; i < elf_ex->e_phnum; i++)
37018+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
37019+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
37020+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
37021+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
37022+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
37023+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
37024+ return -EINVAL;
37025+
37026+#ifdef CONFIG_PAX_SOFTMODE
37027+ if (pax_softmode)
37028+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
37029+ else
37030+#endif
37031+
37032+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
37033+ found_flags = 1;
37034+ break;
37035+ }
37036+#endif
37037+
37038+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
37039+ if (found_flags == 0) {
37040+ struct elf_phdr phdr;
37041+ memset(&phdr, 0, sizeof(phdr));
37042+ phdr.p_flags = PF_NOEMUTRAMP;
37043+#ifdef CONFIG_PAX_SOFTMODE
37044+ if (pax_softmode)
37045+ pax_flags = pax_parse_softmode(&phdr);
37046+ else
37047+#endif
37048+ pax_flags = pax_parse_hardmode(&phdr);
37049+ }
37050+#endif
37051+
37052+
37053+ if (0 > pax_check_flags(&pax_flags))
37054+ return -EINVAL;
37055+
37056+ current->mm->pax_flags = pax_flags;
37057+ return 0;
37058+}
37059+#endif
37060+
37061 /*
37062 * These are the functions used to load ELF style executables and shared
37063 * libraries. There is no binary dependent code anywhere else.
37064@@ -548,6 +761,11 @@ static unsigned long randomize_stack_top
37065 {
37066 unsigned int random_variable = 0;
37067
37068+#ifdef CONFIG_PAX_RANDUSTACK
37069+ if (randomize_va_space)
37070+ return stack_top - current->mm->delta_stack;
37071+#endif
37072+
37073 if ((current->flags & PF_RANDOMIZE) &&
37074 !(current->personality & ADDR_NO_RANDOMIZE)) {
37075 random_variable = get_random_int() & STACK_RND_MASK;
37076@@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_
37077 unsigned long load_addr = 0, load_bias = 0;
37078 int load_addr_set = 0;
37079 char * elf_interpreter = NULL;
37080- unsigned long error;
37081+ unsigned long error = 0;
37082 struct elf_phdr *elf_ppnt, *elf_phdata;
37083 unsigned long elf_bss, elf_brk;
37084 int retval, i;
37085@@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_
37086 unsigned long start_code, end_code, start_data, end_data;
37087 unsigned long reloc_func_desc = 0;
37088 int executable_stack = EXSTACK_DEFAULT;
37089- unsigned long def_flags = 0;
37090 struct {
37091 struct elfhdr elf_ex;
37092 struct elfhdr interp_elf_ex;
37093 } *loc;
37094+ unsigned long pax_task_size = TASK_SIZE;
37095
37096 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
37097 if (!loc) {
37098@@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_
37099
37100 /* OK, This is the point of no return */
37101 current->flags &= ~PF_FORKNOEXEC;
37102- current->mm->def_flags = def_flags;
37103+
37104+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
37105+ current->mm->pax_flags = 0UL;
37106+#endif
37107+
37108+#ifdef CONFIG_PAX_DLRESOLVE
37109+ current->mm->call_dl_resolve = 0UL;
37110+#endif
37111+
37112+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
37113+ current->mm->call_syscall = 0UL;
37114+#endif
37115+
37116+#ifdef CONFIG_PAX_ASLR
37117+ current->mm->delta_mmap = 0UL;
37118+ current->mm->delta_stack = 0UL;
37119+#endif
37120+
37121+ current->mm->def_flags = 0;
37122+
37123+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
37124+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
37125+ send_sig(SIGKILL, current, 0);
37126+ goto out_free_dentry;
37127+ }
37128+#endif
37129+
37130+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
37131+ pax_set_initial_flags(bprm);
37132+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
37133+ if (pax_set_initial_flags_func)
37134+ (pax_set_initial_flags_func)(bprm);
37135+#endif
37136+
37137+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
37138+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
37139+ current->mm->context.user_cs_limit = PAGE_SIZE;
37140+ current->mm->def_flags |= VM_PAGEEXEC;
37141+ }
37142+#endif
37143+
37144+#ifdef CONFIG_PAX_SEGMEXEC
37145+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
37146+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
37147+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
37148+ pax_task_size = SEGMEXEC_TASK_SIZE;
37149+ }
37150+#endif
37151+
37152+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
37153+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37154+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
37155+ put_cpu();
37156+ }
37157+#endif
37158
37159 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
37160 may depend on the personality. */
37161 SET_PERSONALITY(loc->elf_ex);
37162+
37163+#ifdef CONFIG_PAX_ASLR
37164+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
37165+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
37166+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
37167+ }
37168+#endif
37169+
37170+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
37171+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
37172+ executable_stack = EXSTACK_DISABLE_X;
37173+ current->personality &= ~READ_IMPLIES_EXEC;
37174+ } else
37175+#endif
37176+
37177 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
37178 current->personality |= READ_IMPLIES_EXEC;
37179
37180@@ -804,6 +1091,20 @@ static int load_elf_binary(struct linux_
37181 #else
37182 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
37183 #endif
37184+
37185+#ifdef CONFIG_PAX_RANDMMAP
37186+ /* PaX: randomize base address at the default exe base if requested */
37187+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
37188+#ifdef CONFIG_SPARC64
37189+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
37190+#else
37191+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
37192+#endif
37193+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
37194+ elf_flags |= MAP_FIXED;
37195+ }
37196+#endif
37197+
37198 }
37199
37200 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
37201@@ -836,9 +1137,9 @@ static int load_elf_binary(struct linux_
37202 * allowed task size. Note that p_filesz must always be
37203 * <= p_memsz so it is only necessary to check p_memsz.
37204 */
37205- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
37206- elf_ppnt->p_memsz > TASK_SIZE ||
37207- TASK_SIZE - elf_ppnt->p_memsz < k) {
37208+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
37209+ elf_ppnt->p_memsz > pax_task_size ||
37210+ pax_task_size - elf_ppnt->p_memsz < k) {
37211 /* set_brk can never work. Avoid overflows. */
37212 send_sig(SIGKILL, current, 0);
37213 retval = -EINVAL;
37214@@ -866,6 +1167,11 @@ static int load_elf_binary(struct linux_
37215 start_data += load_bias;
37216 end_data += load_bias;
37217
37218+#ifdef CONFIG_PAX_RANDMMAP
37219+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
37220+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
37221+#endif
37222+
37223 /* Calling set_brk effectively mmaps the pages that we need
37224 * for the bss and break sections. We must do this before
37225 * mapping in the interpreter, to make sure it doesn't wind
37226@@ -877,9 +1183,11 @@ static int load_elf_binary(struct linux_
37227 goto out_free_dentry;
37228 }
37229 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
37230- send_sig(SIGSEGV, current, 0);
37231- retval = -EFAULT; /* Nobody gets to see this, but.. */
37232- goto out_free_dentry;
37233+ /*
37234+ * This bss-zeroing can fail if the ELF
37235+ * file specifies odd protections. So
37236+ * we don't check the return value
37237+ */
37238 }
37239
37240 if (elf_interpreter) {
37241@@ -1112,8 +1420,10 @@ static int dump_seek(struct file *file,
37242 unsigned long n = off;
37243 if (n > PAGE_SIZE)
37244 n = PAGE_SIZE;
37245- if (!dump_write(file, buf, n))
37246+ if (!dump_write(file, buf, n)) {
37247+ free_page((unsigned long)buf);
37248 return 0;
37249+ }
37250 off -= n;
37251 }
37252 free_page((unsigned long)buf);
37253@@ -1125,7 +1435,7 @@ static int dump_seek(struct file *file,
37254 * Decide what to dump of a segment, part, all or none.
37255 */
37256 static unsigned long vma_dump_size(struct vm_area_struct *vma,
37257- unsigned long mm_flags)
37258+ unsigned long mm_flags, long signr)
37259 {
37260 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
37261
37262@@ -1159,7 +1469,7 @@ static unsigned long vma_dump_size(struc
37263 if (vma->vm_file == NULL)
37264 return 0;
37265
37266- if (FILTER(MAPPED_PRIVATE))
37267+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
37268 goto whole;
37269
37270 /*
37271@@ -1255,8 +1565,11 @@ static int writenote(struct memelfnote *
37272 #undef DUMP_WRITE
37273
37274 #define DUMP_WRITE(addr, nr) \
37275+ do { \
37276+ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
37277 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
37278- goto end_coredump;
37279+ goto end_coredump; \
37280+ } while (0);
37281
37282 static void fill_elf_header(struct elfhdr *elf, int segs,
37283 u16 machine, u32 flags, u8 osabi)
37284@@ -1385,9 +1698,9 @@ static void fill_auxv_note(struct memelf
37285 {
37286 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
37287 int i = 0;
37288- do
37289+ do {
37290 i += 2;
37291- while (auxv[i - 2] != AT_NULL);
37292+ } while (auxv[i - 2] != AT_NULL);
37293 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
37294 }
37295
37296@@ -1973,7 +2286,7 @@ static int elf_core_dump(long signr, str
37297 phdr.p_offset = offset;
37298 phdr.p_vaddr = vma->vm_start;
37299 phdr.p_paddr = 0;
37300- phdr.p_filesz = vma_dump_size(vma, mm_flags);
37301+ phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
37302 phdr.p_memsz = vma->vm_end - vma->vm_start;
37303 offset += phdr.p_filesz;
37304 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
37305@@ -2006,7 +2319,7 @@ static int elf_core_dump(long signr, str
37306 unsigned long addr;
37307 unsigned long end;
37308
37309- end = vma->vm_start + vma_dump_size(vma, mm_flags);
37310+ end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
37311
37312 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
37313 struct page *page;
37314@@ -2015,6 +2328,7 @@ static int elf_core_dump(long signr, str
37315 page = get_dump_page(addr);
37316 if (page) {
37317 void *kaddr = kmap(page);
37318+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
37319 stop = ((size += PAGE_SIZE) > limit) ||
37320 !dump_write(file, kaddr, PAGE_SIZE);
37321 kunmap(page);
37322@@ -2042,6 +2356,97 @@ out:
37323
37324 #endif /* USE_ELF_CORE_DUMP */
37325
37326+#ifdef CONFIG_PAX_MPROTECT
37327+/* PaX: non-PIC ELF libraries need relocations on their executable segments
37328+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
37329+ * we'll remove VM_MAYWRITE for good on RELRO segments.
37330+ *
37331+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
37332+ * basis because we want to allow the common case and not the special ones.
37333+ */
37334+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
37335+{
37336+ struct elfhdr elf_h;
37337+ struct elf_phdr elf_p;
37338+ unsigned long i;
37339+ unsigned long oldflags;
37340+ bool is_textrel_rw, is_textrel_rx, is_relro;
37341+
37342+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
37343+ return;
37344+
37345+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
37346+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
37347+
37348+#ifdef CONFIG_PAX_ELFRELOCS
37349+ /* possible TEXTREL */
37350+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
37351+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
37352+#else
37353+ is_textrel_rw = false;
37354+ is_textrel_rx = false;
37355+#endif
37356+
37357+ /* possible RELRO */
37358+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
37359+
37360+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
37361+ return;
37362+
37363+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
37364+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
37365+
37366+#ifdef CONFIG_PAX_ETEXECRELOCS
37367+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
37368+#else
37369+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
37370+#endif
37371+
37372+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
37373+ !elf_check_arch(&elf_h) ||
37374+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
37375+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
37376+ return;
37377+
37378+ for (i = 0UL; i < elf_h.e_phnum; i++) {
37379+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
37380+ return;
37381+ switch (elf_p.p_type) {
37382+ case PT_DYNAMIC:
37383+ if (!is_textrel_rw && !is_textrel_rx)
37384+ continue;
37385+ i = 0UL;
37386+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
37387+ elf_dyn dyn;
37388+
37389+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
37390+ return;
37391+ if (dyn.d_tag == DT_NULL)
37392+ return;
37393+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
37394+ gr_log_textrel(vma);
37395+ if (is_textrel_rw)
37396+ vma->vm_flags |= VM_MAYWRITE;
37397+ else
37398+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
37399+ vma->vm_flags &= ~VM_MAYWRITE;
37400+ return;
37401+ }
37402+ i++;
37403+ }
37404+ return;
37405+
37406+ case PT_GNU_RELRO:
37407+ if (!is_relro)
37408+ continue;
37409+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
37410+ vma->vm_flags &= ~VM_MAYWRITE;
37411+ return;
37412+ }
37413+ }
37414+}
37415+#endif
37416+
37417 static int __init init_elf_binfmt(void)
37418 {
37419 return register_binfmt(&elf_format);
37420diff -urNp linux-2.6.32.43/fs/binfmt_flat.c linux-2.6.32.43/fs/binfmt_flat.c
37421--- linux-2.6.32.43/fs/binfmt_flat.c 2011-03-27 14:31:47.000000000 -0400
37422+++ linux-2.6.32.43/fs/binfmt_flat.c 2011-04-17 15:56:46.000000000 -0400
37423@@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
37424 realdatastart = (unsigned long) -ENOMEM;
37425 printk("Unable to allocate RAM for process data, errno %d\n",
37426 (int)-realdatastart);
37427+ down_write(&current->mm->mmap_sem);
37428 do_munmap(current->mm, textpos, text_len);
37429+ up_write(&current->mm->mmap_sem);
37430 ret = realdatastart;
37431 goto err;
37432 }
37433@@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
37434 }
37435 if (IS_ERR_VALUE(result)) {
37436 printk("Unable to read data+bss, errno %d\n", (int)-result);
37437+ down_write(&current->mm->mmap_sem);
37438 do_munmap(current->mm, textpos, text_len);
37439 do_munmap(current->mm, realdatastart, data_len + extra);
37440+ up_write(&current->mm->mmap_sem);
37441 ret = result;
37442 goto err;
37443 }
37444@@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
37445 }
37446 if (IS_ERR_VALUE(result)) {
37447 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
37448+ down_write(&current->mm->mmap_sem);
37449 do_munmap(current->mm, textpos, text_len + data_len + extra +
37450 MAX_SHARED_LIBS * sizeof(unsigned long));
37451+ up_write(&current->mm->mmap_sem);
37452 ret = result;
37453 goto err;
37454 }
37455diff -urNp linux-2.6.32.43/fs/bio.c linux-2.6.32.43/fs/bio.c
37456--- linux-2.6.32.43/fs/bio.c 2011-03-27 14:31:47.000000000 -0400
37457+++ linux-2.6.32.43/fs/bio.c 2011-04-17 15:56:46.000000000 -0400
37458@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
37459
37460 i = 0;
37461 while (i < bio_slab_nr) {
37462- struct bio_slab *bslab = &bio_slabs[i];
37463+ bslab = &bio_slabs[i];
37464
37465 if (!bslab->slab && entry == -1)
37466 entry = i;
37467@@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct b
37468 const int read = bio_data_dir(bio) == READ;
37469 struct bio_map_data *bmd = bio->bi_private;
37470 int i;
37471- char *p = bmd->sgvecs[0].iov_base;
37472+ char *p = (__force char *)bmd->sgvecs[0].iov_base;
37473
37474 __bio_for_each_segment(bvec, bio, i, 0) {
37475 char *addr = page_address(bvec->bv_page);
37476diff -urNp linux-2.6.32.43/fs/block_dev.c linux-2.6.32.43/fs/block_dev.c
37477--- linux-2.6.32.43/fs/block_dev.c 2011-06-25 12:55:34.000000000 -0400
37478+++ linux-2.6.32.43/fs/block_dev.c 2011-06-25 12:56:37.000000000 -0400
37479@@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev,
37480 else if (bdev->bd_contains == bdev)
37481 res = 0; /* is a whole device which isn't held */
37482
37483- else if (bdev->bd_contains->bd_holder == bd_claim)
37484+ else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
37485 res = 0; /* is a partition of a device that is being partitioned */
37486 else if (bdev->bd_contains->bd_holder != NULL)
37487 res = -EBUSY; /* is a partition of a held device */
37488diff -urNp linux-2.6.32.43/fs/btrfs/ctree.c linux-2.6.32.43/fs/btrfs/ctree.c
37489--- linux-2.6.32.43/fs/btrfs/ctree.c 2011-03-27 14:31:47.000000000 -0400
37490+++ linux-2.6.32.43/fs/btrfs/ctree.c 2011-04-17 15:56:46.000000000 -0400
37491@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
37492 free_extent_buffer(buf);
37493 add_root_to_dirty_list(root);
37494 } else {
37495- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
37496- parent_start = parent->start;
37497- else
37498+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
37499+ if (parent)
37500+ parent_start = parent->start;
37501+ else
37502+ parent_start = 0;
37503+ } else
37504 parent_start = 0;
37505
37506 WARN_ON(trans->transid != btrfs_header_generation(parent));
37507@@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_tran
37508
37509 ret = 0;
37510 if (slot == 0) {
37511- struct btrfs_disk_key disk_key;
37512 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
37513 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
37514 }
37515diff -urNp linux-2.6.32.43/fs/btrfs/disk-io.c linux-2.6.32.43/fs/btrfs/disk-io.c
37516--- linux-2.6.32.43/fs/btrfs/disk-io.c 2011-04-17 17:00:52.000000000 -0400
37517+++ linux-2.6.32.43/fs/btrfs/disk-io.c 2011-04-17 17:03:11.000000000 -0400
37518@@ -39,7 +39,7 @@
37519 #include "tree-log.h"
37520 #include "free-space-cache.h"
37521
37522-static struct extent_io_ops btree_extent_io_ops;
37523+static const struct extent_io_ops btree_extent_io_ops;
37524 static void end_workqueue_fn(struct btrfs_work *work);
37525 static void free_fs_root(struct btrfs_root *root);
37526
37527@@ -2607,7 +2607,7 @@ out:
37528 return 0;
37529 }
37530
37531-static struct extent_io_ops btree_extent_io_ops = {
37532+static const struct extent_io_ops btree_extent_io_ops = {
37533 .write_cache_pages_lock_hook = btree_lock_page_hook,
37534 .readpage_end_io_hook = btree_readpage_end_io_hook,
37535 .submit_bio_hook = btree_submit_bio_hook,
37536diff -urNp linux-2.6.32.43/fs/btrfs/extent_io.h linux-2.6.32.43/fs/btrfs/extent_io.h
37537--- linux-2.6.32.43/fs/btrfs/extent_io.h 2011-03-27 14:31:47.000000000 -0400
37538+++ linux-2.6.32.43/fs/btrfs/extent_io.h 2011-04-17 15:56:46.000000000 -0400
37539@@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
37540 struct bio *bio, int mirror_num,
37541 unsigned long bio_flags);
37542 struct extent_io_ops {
37543- int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
37544+ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
37545 u64 start, u64 end, int *page_started,
37546 unsigned long *nr_written);
37547- int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
37548- int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
37549+ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
37550+ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
37551 extent_submit_bio_hook_t *submit_bio_hook;
37552- int (*merge_bio_hook)(struct page *page, unsigned long offset,
37553+ int (* const merge_bio_hook)(struct page *page, unsigned long offset,
37554 size_t size, struct bio *bio,
37555 unsigned long bio_flags);
37556- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
37557- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
37558+ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
37559+ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
37560 u64 start, u64 end,
37561 struct extent_state *state);
37562- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
37563+ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
37564 u64 start, u64 end,
37565 struct extent_state *state);
37566- int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
37567+ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
37568 struct extent_state *state);
37569- int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
37570+ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
37571 struct extent_state *state, int uptodate);
37572- int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
37573+ int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
37574 unsigned long old, unsigned long bits);
37575- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
37576+ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
37577 unsigned long bits);
37578- int (*merge_extent_hook)(struct inode *inode,
37579+ int (* const merge_extent_hook)(struct inode *inode,
37580 struct extent_state *new,
37581 struct extent_state *other);
37582- int (*split_extent_hook)(struct inode *inode,
37583+ int (* const split_extent_hook)(struct inode *inode,
37584 struct extent_state *orig, u64 split);
37585- int (*write_cache_pages_lock_hook)(struct page *page);
37586+ int (* const write_cache_pages_lock_hook)(struct page *page);
37587 };
37588
37589 struct extent_io_tree {
37590@@ -88,7 +88,7 @@ struct extent_io_tree {
37591 u64 dirty_bytes;
37592 spinlock_t lock;
37593 spinlock_t buffer_lock;
37594- struct extent_io_ops *ops;
37595+ const struct extent_io_ops *ops;
37596 };
37597
37598 struct extent_state {
37599diff -urNp linux-2.6.32.43/fs/btrfs/extent-tree.c linux-2.6.32.43/fs/btrfs/extent-tree.c
37600--- linux-2.6.32.43/fs/btrfs/extent-tree.c 2011-03-27 14:31:47.000000000 -0400
37601+++ linux-2.6.32.43/fs/btrfs/extent-tree.c 2011-06-12 06:39:08.000000000 -0400
37602@@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(
37603 u64 group_start = group->key.objectid;
37604 new_extents = kmalloc(sizeof(*new_extents),
37605 GFP_NOFS);
37606+ if (!new_extents) {
37607+ ret = -ENOMEM;
37608+ goto out;
37609+ }
37610 nr_extents = 1;
37611 ret = get_new_locations(reloc_inode,
37612 extent_key,
37613diff -urNp linux-2.6.32.43/fs/btrfs/free-space-cache.c linux-2.6.32.43/fs/btrfs/free-space-cache.c
37614--- linux-2.6.32.43/fs/btrfs/free-space-cache.c 2011-03-27 14:31:47.000000000 -0400
37615+++ linux-2.6.32.43/fs/btrfs/free-space-cache.c 2011-04-17 15:56:46.000000000 -0400
37616@@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
37617
37618 while(1) {
37619 if (entry->bytes < bytes || entry->offset < min_start) {
37620- struct rb_node *node;
37621-
37622 node = rb_next(&entry->offset_index);
37623 if (!node)
37624 break;
37625@@ -1226,7 +1224,7 @@ again:
37626 */
37627 while (entry->bitmap || found_bitmap ||
37628 (!entry->bitmap && entry->bytes < min_bytes)) {
37629- struct rb_node *node = rb_next(&entry->offset_index);
37630+ node = rb_next(&entry->offset_index);
37631
37632 if (entry->bitmap && entry->bytes > bytes + empty_size) {
37633 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
37634diff -urNp linux-2.6.32.43/fs/btrfs/inode.c linux-2.6.32.43/fs/btrfs/inode.c
37635--- linux-2.6.32.43/fs/btrfs/inode.c 2011-03-27 14:31:47.000000000 -0400
37636+++ linux-2.6.32.43/fs/btrfs/inode.c 2011-06-12 06:39:58.000000000 -0400
37637@@ -63,7 +63,7 @@ static const struct inode_operations btr
37638 static const struct address_space_operations btrfs_aops;
37639 static const struct address_space_operations btrfs_symlink_aops;
37640 static const struct file_operations btrfs_dir_file_operations;
37641-static struct extent_io_ops btrfs_extent_io_ops;
37642+static const struct extent_io_ops btrfs_extent_io_ops;
37643
37644 static struct kmem_cache *btrfs_inode_cachep;
37645 struct kmem_cache *btrfs_trans_handle_cachep;
37646@@ -925,6 +925,7 @@ static int cow_file_range_async(struct i
37647 1, 0, NULL, GFP_NOFS);
37648 while (start < end) {
37649 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
37650+ BUG_ON(!async_cow);
37651 async_cow->inode = inode;
37652 async_cow->root = root;
37653 async_cow->locked_page = locked_page;
37654@@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(st
37655 inline_size = btrfs_file_extent_inline_item_len(leaf,
37656 btrfs_item_nr(leaf, path->slots[0]));
37657 tmp = kmalloc(inline_size, GFP_NOFS);
37658+ if (!tmp)
37659+ return -ENOMEM;
37660 ptr = btrfs_file_extent_inline_start(item);
37661
37662 read_extent_buffer(leaf, tmp, ptr, inline_size);
37663@@ -5410,7 +5413,7 @@ fail:
37664 return -ENOMEM;
37665 }
37666
37667-static int btrfs_getattr(struct vfsmount *mnt,
37668+int btrfs_getattr(struct vfsmount *mnt,
37669 struct dentry *dentry, struct kstat *stat)
37670 {
37671 struct inode *inode = dentry->d_inode;
37672@@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount
37673 return 0;
37674 }
37675
37676+EXPORT_SYMBOL(btrfs_getattr);
37677+
37678+dev_t get_btrfs_dev_from_inode(struct inode *inode)
37679+{
37680+ return BTRFS_I(inode)->root->anon_super.s_dev;
37681+}
37682+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
37683+
37684 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
37685 struct inode *new_dir, struct dentry *new_dentry)
37686 {
37687@@ -5972,7 +5983,7 @@ static const struct file_operations btrf
37688 .fsync = btrfs_sync_file,
37689 };
37690
37691-static struct extent_io_ops btrfs_extent_io_ops = {
37692+static const struct extent_io_ops btrfs_extent_io_ops = {
37693 .fill_delalloc = run_delalloc_range,
37694 .submit_bio_hook = btrfs_submit_bio_hook,
37695 .merge_bio_hook = btrfs_merge_bio_hook,
37696diff -urNp linux-2.6.32.43/fs/btrfs/relocation.c linux-2.6.32.43/fs/btrfs/relocation.c
37697--- linux-2.6.32.43/fs/btrfs/relocation.c 2011-03-27 14:31:47.000000000 -0400
37698+++ linux-2.6.32.43/fs/btrfs/relocation.c 2011-04-17 15:56:46.000000000 -0400
37699@@ -884,7 +884,7 @@ static int __update_reloc_root(struct bt
37700 }
37701 spin_unlock(&rc->reloc_root_tree.lock);
37702
37703- BUG_ON((struct btrfs_root *)node->data != root);
37704+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
37705
37706 if (!del) {
37707 spin_lock(&rc->reloc_root_tree.lock);
37708diff -urNp linux-2.6.32.43/fs/btrfs/sysfs.c linux-2.6.32.43/fs/btrfs/sysfs.c
37709--- linux-2.6.32.43/fs/btrfs/sysfs.c 2011-03-27 14:31:47.000000000 -0400
37710+++ linux-2.6.32.43/fs/btrfs/sysfs.c 2011-04-17 15:56:46.000000000 -0400
37711@@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
37712 complete(&root->kobj_unregister);
37713 }
37714
37715-static struct sysfs_ops btrfs_super_attr_ops = {
37716+static const struct sysfs_ops btrfs_super_attr_ops = {
37717 .show = btrfs_super_attr_show,
37718 .store = btrfs_super_attr_store,
37719 };
37720
37721-static struct sysfs_ops btrfs_root_attr_ops = {
37722+static const struct sysfs_ops btrfs_root_attr_ops = {
37723 .show = btrfs_root_attr_show,
37724 .store = btrfs_root_attr_store,
37725 };
37726diff -urNp linux-2.6.32.43/fs/buffer.c linux-2.6.32.43/fs/buffer.c
37727--- linux-2.6.32.43/fs/buffer.c 2011-03-27 14:31:47.000000000 -0400
37728+++ linux-2.6.32.43/fs/buffer.c 2011-04-17 15:56:46.000000000 -0400
37729@@ -25,6 +25,7 @@
37730 #include <linux/percpu.h>
37731 #include <linux/slab.h>
37732 #include <linux/capability.h>
37733+#include <linux/security.h>
37734 #include <linux/blkdev.h>
37735 #include <linux/file.h>
37736 #include <linux/quotaops.h>
37737diff -urNp linux-2.6.32.43/fs/cachefiles/bind.c linux-2.6.32.43/fs/cachefiles/bind.c
37738--- linux-2.6.32.43/fs/cachefiles/bind.c 2011-03-27 14:31:47.000000000 -0400
37739+++ linux-2.6.32.43/fs/cachefiles/bind.c 2011-04-17 15:56:46.000000000 -0400
37740@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
37741 args);
37742
37743 /* start by checking things over */
37744- ASSERT(cache->fstop_percent >= 0 &&
37745- cache->fstop_percent < cache->fcull_percent &&
37746+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
37747 cache->fcull_percent < cache->frun_percent &&
37748 cache->frun_percent < 100);
37749
37750- ASSERT(cache->bstop_percent >= 0 &&
37751- cache->bstop_percent < cache->bcull_percent &&
37752+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
37753 cache->bcull_percent < cache->brun_percent &&
37754 cache->brun_percent < 100);
37755
37756diff -urNp linux-2.6.32.43/fs/cachefiles/daemon.c linux-2.6.32.43/fs/cachefiles/daemon.c
37757--- linux-2.6.32.43/fs/cachefiles/daemon.c 2011-03-27 14:31:47.000000000 -0400
37758+++ linux-2.6.32.43/fs/cachefiles/daemon.c 2011-04-17 15:56:46.000000000 -0400
37759@@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(s
37760 if (test_bit(CACHEFILES_DEAD, &cache->flags))
37761 return -EIO;
37762
37763- if (datalen < 0 || datalen > PAGE_SIZE - 1)
37764+ if (datalen > PAGE_SIZE - 1)
37765 return -EOPNOTSUPP;
37766
37767 /* drag the command string into the kernel so we can parse it */
37768@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
37769 if (args[0] != '%' || args[1] != '\0')
37770 return -EINVAL;
37771
37772- if (fstop < 0 || fstop >= cache->fcull_percent)
37773+ if (fstop >= cache->fcull_percent)
37774 return cachefiles_daemon_range_error(cache, args);
37775
37776 cache->fstop_percent = fstop;
37777@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
37778 if (args[0] != '%' || args[1] != '\0')
37779 return -EINVAL;
37780
37781- if (bstop < 0 || bstop >= cache->bcull_percent)
37782+ if (bstop >= cache->bcull_percent)
37783 return cachefiles_daemon_range_error(cache, args);
37784
37785 cache->bstop_percent = bstop;
37786diff -urNp linux-2.6.32.43/fs/cachefiles/internal.h linux-2.6.32.43/fs/cachefiles/internal.h
37787--- linux-2.6.32.43/fs/cachefiles/internal.h 2011-03-27 14:31:47.000000000 -0400
37788+++ linux-2.6.32.43/fs/cachefiles/internal.h 2011-05-04 17:56:28.000000000 -0400
37789@@ -56,7 +56,7 @@ struct cachefiles_cache {
37790 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
37791 struct rb_root active_nodes; /* active nodes (can't be culled) */
37792 rwlock_t active_lock; /* lock for active_nodes */
37793- atomic_t gravecounter; /* graveyard uniquifier */
37794+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
37795 unsigned frun_percent; /* when to stop culling (% files) */
37796 unsigned fcull_percent; /* when to start culling (% files) */
37797 unsigned fstop_percent; /* when to stop allocating (% files) */
37798@@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc
37799 * proc.c
37800 */
37801 #ifdef CONFIG_CACHEFILES_HISTOGRAM
37802-extern atomic_t cachefiles_lookup_histogram[HZ];
37803-extern atomic_t cachefiles_mkdir_histogram[HZ];
37804-extern atomic_t cachefiles_create_histogram[HZ];
37805+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37806+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37807+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
37808
37809 extern int __init cachefiles_proc_init(void);
37810 extern void cachefiles_proc_cleanup(void);
37811 static inline
37812-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
37813+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
37814 {
37815 unsigned long jif = jiffies - start_jif;
37816 if (jif >= HZ)
37817 jif = HZ - 1;
37818- atomic_inc(&histogram[jif]);
37819+ atomic_inc_unchecked(&histogram[jif]);
37820 }
37821
37822 #else
37823diff -urNp linux-2.6.32.43/fs/cachefiles/namei.c linux-2.6.32.43/fs/cachefiles/namei.c
37824--- linux-2.6.32.43/fs/cachefiles/namei.c 2011-03-27 14:31:47.000000000 -0400
37825+++ linux-2.6.32.43/fs/cachefiles/namei.c 2011-05-04 17:56:28.000000000 -0400
37826@@ -250,7 +250,7 @@ try_again:
37827 /* first step is to make up a grave dentry in the graveyard */
37828 sprintf(nbuffer, "%08x%08x",
37829 (uint32_t) get_seconds(),
37830- (uint32_t) atomic_inc_return(&cache->gravecounter));
37831+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
37832
37833 /* do the multiway lock magic */
37834 trap = lock_rename(cache->graveyard, dir);
37835diff -urNp linux-2.6.32.43/fs/cachefiles/proc.c linux-2.6.32.43/fs/cachefiles/proc.c
37836--- linux-2.6.32.43/fs/cachefiles/proc.c 2011-03-27 14:31:47.000000000 -0400
37837+++ linux-2.6.32.43/fs/cachefiles/proc.c 2011-05-04 17:56:28.000000000 -0400
37838@@ -14,9 +14,9 @@
37839 #include <linux/seq_file.h>
37840 #include "internal.h"
37841
37842-atomic_t cachefiles_lookup_histogram[HZ];
37843-atomic_t cachefiles_mkdir_histogram[HZ];
37844-atomic_t cachefiles_create_histogram[HZ];
37845+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
37846+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
37847+atomic_unchecked_t cachefiles_create_histogram[HZ];
37848
37849 /*
37850 * display the latency histogram
37851@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
37852 return 0;
37853 default:
37854 index = (unsigned long) v - 3;
37855- x = atomic_read(&cachefiles_lookup_histogram[index]);
37856- y = atomic_read(&cachefiles_mkdir_histogram[index]);
37857- z = atomic_read(&cachefiles_create_histogram[index]);
37858+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
37859+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
37860+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
37861 if (x == 0 && y == 0 && z == 0)
37862 return 0;
37863
37864diff -urNp linux-2.6.32.43/fs/cachefiles/rdwr.c linux-2.6.32.43/fs/cachefiles/rdwr.c
37865--- linux-2.6.32.43/fs/cachefiles/rdwr.c 2011-03-27 14:31:47.000000000 -0400
37866+++ linux-2.6.32.43/fs/cachefiles/rdwr.c 2011-04-17 15:56:46.000000000 -0400
37867@@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache
37868 old_fs = get_fs();
37869 set_fs(KERNEL_DS);
37870 ret = file->f_op->write(
37871- file, (const void __user *) data, len, &pos);
37872+ file, (__force const void __user *) data, len, &pos);
37873 set_fs(old_fs);
37874 kunmap(page);
37875 if (ret != len)
37876diff -urNp linux-2.6.32.43/fs/cifs/cifs_debug.c linux-2.6.32.43/fs/cifs/cifs_debug.c
37877--- linux-2.6.32.43/fs/cifs/cifs_debug.c 2011-03-27 14:31:47.000000000 -0400
37878+++ linux-2.6.32.43/fs/cifs/cifs_debug.c 2011-05-04 17:56:28.000000000 -0400
37879@@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str
37880 tcon = list_entry(tmp3,
37881 struct cifsTconInfo,
37882 tcon_list);
37883- atomic_set(&tcon->num_smbs_sent, 0);
37884- atomic_set(&tcon->num_writes, 0);
37885- atomic_set(&tcon->num_reads, 0);
37886- atomic_set(&tcon->num_oplock_brks, 0);
37887- atomic_set(&tcon->num_opens, 0);
37888- atomic_set(&tcon->num_posixopens, 0);
37889- atomic_set(&tcon->num_posixmkdirs, 0);
37890- atomic_set(&tcon->num_closes, 0);
37891- atomic_set(&tcon->num_deletes, 0);
37892- atomic_set(&tcon->num_mkdirs, 0);
37893- atomic_set(&tcon->num_rmdirs, 0);
37894- atomic_set(&tcon->num_renames, 0);
37895- atomic_set(&tcon->num_t2renames, 0);
37896- atomic_set(&tcon->num_ffirst, 0);
37897- atomic_set(&tcon->num_fnext, 0);
37898- atomic_set(&tcon->num_fclose, 0);
37899- atomic_set(&tcon->num_hardlinks, 0);
37900- atomic_set(&tcon->num_symlinks, 0);
37901- atomic_set(&tcon->num_locks, 0);
37902+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
37903+ atomic_set_unchecked(&tcon->num_writes, 0);
37904+ atomic_set_unchecked(&tcon->num_reads, 0);
37905+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
37906+ atomic_set_unchecked(&tcon->num_opens, 0);
37907+ atomic_set_unchecked(&tcon->num_posixopens, 0);
37908+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
37909+ atomic_set_unchecked(&tcon->num_closes, 0);
37910+ atomic_set_unchecked(&tcon->num_deletes, 0);
37911+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
37912+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
37913+ atomic_set_unchecked(&tcon->num_renames, 0);
37914+ atomic_set_unchecked(&tcon->num_t2renames, 0);
37915+ atomic_set_unchecked(&tcon->num_ffirst, 0);
37916+ atomic_set_unchecked(&tcon->num_fnext, 0);
37917+ atomic_set_unchecked(&tcon->num_fclose, 0);
37918+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
37919+ atomic_set_unchecked(&tcon->num_symlinks, 0);
37920+ atomic_set_unchecked(&tcon->num_locks, 0);
37921 }
37922 }
37923 }
37924@@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s
37925 if (tcon->need_reconnect)
37926 seq_puts(m, "\tDISCONNECTED ");
37927 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
37928- atomic_read(&tcon->num_smbs_sent),
37929- atomic_read(&tcon->num_oplock_brks));
37930+ atomic_read_unchecked(&tcon->num_smbs_sent),
37931+ atomic_read_unchecked(&tcon->num_oplock_brks));
37932 seq_printf(m, "\nReads: %d Bytes: %lld",
37933- atomic_read(&tcon->num_reads),
37934+ atomic_read_unchecked(&tcon->num_reads),
37935 (long long)(tcon->bytes_read));
37936 seq_printf(m, "\nWrites: %d Bytes: %lld",
37937- atomic_read(&tcon->num_writes),
37938+ atomic_read_unchecked(&tcon->num_writes),
37939 (long long)(tcon->bytes_written));
37940 seq_printf(m, "\nFlushes: %d",
37941- atomic_read(&tcon->num_flushes));
37942+ atomic_read_unchecked(&tcon->num_flushes));
37943 seq_printf(m, "\nLocks: %d HardLinks: %d "
37944 "Symlinks: %d",
37945- atomic_read(&tcon->num_locks),
37946- atomic_read(&tcon->num_hardlinks),
37947- atomic_read(&tcon->num_symlinks));
37948+ atomic_read_unchecked(&tcon->num_locks),
37949+ atomic_read_unchecked(&tcon->num_hardlinks),
37950+ atomic_read_unchecked(&tcon->num_symlinks));
37951 seq_printf(m, "\nOpens: %d Closes: %d "
37952 "Deletes: %d",
37953- atomic_read(&tcon->num_opens),
37954- atomic_read(&tcon->num_closes),
37955- atomic_read(&tcon->num_deletes));
37956+ atomic_read_unchecked(&tcon->num_opens),
37957+ atomic_read_unchecked(&tcon->num_closes),
37958+ atomic_read_unchecked(&tcon->num_deletes));
37959 seq_printf(m, "\nPosix Opens: %d "
37960 "Posix Mkdirs: %d",
37961- atomic_read(&tcon->num_posixopens),
37962- atomic_read(&tcon->num_posixmkdirs));
37963+ atomic_read_unchecked(&tcon->num_posixopens),
37964+ atomic_read_unchecked(&tcon->num_posixmkdirs));
37965 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
37966- atomic_read(&tcon->num_mkdirs),
37967- atomic_read(&tcon->num_rmdirs));
37968+ atomic_read_unchecked(&tcon->num_mkdirs),
37969+ atomic_read_unchecked(&tcon->num_rmdirs));
37970 seq_printf(m, "\nRenames: %d T2 Renames %d",
37971- atomic_read(&tcon->num_renames),
37972- atomic_read(&tcon->num_t2renames));
37973+ atomic_read_unchecked(&tcon->num_renames),
37974+ atomic_read_unchecked(&tcon->num_t2renames));
37975 seq_printf(m, "\nFindFirst: %d FNext %d "
37976 "FClose %d",
37977- atomic_read(&tcon->num_ffirst),
37978- atomic_read(&tcon->num_fnext),
37979- atomic_read(&tcon->num_fclose));
37980+ atomic_read_unchecked(&tcon->num_ffirst),
37981+ atomic_read_unchecked(&tcon->num_fnext),
37982+ atomic_read_unchecked(&tcon->num_fclose));
37983 }
37984 }
37985 }
37986diff -urNp linux-2.6.32.43/fs/cifs/cifsglob.h linux-2.6.32.43/fs/cifs/cifsglob.h
37987--- linux-2.6.32.43/fs/cifs/cifsglob.h 2011-03-27 14:31:47.000000000 -0400
37988+++ linux-2.6.32.43/fs/cifs/cifsglob.h 2011-05-04 17:56:28.000000000 -0400
37989@@ -252,28 +252,28 @@ struct cifsTconInfo {
37990 __u16 Flags; /* optional support bits */
37991 enum statusEnum tidStatus;
37992 #ifdef CONFIG_CIFS_STATS
37993- atomic_t num_smbs_sent;
37994- atomic_t num_writes;
37995- atomic_t num_reads;
37996- atomic_t num_flushes;
37997- atomic_t num_oplock_brks;
37998- atomic_t num_opens;
37999- atomic_t num_closes;
38000- atomic_t num_deletes;
38001- atomic_t num_mkdirs;
38002- atomic_t num_posixopens;
38003- atomic_t num_posixmkdirs;
38004- atomic_t num_rmdirs;
38005- atomic_t num_renames;
38006- atomic_t num_t2renames;
38007- atomic_t num_ffirst;
38008- atomic_t num_fnext;
38009- atomic_t num_fclose;
38010- atomic_t num_hardlinks;
38011- atomic_t num_symlinks;
38012- atomic_t num_locks;
38013- atomic_t num_acl_get;
38014- atomic_t num_acl_set;
38015+ atomic_unchecked_t num_smbs_sent;
38016+ atomic_unchecked_t num_writes;
38017+ atomic_unchecked_t num_reads;
38018+ atomic_unchecked_t num_flushes;
38019+ atomic_unchecked_t num_oplock_brks;
38020+ atomic_unchecked_t num_opens;
38021+ atomic_unchecked_t num_closes;
38022+ atomic_unchecked_t num_deletes;
38023+ atomic_unchecked_t num_mkdirs;
38024+ atomic_unchecked_t num_posixopens;
38025+ atomic_unchecked_t num_posixmkdirs;
38026+ atomic_unchecked_t num_rmdirs;
38027+ atomic_unchecked_t num_renames;
38028+ atomic_unchecked_t num_t2renames;
38029+ atomic_unchecked_t num_ffirst;
38030+ atomic_unchecked_t num_fnext;
38031+ atomic_unchecked_t num_fclose;
38032+ atomic_unchecked_t num_hardlinks;
38033+ atomic_unchecked_t num_symlinks;
38034+ atomic_unchecked_t num_locks;
38035+ atomic_unchecked_t num_acl_get;
38036+ atomic_unchecked_t num_acl_set;
38037 #ifdef CONFIG_CIFS_STATS2
38038 unsigned long long time_writes;
38039 unsigned long long time_reads;
38040@@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st
38041 }
38042
38043 #ifdef CONFIG_CIFS_STATS
38044-#define cifs_stats_inc atomic_inc
38045+#define cifs_stats_inc atomic_inc_unchecked
38046
38047 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
38048 unsigned int bytes)
38049diff -urNp linux-2.6.32.43/fs/cifs/link.c linux-2.6.32.43/fs/cifs/link.c
38050--- linux-2.6.32.43/fs/cifs/link.c 2011-03-27 14:31:47.000000000 -0400
38051+++ linux-2.6.32.43/fs/cifs/link.c 2011-04-17 15:56:46.000000000 -0400
38052@@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
38053
38054 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
38055 {
38056- char *p = nd_get_link(nd);
38057+ const char *p = nd_get_link(nd);
38058 if (!IS_ERR(p))
38059 kfree(p);
38060 }
38061diff -urNp linux-2.6.32.43/fs/coda/cache.c linux-2.6.32.43/fs/coda/cache.c
38062--- linux-2.6.32.43/fs/coda/cache.c 2011-03-27 14:31:47.000000000 -0400
38063+++ linux-2.6.32.43/fs/coda/cache.c 2011-05-04 17:56:28.000000000 -0400
38064@@ -24,14 +24,14 @@
38065 #include <linux/coda_fs_i.h>
38066 #include <linux/coda_cache.h>
38067
38068-static atomic_t permission_epoch = ATOMIC_INIT(0);
38069+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
38070
38071 /* replace or extend an acl cache hit */
38072 void coda_cache_enter(struct inode *inode, int mask)
38073 {
38074 struct coda_inode_info *cii = ITOC(inode);
38075
38076- cii->c_cached_epoch = atomic_read(&permission_epoch);
38077+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
38078 if (cii->c_uid != current_fsuid()) {
38079 cii->c_uid = current_fsuid();
38080 cii->c_cached_perm = mask;
38081@@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod
38082 void coda_cache_clear_inode(struct inode *inode)
38083 {
38084 struct coda_inode_info *cii = ITOC(inode);
38085- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
38086+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
38087 }
38088
38089 /* remove all acl caches */
38090 void coda_cache_clear_all(struct super_block *sb)
38091 {
38092- atomic_inc(&permission_epoch);
38093+ atomic_inc_unchecked(&permission_epoch);
38094 }
38095
38096
38097@@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode
38098
38099 hit = (mask & cii->c_cached_perm) == mask &&
38100 cii->c_uid == current_fsuid() &&
38101- cii->c_cached_epoch == atomic_read(&permission_epoch);
38102+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
38103
38104 return hit;
38105 }
38106diff -urNp linux-2.6.32.43/fs/compat_binfmt_elf.c linux-2.6.32.43/fs/compat_binfmt_elf.c
38107--- linux-2.6.32.43/fs/compat_binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
38108+++ linux-2.6.32.43/fs/compat_binfmt_elf.c 2011-04-17 15:56:46.000000000 -0400
38109@@ -29,10 +29,12 @@
38110 #undef elfhdr
38111 #undef elf_phdr
38112 #undef elf_note
38113+#undef elf_dyn
38114 #undef elf_addr_t
38115 #define elfhdr elf32_hdr
38116 #define elf_phdr elf32_phdr
38117 #define elf_note elf32_note
38118+#define elf_dyn Elf32_Dyn
38119 #define elf_addr_t Elf32_Addr
38120
38121 /*
38122diff -urNp linux-2.6.32.43/fs/compat.c linux-2.6.32.43/fs/compat.c
38123--- linux-2.6.32.43/fs/compat.c 2011-04-17 17:00:52.000000000 -0400
38124+++ linux-2.6.32.43/fs/compat.c 2011-05-16 21:46:57.000000000 -0400
38125@@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
38126
38127 struct compat_readdir_callback {
38128 struct compat_old_linux_dirent __user *dirent;
38129+ struct file * file;
38130 int result;
38131 };
38132
38133@@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf
38134 buf->result = -EOVERFLOW;
38135 return -EOVERFLOW;
38136 }
38137+
38138+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38139+ return 0;
38140+
38141 buf->result++;
38142 dirent = buf->dirent;
38143 if (!access_ok(VERIFY_WRITE, dirent,
38144@@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(u
38145
38146 buf.result = 0;
38147 buf.dirent = dirent;
38148+ buf.file = file;
38149
38150 error = vfs_readdir(file, compat_fillonedir, &buf);
38151 if (buf.result)
38152@@ -899,6 +905,7 @@ struct compat_linux_dirent {
38153 struct compat_getdents_callback {
38154 struct compat_linux_dirent __user *current_dir;
38155 struct compat_linux_dirent __user *previous;
38156+ struct file * file;
38157 int count;
38158 int error;
38159 };
38160@@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, c
38161 buf->error = -EOVERFLOW;
38162 return -EOVERFLOW;
38163 }
38164+
38165+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38166+ return 0;
38167+
38168 dirent = buf->previous;
38169 if (dirent) {
38170 if (__put_user(offset, &dirent->d_off))
38171@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsi
38172 buf.previous = NULL;
38173 buf.count = count;
38174 buf.error = 0;
38175+ buf.file = file;
38176
38177 error = vfs_readdir(file, compat_filldir, &buf);
38178 if (error >= 0)
38179@@ -987,6 +999,7 @@ out:
38180 struct compat_getdents_callback64 {
38181 struct linux_dirent64 __user *current_dir;
38182 struct linux_dirent64 __user *previous;
38183+ struct file * file;
38184 int count;
38185 int error;
38186 };
38187@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
38188 buf->error = -EINVAL; /* only used if we fail.. */
38189 if (reclen > buf->count)
38190 return -EINVAL;
38191+
38192+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
38193+ return 0;
38194+
38195 dirent = buf->previous;
38196
38197 if (dirent) {
38198@@ -1054,6 +1071,7 @@ asmlinkage long compat_sys_getdents64(un
38199 buf.previous = NULL;
38200 buf.count = count;
38201 buf.error = 0;
38202+ buf.file = file;
38203
38204 error = vfs_readdir(file, compat_filldir64, &buf);
38205 if (error >= 0)
38206@@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(in
38207 * verify all the pointers
38208 */
38209 ret = -EINVAL;
38210- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
38211+ if (nr_segs > UIO_MAXIOV)
38212 goto out;
38213 if (!file->f_op)
38214 goto out;
38215@@ -1463,6 +1481,11 @@ int compat_do_execve(char * filename,
38216 compat_uptr_t __user *envp,
38217 struct pt_regs * regs)
38218 {
38219+#ifdef CONFIG_GRKERNSEC
38220+ struct file *old_exec_file;
38221+ struct acl_subject_label *old_acl;
38222+ struct rlimit old_rlim[RLIM_NLIMITS];
38223+#endif
38224 struct linux_binprm *bprm;
38225 struct file *file;
38226 struct files_struct *displaced;
38227@@ -1499,6 +1522,19 @@ int compat_do_execve(char * filename,
38228 bprm->filename = filename;
38229 bprm->interp = filename;
38230
38231+ if (gr_process_user_ban()) {
38232+ retval = -EPERM;
38233+ goto out_file;
38234+ }
38235+
38236+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
38237+ retval = -EAGAIN;
38238+ if (gr_handle_nproc())
38239+ goto out_file;
38240+ retval = -EACCES;
38241+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
38242+ goto out_file;
38243+
38244 retval = bprm_mm_init(bprm);
38245 if (retval)
38246 goto out_file;
38247@@ -1528,9 +1564,40 @@ int compat_do_execve(char * filename,
38248 if (retval < 0)
38249 goto out;
38250
38251+ if (!gr_tpe_allow(file)) {
38252+ retval = -EACCES;
38253+ goto out;
38254+ }
38255+
38256+ if (gr_check_crash_exec(file)) {
38257+ retval = -EACCES;
38258+ goto out;
38259+ }
38260+
38261+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
38262+
38263+ gr_handle_exec_args_compat(bprm, argv);
38264+
38265+#ifdef CONFIG_GRKERNSEC
38266+ old_acl = current->acl;
38267+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
38268+ old_exec_file = current->exec_file;
38269+ get_file(file);
38270+ current->exec_file = file;
38271+#endif
38272+
38273+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
38274+ bprm->unsafe & LSM_UNSAFE_SHARE);
38275+ if (retval < 0)
38276+ goto out_fail;
38277+
38278 retval = search_binary_handler(bprm, regs);
38279 if (retval < 0)
38280- goto out;
38281+ goto out_fail;
38282+#ifdef CONFIG_GRKERNSEC
38283+ if (old_exec_file)
38284+ fput(old_exec_file);
38285+#endif
38286
38287 /* execve succeeded */
38288 current->fs->in_exec = 0;
38289@@ -1541,6 +1608,14 @@ int compat_do_execve(char * filename,
38290 put_files_struct(displaced);
38291 return retval;
38292
38293+out_fail:
38294+#ifdef CONFIG_GRKERNSEC
38295+ current->acl = old_acl;
38296+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
38297+ fput(current->exec_file);
38298+ current->exec_file = old_exec_file;
38299+#endif
38300+
38301 out:
38302 if (bprm->mm) {
38303 acct_arg_size(bprm, 0);
38304@@ -1711,6 +1786,8 @@ int compat_core_sys_select(int n, compat
38305 struct fdtable *fdt;
38306 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
38307
38308+ pax_track_stack();
38309+
38310 if (n < 0)
38311 goto out_nofds;
38312
38313diff -urNp linux-2.6.32.43/fs/compat_ioctl.c linux-2.6.32.43/fs/compat_ioctl.c
38314--- linux-2.6.32.43/fs/compat_ioctl.c 2011-03-27 14:31:47.000000000 -0400
38315+++ linux-2.6.32.43/fs/compat_ioctl.c 2011-04-23 12:56:11.000000000 -0400
38316@@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsi
38317 up = (struct compat_video_spu_palette __user *) arg;
38318 err = get_user(palp, &up->palette);
38319 err |= get_user(length, &up->length);
38320+ if (err)
38321+ return -EFAULT;
38322
38323 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
38324 err = put_user(compat_ptr(palp), &up_native->palette);
38325diff -urNp linux-2.6.32.43/fs/configfs/dir.c linux-2.6.32.43/fs/configfs/dir.c
38326--- linux-2.6.32.43/fs/configfs/dir.c 2011-03-27 14:31:47.000000000 -0400
38327+++ linux-2.6.32.43/fs/configfs/dir.c 2011-05-11 18:25:15.000000000 -0400
38328@@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file
38329 }
38330 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
38331 struct configfs_dirent *next;
38332- const char * name;
38333+ const unsigned char * name;
38334+ char d_name[sizeof(next->s_dentry->d_iname)];
38335 int len;
38336
38337 next = list_entry(p, struct configfs_dirent,
38338@@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file
38339 continue;
38340
38341 name = configfs_get_name(next);
38342- len = strlen(name);
38343+ if (next->s_dentry && name == next->s_dentry->d_iname) {
38344+ len = next->s_dentry->d_name.len;
38345+ memcpy(d_name, name, len);
38346+ name = d_name;
38347+ } else
38348+ len = strlen(name);
38349 if (next->s_dentry)
38350 ino = next->s_dentry->d_inode->i_ino;
38351 else
38352diff -urNp linux-2.6.32.43/fs/dcache.c linux-2.6.32.43/fs/dcache.c
38353--- linux-2.6.32.43/fs/dcache.c 2011-03-27 14:31:47.000000000 -0400
38354+++ linux-2.6.32.43/fs/dcache.c 2011-04-23 13:32:21.000000000 -0400
38355@@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
38356
38357 static struct kmem_cache *dentry_cache __read_mostly;
38358
38359-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
38360-
38361 /*
38362 * This is the single most critical data structure when it comes
38363 * to the dcache: the hashtable for lookups. Somebody should try
38364@@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned lon
38365 mempages -= reserve;
38366
38367 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
38368- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
38369+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
38370
38371 dcache_init();
38372 inode_init();
38373diff -urNp linux-2.6.32.43/fs/dlm/lockspace.c linux-2.6.32.43/fs/dlm/lockspace.c
38374--- linux-2.6.32.43/fs/dlm/lockspace.c 2011-03-27 14:31:47.000000000 -0400
38375+++ linux-2.6.32.43/fs/dlm/lockspace.c 2011-04-17 15:56:46.000000000 -0400
38376@@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
38377 kfree(ls);
38378 }
38379
38380-static struct sysfs_ops dlm_attr_ops = {
38381+static const struct sysfs_ops dlm_attr_ops = {
38382 .show = dlm_attr_show,
38383 .store = dlm_attr_store,
38384 };
38385diff -urNp linux-2.6.32.43/fs/ecryptfs/inode.c linux-2.6.32.43/fs/ecryptfs/inode.c
38386--- linux-2.6.32.43/fs/ecryptfs/inode.c 2011-03-27 14:31:47.000000000 -0400
38387+++ linux-2.6.32.43/fs/ecryptfs/inode.c 2011-04-17 15:56:46.000000000 -0400
38388@@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struc
38389 old_fs = get_fs();
38390 set_fs(get_ds());
38391 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
38392- (char __user *)lower_buf,
38393+ (__force char __user *)lower_buf,
38394 lower_bufsiz);
38395 set_fs(old_fs);
38396 if (rc < 0)
38397@@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct
38398 }
38399 old_fs = get_fs();
38400 set_fs(get_ds());
38401- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
38402+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
38403 set_fs(old_fs);
38404 if (rc < 0)
38405 goto out_free;
38406diff -urNp linux-2.6.32.43/fs/exec.c linux-2.6.32.43/fs/exec.c
38407--- linux-2.6.32.43/fs/exec.c 2011-06-25 12:55:34.000000000 -0400
38408+++ linux-2.6.32.43/fs/exec.c 2011-07-06 19:53:33.000000000 -0400
38409@@ -56,12 +56,24 @@
38410 #include <linux/fsnotify.h>
38411 #include <linux/fs_struct.h>
38412 #include <linux/pipe_fs_i.h>
38413+#include <linux/random.h>
38414+#include <linux/seq_file.h>
38415+
38416+#ifdef CONFIG_PAX_REFCOUNT
38417+#include <linux/kallsyms.h>
38418+#include <linux/kdebug.h>
38419+#endif
38420
38421 #include <asm/uaccess.h>
38422 #include <asm/mmu_context.h>
38423 #include <asm/tlb.h>
38424 #include "internal.h"
38425
38426+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
38427+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
38428+EXPORT_SYMBOL(pax_set_initial_flags_func);
38429+#endif
38430+
38431 int core_uses_pid;
38432 char core_pattern[CORENAME_MAX_SIZE] = "core";
38433 unsigned int core_pipe_limit;
38434@@ -115,7 +127,7 @@ SYSCALL_DEFINE1(uselib, const char __use
38435 goto out;
38436
38437 file = do_filp_open(AT_FDCWD, tmp,
38438- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
38439+ O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
38440 MAY_READ | MAY_EXEC | MAY_OPEN);
38441 putname(tmp);
38442 error = PTR_ERR(file);
38443@@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_b
38444 int write)
38445 {
38446 struct page *page;
38447- int ret;
38448
38449-#ifdef CONFIG_STACK_GROWSUP
38450- if (write) {
38451- ret = expand_stack_downwards(bprm->vma, pos);
38452- if (ret < 0)
38453- return NULL;
38454- }
38455-#endif
38456- ret = get_user_pages(current, bprm->mm, pos,
38457- 1, write, 1, &page, NULL);
38458- if (ret <= 0)
38459+ if (0 > expand_stack_downwards(bprm->vma, pos))
38460+ return NULL;
38461+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
38462 return NULL;
38463
38464 if (write) {
38465@@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_b
38466 vma->vm_end = STACK_TOP_MAX;
38467 vma->vm_start = vma->vm_end - PAGE_SIZE;
38468 vma->vm_flags = VM_STACK_FLAGS;
38469+
38470+#ifdef CONFIG_PAX_SEGMEXEC
38471+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
38472+#endif
38473+
38474 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
38475
38476 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
38477@@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_b
38478 mm->stack_vm = mm->total_vm = 1;
38479 up_write(&mm->mmap_sem);
38480 bprm->p = vma->vm_end - sizeof(void *);
38481+
38482+#ifdef CONFIG_PAX_RANDUSTACK
38483+ if (randomize_va_space)
38484+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
38485+#endif
38486+
38487 return 0;
38488 err:
38489 up_write(&mm->mmap_sem);
38490@@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char **
38491 int r;
38492 mm_segment_t oldfs = get_fs();
38493 set_fs(KERNEL_DS);
38494- r = copy_strings(argc, (char __user * __user *)argv, bprm);
38495+ r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
38496 set_fs(oldfs);
38497 return r;
38498 }
38499@@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_are
38500 unsigned long new_end = old_end - shift;
38501 struct mmu_gather *tlb;
38502
38503- BUG_ON(new_start > new_end);
38504+ if (new_start >= new_end || new_start < mmap_min_addr)
38505+ return -ENOMEM;
38506
38507 /*
38508 * ensure there are no vmas between where we want to go
38509@@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_are
38510 if (vma != find_vma(mm, new_start))
38511 return -EFAULT;
38512
38513+#ifdef CONFIG_PAX_SEGMEXEC
38514+ BUG_ON(pax_find_mirror_vma(vma));
38515+#endif
38516+
38517 /*
38518 * cover the whole range: [new_start, old_end)
38519 */
38520@@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm
38521 stack_top = arch_align_stack(stack_top);
38522 stack_top = PAGE_ALIGN(stack_top);
38523
38524- if (unlikely(stack_top < mmap_min_addr) ||
38525- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
38526- return -ENOMEM;
38527-
38528 stack_shift = vma->vm_end - stack_top;
38529
38530 bprm->p -= stack_shift;
38531@@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm
38532 bprm->exec -= stack_shift;
38533
38534 down_write(&mm->mmap_sem);
38535+
38536+ /* Move stack pages down in memory. */
38537+ if (stack_shift) {
38538+ ret = shift_arg_pages(vma, stack_shift);
38539+ if (ret)
38540+ goto out_unlock;
38541+ }
38542+
38543 vm_flags = VM_STACK_FLAGS;
38544
38545 /*
38546@@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm
38547 vm_flags &= ~VM_EXEC;
38548 vm_flags |= mm->def_flags;
38549
38550+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
38551+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
38552+ vm_flags &= ~VM_EXEC;
38553+
38554+#ifdef CONFIG_PAX_MPROTECT
38555+ if (mm->pax_flags & MF_PAX_MPROTECT)
38556+ vm_flags &= ~VM_MAYEXEC;
38557+#endif
38558+
38559+ }
38560+#endif
38561+
38562 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
38563 vm_flags);
38564 if (ret)
38565 goto out_unlock;
38566 BUG_ON(prev != vma);
38567
38568- /* Move stack pages down in memory. */
38569- if (stack_shift) {
38570- ret = shift_arg_pages(vma, stack_shift);
38571- if (ret)
38572- goto out_unlock;
38573- }
38574-
38575 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
38576 stack_size = vma->vm_end - vma->vm_start;
38577 /*
38578@@ -707,7 +736,7 @@ struct file *open_exec(const char *name)
38579 int err;
38580
38581 file = do_filp_open(AT_FDCWD, name,
38582- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
38583+ O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
38584 MAY_EXEC | MAY_OPEN);
38585 if (IS_ERR(file))
38586 goto out;
38587@@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_
38588 old_fs = get_fs();
38589 set_fs(get_ds());
38590 /* The cast to a user pointer is valid due to the set_fs() */
38591- result = vfs_read(file, (void __user *)addr, count, &pos);
38592+ result = vfs_read(file, (__force void __user *)addr, count, &pos);
38593 set_fs(old_fs);
38594 return result;
38595 }
38596@@ -1152,7 +1181,7 @@ int check_unsafe_exec(struct linux_binpr
38597 }
38598 rcu_read_unlock();
38599
38600- if (p->fs->users > n_fs) {
38601+ if (atomic_read(&p->fs->users) > n_fs) {
38602 bprm->unsafe |= LSM_UNSAFE_SHARE;
38603 } else {
38604 res = -EAGAIN;
38605@@ -1347,6 +1376,11 @@ int do_execve(char * filename,
38606 char __user *__user *envp,
38607 struct pt_regs * regs)
38608 {
38609+#ifdef CONFIG_GRKERNSEC
38610+ struct file *old_exec_file;
38611+ struct acl_subject_label *old_acl;
38612+ struct rlimit old_rlim[RLIM_NLIMITS];
38613+#endif
38614 struct linux_binprm *bprm;
38615 struct file *file;
38616 struct files_struct *displaced;
38617@@ -1383,6 +1417,23 @@ int do_execve(char * filename,
38618 bprm->filename = filename;
38619 bprm->interp = filename;
38620
38621+ if (gr_process_user_ban()) {
38622+ retval = -EPERM;
38623+ goto out_file;
38624+ }
38625+
38626+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
38627+
38628+ if (gr_handle_nproc()) {
38629+ retval = -EAGAIN;
38630+ goto out_file;
38631+ }
38632+
38633+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
38634+ retval = -EACCES;
38635+ goto out_file;
38636+ }
38637+
38638 retval = bprm_mm_init(bprm);
38639 if (retval)
38640 goto out_file;
38641@@ -1412,10 +1463,41 @@ int do_execve(char * filename,
38642 if (retval < 0)
38643 goto out;
38644
38645+ if (!gr_tpe_allow(file)) {
38646+ retval = -EACCES;
38647+ goto out;
38648+ }
38649+
38650+ if (gr_check_crash_exec(file)) {
38651+ retval = -EACCES;
38652+ goto out;
38653+ }
38654+
38655+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
38656+
38657+ gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
38658+
38659+#ifdef CONFIG_GRKERNSEC
38660+ old_acl = current->acl;
38661+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
38662+ old_exec_file = current->exec_file;
38663+ get_file(file);
38664+ current->exec_file = file;
38665+#endif
38666+
38667+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
38668+ bprm->unsafe & LSM_UNSAFE_SHARE);
38669+ if (retval < 0)
38670+ goto out_fail;
38671+
38672 current->flags &= ~PF_KTHREAD;
38673 retval = search_binary_handler(bprm,regs);
38674 if (retval < 0)
38675- goto out;
38676+ goto out_fail;
38677+#ifdef CONFIG_GRKERNSEC
38678+ if (old_exec_file)
38679+ fput(old_exec_file);
38680+#endif
38681
38682 /* execve succeeded */
38683 current->fs->in_exec = 0;
38684@@ -1426,6 +1508,14 @@ int do_execve(char * filename,
38685 put_files_struct(displaced);
38686 return retval;
38687
38688+out_fail:
38689+#ifdef CONFIG_GRKERNSEC
38690+ current->acl = old_acl;
38691+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
38692+ fput(current->exec_file);
38693+ current->exec_file = old_exec_file;
38694+#endif
38695+
38696 out:
38697 if (bprm->mm) {
38698 acct_arg_size(bprm, 0);
38699@@ -1591,6 +1681,220 @@ out:
38700 return ispipe;
38701 }
38702
38703+int pax_check_flags(unsigned long *flags)
38704+{
38705+ int retval = 0;
38706+
38707+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
38708+ if (*flags & MF_PAX_SEGMEXEC)
38709+ {
38710+ *flags &= ~MF_PAX_SEGMEXEC;
38711+ retval = -EINVAL;
38712+ }
38713+#endif
38714+
38715+ if ((*flags & MF_PAX_PAGEEXEC)
38716+
38717+#ifdef CONFIG_PAX_PAGEEXEC
38718+ && (*flags & MF_PAX_SEGMEXEC)
38719+#endif
38720+
38721+ )
38722+ {
38723+ *flags &= ~MF_PAX_PAGEEXEC;
38724+ retval = -EINVAL;
38725+ }
38726+
38727+ if ((*flags & MF_PAX_MPROTECT)
38728+
38729+#ifdef CONFIG_PAX_MPROTECT
38730+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
38731+#endif
38732+
38733+ )
38734+ {
38735+ *flags &= ~MF_PAX_MPROTECT;
38736+ retval = -EINVAL;
38737+ }
38738+
38739+ if ((*flags & MF_PAX_EMUTRAMP)
38740+
38741+#ifdef CONFIG_PAX_EMUTRAMP
38742+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
38743+#endif
38744+
38745+ )
38746+ {
38747+ *flags &= ~MF_PAX_EMUTRAMP;
38748+ retval = -EINVAL;
38749+ }
38750+
38751+ return retval;
38752+}
38753+
38754+EXPORT_SYMBOL(pax_check_flags);
38755+
38756+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
38757+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
38758+{
38759+ struct task_struct *tsk = current;
38760+ struct mm_struct *mm = current->mm;
38761+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
38762+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
38763+ char *path_exec = NULL;
38764+ char *path_fault = NULL;
38765+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
38766+
38767+ if (buffer_exec && buffer_fault) {
38768+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
38769+
38770+ down_read(&mm->mmap_sem);
38771+ vma = mm->mmap;
38772+ while (vma && (!vma_exec || !vma_fault)) {
38773+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
38774+ vma_exec = vma;
38775+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
38776+ vma_fault = vma;
38777+ vma = vma->vm_next;
38778+ }
38779+ if (vma_exec) {
38780+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
38781+ if (IS_ERR(path_exec))
38782+ path_exec = "<path too long>";
38783+ else {
38784+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
38785+ if (path_exec) {
38786+ *path_exec = 0;
38787+ path_exec = buffer_exec;
38788+ } else
38789+ path_exec = "<path too long>";
38790+ }
38791+ }
38792+ if (vma_fault) {
38793+ start = vma_fault->vm_start;
38794+ end = vma_fault->vm_end;
38795+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
38796+ if (vma_fault->vm_file) {
38797+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
38798+ if (IS_ERR(path_fault))
38799+ path_fault = "<path too long>";
38800+ else {
38801+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
38802+ if (path_fault) {
38803+ *path_fault = 0;
38804+ path_fault = buffer_fault;
38805+ } else
38806+ path_fault = "<path too long>";
38807+ }
38808+ } else
38809+ path_fault = "<anonymous mapping>";
38810+ }
38811+ up_read(&mm->mmap_sem);
38812+ }
38813+ if (tsk->signal->curr_ip)
38814+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
38815+ else
38816+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
38817+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
38818+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
38819+ task_uid(tsk), task_euid(tsk), pc, sp);
38820+ free_page((unsigned long)buffer_exec);
38821+ free_page((unsigned long)buffer_fault);
38822+ pax_report_insns(pc, sp);
38823+ do_coredump(SIGKILL, SIGKILL, regs);
38824+}
38825+#endif
38826+
38827+#ifdef CONFIG_PAX_REFCOUNT
38828+void pax_report_refcount_overflow(struct pt_regs *regs)
38829+{
38830+ if (current->signal->curr_ip)
38831+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
38832+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
38833+ else
38834+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
38835+ current->comm, task_pid_nr(current), current_uid(), current_euid());
38836+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
38837+ show_regs(regs);
38838+ force_sig_specific(SIGKILL, current);
38839+}
38840+#endif
38841+
38842+#ifdef CONFIG_PAX_USERCOPY
38843+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
38844+int object_is_on_stack(const void *obj, unsigned long len)
38845+{
38846+ const void * const stack = task_stack_page(current);
38847+ const void * const stackend = stack + THREAD_SIZE;
38848+
38849+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38850+ const void *frame = NULL;
38851+ const void *oldframe;
38852+#endif
38853+
38854+ if (obj + len < obj)
38855+ return -1;
38856+
38857+ if (obj + len <= stack || stackend <= obj)
38858+ return 0;
38859+
38860+ if (obj < stack || stackend < obj + len)
38861+ return -1;
38862+
38863+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
38864+ oldframe = __builtin_frame_address(1);
38865+ if (oldframe)
38866+ frame = __builtin_frame_address(2);
38867+ /*
38868+ low ----------------------------------------------> high
38869+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
38870+ ^----------------^
38871+ allow copies only within here
38872+ */
38873+ while (stack <= frame && frame < stackend) {
38874+ /* if obj + len extends past the last frame, this
38875+ check won't pass and the next frame will be 0,
38876+ causing us to bail out and correctly report
38877+ the copy as invalid
38878+ */
38879+ if (obj + len <= frame)
38880+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
38881+ oldframe = frame;
38882+ frame = *(const void * const *)frame;
38883+ }
38884+ return -1;
38885+#else
38886+ return 1;
38887+#endif
38888+}
38889+
38890+
38891+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
38892+{
38893+ if (current->signal->curr_ip)
38894+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38895+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38896+ else
38897+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
38898+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
38899+
38900+ dump_stack();
38901+ gr_handle_kernel_exploit();
38902+ do_group_exit(SIGKILL);
38903+}
38904+#endif
38905+
38906+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
38907+void pax_track_stack(void)
38908+{
38909+ unsigned long sp = (unsigned long)&sp;
38910+ if (sp < current_thread_info()->lowest_stack &&
38911+ sp > (unsigned long)task_stack_page(current))
38912+ current_thread_info()->lowest_stack = sp;
38913+}
38914+EXPORT_SYMBOL(pax_track_stack);
38915+#endif
38916+
38917 static int zap_process(struct task_struct *start)
38918 {
38919 struct task_struct *t;
38920@@ -1793,17 +2097,17 @@ static void wait_for_dump_helpers(struct
38921 pipe = file->f_path.dentry->d_inode->i_pipe;
38922
38923 pipe_lock(pipe);
38924- pipe->readers++;
38925- pipe->writers--;
38926+ atomic_inc(&pipe->readers);
38927+ atomic_dec(&pipe->writers);
38928
38929- while ((pipe->readers > 1) && (!signal_pending(current))) {
38930+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
38931 wake_up_interruptible_sync(&pipe->wait);
38932 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
38933 pipe_wait(pipe);
38934 }
38935
38936- pipe->readers--;
38937- pipe->writers++;
38938+ atomic_dec(&pipe->readers);
38939+ atomic_inc(&pipe->writers);
38940 pipe_unlock(pipe);
38941
38942 }
38943@@ -1826,10 +2130,13 @@ void do_coredump(long signr, int exit_co
38944 char **helper_argv = NULL;
38945 int helper_argc = 0;
38946 int dump_count = 0;
38947- static atomic_t core_dump_count = ATOMIC_INIT(0);
38948+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
38949
38950 audit_core_dumps(signr);
38951
38952+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
38953+ gr_handle_brute_attach(current, mm->flags);
38954+
38955 binfmt = mm->binfmt;
38956 if (!binfmt || !binfmt->core_dump)
38957 goto fail;
38958@@ -1874,6 +2181,8 @@ void do_coredump(long signr, int exit_co
38959 */
38960 clear_thread_flag(TIF_SIGPENDING);
38961
38962+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
38963+
38964 /*
38965 * lock_kernel() because format_corename() is controlled by sysctl, which
38966 * uses lock_kernel()
38967@@ -1908,7 +2217,7 @@ void do_coredump(long signr, int exit_co
38968 goto fail_unlock;
38969 }
38970
38971- dump_count = atomic_inc_return(&core_dump_count);
38972+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
38973 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
38974 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
38975 task_tgid_vnr(current), current->comm);
38976@@ -1972,7 +2281,7 @@ close_fail:
38977 filp_close(file, NULL);
38978 fail_dropcount:
38979 if (dump_count)
38980- atomic_dec(&core_dump_count);
38981+ atomic_dec_unchecked(&core_dump_count);
38982 fail_unlock:
38983 if (helper_argv)
38984 argv_free(helper_argv);
38985diff -urNp linux-2.6.32.43/fs/ext2/balloc.c linux-2.6.32.43/fs/ext2/balloc.c
38986--- linux-2.6.32.43/fs/ext2/balloc.c 2011-03-27 14:31:47.000000000 -0400
38987+++ linux-2.6.32.43/fs/ext2/balloc.c 2011-04-17 15:56:46.000000000 -0400
38988@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
38989
38990 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
38991 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
38992- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
38993+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
38994 sbi->s_resuid != current_fsuid() &&
38995 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
38996 return 0;
38997diff -urNp linux-2.6.32.43/fs/ext3/balloc.c linux-2.6.32.43/fs/ext3/balloc.c
38998--- linux-2.6.32.43/fs/ext3/balloc.c 2011-03-27 14:31:47.000000000 -0400
38999+++ linux-2.6.32.43/fs/ext3/balloc.c 2011-04-17 15:56:46.000000000 -0400
39000@@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
39001
39002 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
39003 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
39004- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
39005+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
39006 sbi->s_resuid != current_fsuid() &&
39007 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
39008 return 0;
39009diff -urNp linux-2.6.32.43/fs/ext4/balloc.c linux-2.6.32.43/fs/ext4/balloc.c
39010--- linux-2.6.32.43/fs/ext4/balloc.c 2011-03-27 14:31:47.000000000 -0400
39011+++ linux-2.6.32.43/fs/ext4/balloc.c 2011-04-17 15:56:46.000000000 -0400
39012@@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_
39013 /* Hm, nope. Are (enough) root reserved blocks available? */
39014 if (sbi->s_resuid == current_fsuid() ||
39015 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
39016- capable(CAP_SYS_RESOURCE)) {
39017+ capable_nolog(CAP_SYS_RESOURCE)) {
39018 if (free_blocks >= (nblocks + dirty_blocks))
39019 return 1;
39020 }
39021diff -urNp linux-2.6.32.43/fs/ext4/ext4.h linux-2.6.32.43/fs/ext4/ext4.h
39022--- linux-2.6.32.43/fs/ext4/ext4.h 2011-03-27 14:31:47.000000000 -0400
39023+++ linux-2.6.32.43/fs/ext4/ext4.h 2011-04-17 15:56:46.000000000 -0400
39024@@ -1078,19 +1078,19 @@ struct ext4_sb_info {
39025
39026 /* stats for buddy allocator */
39027 spinlock_t s_mb_pa_lock;
39028- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
39029- atomic_t s_bal_success; /* we found long enough chunks */
39030- atomic_t s_bal_allocated; /* in blocks */
39031- atomic_t s_bal_ex_scanned; /* total extents scanned */
39032- atomic_t s_bal_goals; /* goal hits */
39033- atomic_t s_bal_breaks; /* too long searches */
39034- atomic_t s_bal_2orders; /* 2^order hits */
39035+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
39036+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
39037+ atomic_unchecked_t s_bal_allocated; /* in blocks */
39038+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
39039+ atomic_unchecked_t s_bal_goals; /* goal hits */
39040+ atomic_unchecked_t s_bal_breaks; /* too long searches */
39041+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
39042 spinlock_t s_bal_lock;
39043 unsigned long s_mb_buddies_generated;
39044 unsigned long long s_mb_generation_time;
39045- atomic_t s_mb_lost_chunks;
39046- atomic_t s_mb_preallocated;
39047- atomic_t s_mb_discarded;
39048+ atomic_unchecked_t s_mb_lost_chunks;
39049+ atomic_unchecked_t s_mb_preallocated;
39050+ atomic_unchecked_t s_mb_discarded;
39051 atomic_t s_lock_busy;
39052
39053 /* locality groups */
39054diff -urNp linux-2.6.32.43/fs/ext4/mballoc.c linux-2.6.32.43/fs/ext4/mballoc.c
39055--- linux-2.6.32.43/fs/ext4/mballoc.c 2011-06-25 12:55:34.000000000 -0400
39056+++ linux-2.6.32.43/fs/ext4/mballoc.c 2011-06-25 12:56:37.000000000 -0400
39057@@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ex
39058 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
39059
39060 if (EXT4_SB(sb)->s_mb_stats)
39061- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
39062+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
39063
39064 break;
39065 }
39066@@ -2131,7 +2131,7 @@ repeat:
39067 ac->ac_status = AC_STATUS_CONTINUE;
39068 ac->ac_flags |= EXT4_MB_HINT_FIRST;
39069 cr = 3;
39070- atomic_inc(&sbi->s_mb_lost_chunks);
39071+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
39072 goto repeat;
39073 }
39074 }
39075@@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struc
39076 ext4_grpblk_t counters[16];
39077 } sg;
39078
39079+ pax_track_stack();
39080+
39081 group--;
39082 if (group == 0)
39083 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
39084@@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *
39085 if (sbi->s_mb_stats) {
39086 printk(KERN_INFO
39087 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
39088- atomic_read(&sbi->s_bal_allocated),
39089- atomic_read(&sbi->s_bal_reqs),
39090- atomic_read(&sbi->s_bal_success));
39091+ atomic_read_unchecked(&sbi->s_bal_allocated),
39092+ atomic_read_unchecked(&sbi->s_bal_reqs),
39093+ atomic_read_unchecked(&sbi->s_bal_success));
39094 printk(KERN_INFO
39095 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
39096 "%u 2^N hits, %u breaks, %u lost\n",
39097- atomic_read(&sbi->s_bal_ex_scanned),
39098- atomic_read(&sbi->s_bal_goals),
39099- atomic_read(&sbi->s_bal_2orders),
39100- atomic_read(&sbi->s_bal_breaks),
39101- atomic_read(&sbi->s_mb_lost_chunks));
39102+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
39103+ atomic_read_unchecked(&sbi->s_bal_goals),
39104+ atomic_read_unchecked(&sbi->s_bal_2orders),
39105+ atomic_read_unchecked(&sbi->s_bal_breaks),
39106+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
39107 printk(KERN_INFO
39108 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
39109 sbi->s_mb_buddies_generated++,
39110 sbi->s_mb_generation_time);
39111 printk(KERN_INFO
39112 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
39113- atomic_read(&sbi->s_mb_preallocated),
39114- atomic_read(&sbi->s_mb_discarded));
39115+ atomic_read_unchecked(&sbi->s_mb_preallocated),
39116+ atomic_read_unchecked(&sbi->s_mb_discarded));
39117 }
39118
39119 free_percpu(sbi->s_locality_groups);
39120@@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct
39121 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
39122
39123 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
39124- atomic_inc(&sbi->s_bal_reqs);
39125- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
39126+ atomic_inc_unchecked(&sbi->s_bal_reqs);
39127+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
39128 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
39129- atomic_inc(&sbi->s_bal_success);
39130- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
39131+ atomic_inc_unchecked(&sbi->s_bal_success);
39132+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
39133 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
39134 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
39135- atomic_inc(&sbi->s_bal_goals);
39136+ atomic_inc_unchecked(&sbi->s_bal_goals);
39137 if (ac->ac_found > sbi->s_mb_max_to_scan)
39138- atomic_inc(&sbi->s_bal_breaks);
39139+ atomic_inc_unchecked(&sbi->s_bal_breaks);
39140 }
39141
39142 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
39143@@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
39144 trace_ext4_mb_new_inode_pa(ac, pa);
39145
39146 ext4_mb_use_inode_pa(ac, pa);
39147- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39148+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39149
39150 ei = EXT4_I(ac->ac_inode);
39151 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
39152@@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
39153 trace_ext4_mb_new_group_pa(ac, pa);
39154
39155 ext4_mb_use_group_pa(ac, pa);
39156- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39157+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
39158
39159 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
39160 lg = ac->ac_lg;
39161@@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
39162 * from the bitmap and continue.
39163 */
39164 }
39165- atomic_add(free, &sbi->s_mb_discarded);
39166+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
39167
39168 return err;
39169 }
39170@@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_bud
39171 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
39172 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
39173 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
39174- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
39175+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
39176
39177 if (ac) {
39178 ac->ac_sb = sb;
39179diff -urNp linux-2.6.32.43/fs/ext4/super.c linux-2.6.32.43/fs/ext4/super.c
39180--- linux-2.6.32.43/fs/ext4/super.c 2011-03-27 14:31:47.000000000 -0400
39181+++ linux-2.6.32.43/fs/ext4/super.c 2011-04-17 15:56:46.000000000 -0400
39182@@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobje
39183 }
39184
39185
39186-static struct sysfs_ops ext4_attr_ops = {
39187+static const struct sysfs_ops ext4_attr_ops = {
39188 .show = ext4_attr_show,
39189 .store = ext4_attr_store,
39190 };
39191diff -urNp linux-2.6.32.43/fs/fcntl.c linux-2.6.32.43/fs/fcntl.c
39192--- linux-2.6.32.43/fs/fcntl.c 2011-03-27 14:31:47.000000000 -0400
39193+++ linux-2.6.32.43/fs/fcntl.c 2011-04-17 15:56:46.000000000 -0400
39194@@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct
39195 if (err)
39196 return err;
39197
39198+ if (gr_handle_chroot_fowner(pid, type))
39199+ return -ENOENT;
39200+ if (gr_check_protected_task_fowner(pid, type))
39201+ return -EACCES;
39202+
39203 f_modown(filp, pid, type, force);
39204 return 0;
39205 }
39206@@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned in
39207 switch (cmd) {
39208 case F_DUPFD:
39209 case F_DUPFD_CLOEXEC:
39210+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
39211 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
39212 break;
39213 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
39214diff -urNp linux-2.6.32.43/fs/fifo.c linux-2.6.32.43/fs/fifo.c
39215--- linux-2.6.32.43/fs/fifo.c 2011-03-27 14:31:47.000000000 -0400
39216+++ linux-2.6.32.43/fs/fifo.c 2011-04-17 15:56:46.000000000 -0400
39217@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
39218 */
39219 filp->f_op = &read_pipefifo_fops;
39220 pipe->r_counter++;
39221- if (pipe->readers++ == 0)
39222+ if (atomic_inc_return(&pipe->readers) == 1)
39223 wake_up_partner(inode);
39224
39225- if (!pipe->writers) {
39226+ if (!atomic_read(&pipe->writers)) {
39227 if ((filp->f_flags & O_NONBLOCK)) {
39228 /* suppress POLLHUP until we have
39229 * seen a writer */
39230@@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
39231 * errno=ENXIO when there is no process reading the FIFO.
39232 */
39233 ret = -ENXIO;
39234- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
39235+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
39236 goto err;
39237
39238 filp->f_op = &write_pipefifo_fops;
39239 pipe->w_counter++;
39240- if (!pipe->writers++)
39241+ if (atomic_inc_return(&pipe->writers) == 1)
39242 wake_up_partner(inode);
39243
39244- if (!pipe->readers) {
39245+ if (!atomic_read(&pipe->readers)) {
39246 wait_for_partner(inode, &pipe->r_counter);
39247 if (signal_pending(current))
39248 goto err_wr;
39249@@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
39250 */
39251 filp->f_op = &rdwr_pipefifo_fops;
39252
39253- pipe->readers++;
39254- pipe->writers++;
39255+ atomic_inc(&pipe->readers);
39256+ atomic_inc(&pipe->writers);
39257 pipe->r_counter++;
39258 pipe->w_counter++;
39259- if (pipe->readers == 1 || pipe->writers == 1)
39260+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
39261 wake_up_partner(inode);
39262 break;
39263
39264@@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
39265 return 0;
39266
39267 err_rd:
39268- if (!--pipe->readers)
39269+ if (atomic_dec_and_test(&pipe->readers))
39270 wake_up_interruptible(&pipe->wait);
39271 ret = -ERESTARTSYS;
39272 goto err;
39273
39274 err_wr:
39275- if (!--pipe->writers)
39276+ if (atomic_dec_and_test(&pipe->writers))
39277 wake_up_interruptible(&pipe->wait);
39278 ret = -ERESTARTSYS;
39279 goto err;
39280
39281 err:
39282- if (!pipe->readers && !pipe->writers)
39283+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
39284 free_pipe_info(inode);
39285
39286 err_nocleanup:
39287diff -urNp linux-2.6.32.43/fs/file.c linux-2.6.32.43/fs/file.c
39288--- linux-2.6.32.43/fs/file.c 2011-03-27 14:31:47.000000000 -0400
39289+++ linux-2.6.32.43/fs/file.c 2011-04-17 15:56:46.000000000 -0400
39290@@ -14,6 +14,7 @@
39291 #include <linux/slab.h>
39292 #include <linux/vmalloc.h>
39293 #include <linux/file.h>
39294+#include <linux/security.h>
39295 #include <linux/fdtable.h>
39296 #include <linux/bitops.h>
39297 #include <linux/interrupt.h>
39298@@ -257,6 +258,8 @@ int expand_files(struct files_struct *fi
39299 * N.B. For clone tasks sharing a files structure, this test
39300 * will limit the total number of files that can be opened.
39301 */
39302+
39303+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
39304 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
39305 return -EMFILE;
39306
39307diff -urNp linux-2.6.32.43/fs/filesystems.c linux-2.6.32.43/fs/filesystems.c
39308--- linux-2.6.32.43/fs/filesystems.c 2011-03-27 14:31:47.000000000 -0400
39309+++ linux-2.6.32.43/fs/filesystems.c 2011-04-17 15:56:46.000000000 -0400
39310@@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(con
39311 int len = dot ? dot - name : strlen(name);
39312
39313 fs = __get_fs_type(name, len);
39314+
39315+#ifdef CONFIG_GRKERNSEC_MODHARDEN
39316+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
39317+#else
39318 if (!fs && (request_module("%.*s", len, name) == 0))
39319+#endif
39320 fs = __get_fs_type(name, len);
39321
39322 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
39323diff -urNp linux-2.6.32.43/fs/fscache/cookie.c linux-2.6.32.43/fs/fscache/cookie.c
39324--- linux-2.6.32.43/fs/fscache/cookie.c 2011-03-27 14:31:47.000000000 -0400
39325+++ linux-2.6.32.43/fs/fscache/cookie.c 2011-05-04 17:56:28.000000000 -0400
39326@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
39327 parent ? (char *) parent->def->name : "<no-parent>",
39328 def->name, netfs_data);
39329
39330- fscache_stat(&fscache_n_acquires);
39331+ fscache_stat_unchecked(&fscache_n_acquires);
39332
39333 /* if there's no parent cookie, then we don't create one here either */
39334 if (!parent) {
39335- fscache_stat(&fscache_n_acquires_null);
39336+ fscache_stat_unchecked(&fscache_n_acquires_null);
39337 _leave(" [no parent]");
39338 return NULL;
39339 }
39340@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
39341 /* allocate and initialise a cookie */
39342 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
39343 if (!cookie) {
39344- fscache_stat(&fscache_n_acquires_oom);
39345+ fscache_stat_unchecked(&fscache_n_acquires_oom);
39346 _leave(" [ENOMEM]");
39347 return NULL;
39348 }
39349@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
39350
39351 switch (cookie->def->type) {
39352 case FSCACHE_COOKIE_TYPE_INDEX:
39353- fscache_stat(&fscache_n_cookie_index);
39354+ fscache_stat_unchecked(&fscache_n_cookie_index);
39355 break;
39356 case FSCACHE_COOKIE_TYPE_DATAFILE:
39357- fscache_stat(&fscache_n_cookie_data);
39358+ fscache_stat_unchecked(&fscache_n_cookie_data);
39359 break;
39360 default:
39361- fscache_stat(&fscache_n_cookie_special);
39362+ fscache_stat_unchecked(&fscache_n_cookie_special);
39363 break;
39364 }
39365
39366@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
39367 if (fscache_acquire_non_index_cookie(cookie) < 0) {
39368 atomic_dec(&parent->n_children);
39369 __fscache_cookie_put(cookie);
39370- fscache_stat(&fscache_n_acquires_nobufs);
39371+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
39372 _leave(" = NULL");
39373 return NULL;
39374 }
39375 }
39376
39377- fscache_stat(&fscache_n_acquires_ok);
39378+ fscache_stat_unchecked(&fscache_n_acquires_ok);
39379 _leave(" = %p", cookie);
39380 return cookie;
39381 }
39382@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
39383 cache = fscache_select_cache_for_object(cookie->parent);
39384 if (!cache) {
39385 up_read(&fscache_addremove_sem);
39386- fscache_stat(&fscache_n_acquires_no_cache);
39387+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
39388 _leave(" = -ENOMEDIUM [no cache]");
39389 return -ENOMEDIUM;
39390 }
39391@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
39392 object = cache->ops->alloc_object(cache, cookie);
39393 fscache_stat_d(&fscache_n_cop_alloc_object);
39394 if (IS_ERR(object)) {
39395- fscache_stat(&fscache_n_object_no_alloc);
39396+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
39397 ret = PTR_ERR(object);
39398 goto error;
39399 }
39400
39401- fscache_stat(&fscache_n_object_alloc);
39402+ fscache_stat_unchecked(&fscache_n_object_alloc);
39403
39404 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
39405
39406@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
39407 struct fscache_object *object;
39408 struct hlist_node *_p;
39409
39410- fscache_stat(&fscache_n_updates);
39411+ fscache_stat_unchecked(&fscache_n_updates);
39412
39413 if (!cookie) {
39414- fscache_stat(&fscache_n_updates_null);
39415+ fscache_stat_unchecked(&fscache_n_updates_null);
39416 _leave(" [no cookie]");
39417 return;
39418 }
39419@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
39420 struct fscache_object *object;
39421 unsigned long event;
39422
39423- fscache_stat(&fscache_n_relinquishes);
39424+ fscache_stat_unchecked(&fscache_n_relinquishes);
39425 if (retire)
39426- fscache_stat(&fscache_n_relinquishes_retire);
39427+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
39428
39429 if (!cookie) {
39430- fscache_stat(&fscache_n_relinquishes_null);
39431+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
39432 _leave(" [no cookie]");
39433 return;
39434 }
39435@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
39436
39437 /* wait for the cookie to finish being instantiated (or to fail) */
39438 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
39439- fscache_stat(&fscache_n_relinquishes_waitcrt);
39440+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
39441 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
39442 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
39443 }
39444diff -urNp linux-2.6.32.43/fs/fscache/internal.h linux-2.6.32.43/fs/fscache/internal.h
39445--- linux-2.6.32.43/fs/fscache/internal.h 2011-03-27 14:31:47.000000000 -0400
39446+++ linux-2.6.32.43/fs/fscache/internal.h 2011-05-04 17:56:28.000000000 -0400
39447@@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
39448 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
39449 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
39450
39451-extern atomic_t fscache_n_op_pend;
39452-extern atomic_t fscache_n_op_run;
39453-extern atomic_t fscache_n_op_enqueue;
39454-extern atomic_t fscache_n_op_deferred_release;
39455-extern atomic_t fscache_n_op_release;
39456-extern atomic_t fscache_n_op_gc;
39457-extern atomic_t fscache_n_op_cancelled;
39458-extern atomic_t fscache_n_op_rejected;
39459-
39460-extern atomic_t fscache_n_attr_changed;
39461-extern atomic_t fscache_n_attr_changed_ok;
39462-extern atomic_t fscache_n_attr_changed_nobufs;
39463-extern atomic_t fscache_n_attr_changed_nomem;
39464-extern atomic_t fscache_n_attr_changed_calls;
39465-
39466-extern atomic_t fscache_n_allocs;
39467-extern atomic_t fscache_n_allocs_ok;
39468-extern atomic_t fscache_n_allocs_wait;
39469-extern atomic_t fscache_n_allocs_nobufs;
39470-extern atomic_t fscache_n_allocs_intr;
39471-extern atomic_t fscache_n_allocs_object_dead;
39472-extern atomic_t fscache_n_alloc_ops;
39473-extern atomic_t fscache_n_alloc_op_waits;
39474-
39475-extern atomic_t fscache_n_retrievals;
39476-extern atomic_t fscache_n_retrievals_ok;
39477-extern atomic_t fscache_n_retrievals_wait;
39478-extern atomic_t fscache_n_retrievals_nodata;
39479-extern atomic_t fscache_n_retrievals_nobufs;
39480-extern atomic_t fscache_n_retrievals_intr;
39481-extern atomic_t fscache_n_retrievals_nomem;
39482-extern atomic_t fscache_n_retrievals_object_dead;
39483-extern atomic_t fscache_n_retrieval_ops;
39484-extern atomic_t fscache_n_retrieval_op_waits;
39485-
39486-extern atomic_t fscache_n_stores;
39487-extern atomic_t fscache_n_stores_ok;
39488-extern atomic_t fscache_n_stores_again;
39489-extern atomic_t fscache_n_stores_nobufs;
39490-extern atomic_t fscache_n_stores_oom;
39491-extern atomic_t fscache_n_store_ops;
39492-extern atomic_t fscache_n_store_calls;
39493-extern atomic_t fscache_n_store_pages;
39494-extern atomic_t fscache_n_store_radix_deletes;
39495-extern atomic_t fscache_n_store_pages_over_limit;
39496-
39497-extern atomic_t fscache_n_store_vmscan_not_storing;
39498-extern atomic_t fscache_n_store_vmscan_gone;
39499-extern atomic_t fscache_n_store_vmscan_busy;
39500-extern atomic_t fscache_n_store_vmscan_cancelled;
39501-
39502-extern atomic_t fscache_n_marks;
39503-extern atomic_t fscache_n_uncaches;
39504-
39505-extern atomic_t fscache_n_acquires;
39506-extern atomic_t fscache_n_acquires_null;
39507-extern atomic_t fscache_n_acquires_no_cache;
39508-extern atomic_t fscache_n_acquires_ok;
39509-extern atomic_t fscache_n_acquires_nobufs;
39510-extern atomic_t fscache_n_acquires_oom;
39511-
39512-extern atomic_t fscache_n_updates;
39513-extern atomic_t fscache_n_updates_null;
39514-extern atomic_t fscache_n_updates_run;
39515-
39516-extern atomic_t fscache_n_relinquishes;
39517-extern atomic_t fscache_n_relinquishes_null;
39518-extern atomic_t fscache_n_relinquishes_waitcrt;
39519-extern atomic_t fscache_n_relinquishes_retire;
39520-
39521-extern atomic_t fscache_n_cookie_index;
39522-extern atomic_t fscache_n_cookie_data;
39523-extern atomic_t fscache_n_cookie_special;
39524-
39525-extern atomic_t fscache_n_object_alloc;
39526-extern atomic_t fscache_n_object_no_alloc;
39527-extern atomic_t fscache_n_object_lookups;
39528-extern atomic_t fscache_n_object_lookups_negative;
39529-extern atomic_t fscache_n_object_lookups_positive;
39530-extern atomic_t fscache_n_object_lookups_timed_out;
39531-extern atomic_t fscache_n_object_created;
39532-extern atomic_t fscache_n_object_avail;
39533-extern atomic_t fscache_n_object_dead;
39534-
39535-extern atomic_t fscache_n_checkaux_none;
39536-extern atomic_t fscache_n_checkaux_okay;
39537-extern atomic_t fscache_n_checkaux_update;
39538-extern atomic_t fscache_n_checkaux_obsolete;
39539+extern atomic_unchecked_t fscache_n_op_pend;
39540+extern atomic_unchecked_t fscache_n_op_run;
39541+extern atomic_unchecked_t fscache_n_op_enqueue;
39542+extern atomic_unchecked_t fscache_n_op_deferred_release;
39543+extern atomic_unchecked_t fscache_n_op_release;
39544+extern atomic_unchecked_t fscache_n_op_gc;
39545+extern atomic_unchecked_t fscache_n_op_cancelled;
39546+extern atomic_unchecked_t fscache_n_op_rejected;
39547+
39548+extern atomic_unchecked_t fscache_n_attr_changed;
39549+extern atomic_unchecked_t fscache_n_attr_changed_ok;
39550+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
39551+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
39552+extern atomic_unchecked_t fscache_n_attr_changed_calls;
39553+
39554+extern atomic_unchecked_t fscache_n_allocs;
39555+extern atomic_unchecked_t fscache_n_allocs_ok;
39556+extern atomic_unchecked_t fscache_n_allocs_wait;
39557+extern atomic_unchecked_t fscache_n_allocs_nobufs;
39558+extern atomic_unchecked_t fscache_n_allocs_intr;
39559+extern atomic_unchecked_t fscache_n_allocs_object_dead;
39560+extern atomic_unchecked_t fscache_n_alloc_ops;
39561+extern atomic_unchecked_t fscache_n_alloc_op_waits;
39562+
39563+extern atomic_unchecked_t fscache_n_retrievals;
39564+extern atomic_unchecked_t fscache_n_retrievals_ok;
39565+extern atomic_unchecked_t fscache_n_retrievals_wait;
39566+extern atomic_unchecked_t fscache_n_retrievals_nodata;
39567+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
39568+extern atomic_unchecked_t fscache_n_retrievals_intr;
39569+extern atomic_unchecked_t fscache_n_retrievals_nomem;
39570+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
39571+extern atomic_unchecked_t fscache_n_retrieval_ops;
39572+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
39573+
39574+extern atomic_unchecked_t fscache_n_stores;
39575+extern atomic_unchecked_t fscache_n_stores_ok;
39576+extern atomic_unchecked_t fscache_n_stores_again;
39577+extern atomic_unchecked_t fscache_n_stores_nobufs;
39578+extern atomic_unchecked_t fscache_n_stores_oom;
39579+extern atomic_unchecked_t fscache_n_store_ops;
39580+extern atomic_unchecked_t fscache_n_store_calls;
39581+extern atomic_unchecked_t fscache_n_store_pages;
39582+extern atomic_unchecked_t fscache_n_store_radix_deletes;
39583+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
39584+
39585+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
39586+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
39587+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
39588+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
39589+
39590+extern atomic_unchecked_t fscache_n_marks;
39591+extern atomic_unchecked_t fscache_n_uncaches;
39592+
39593+extern atomic_unchecked_t fscache_n_acquires;
39594+extern atomic_unchecked_t fscache_n_acquires_null;
39595+extern atomic_unchecked_t fscache_n_acquires_no_cache;
39596+extern atomic_unchecked_t fscache_n_acquires_ok;
39597+extern atomic_unchecked_t fscache_n_acquires_nobufs;
39598+extern atomic_unchecked_t fscache_n_acquires_oom;
39599+
39600+extern atomic_unchecked_t fscache_n_updates;
39601+extern atomic_unchecked_t fscache_n_updates_null;
39602+extern atomic_unchecked_t fscache_n_updates_run;
39603+
39604+extern atomic_unchecked_t fscache_n_relinquishes;
39605+extern atomic_unchecked_t fscache_n_relinquishes_null;
39606+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
39607+extern atomic_unchecked_t fscache_n_relinquishes_retire;
39608+
39609+extern atomic_unchecked_t fscache_n_cookie_index;
39610+extern atomic_unchecked_t fscache_n_cookie_data;
39611+extern atomic_unchecked_t fscache_n_cookie_special;
39612+
39613+extern atomic_unchecked_t fscache_n_object_alloc;
39614+extern atomic_unchecked_t fscache_n_object_no_alloc;
39615+extern atomic_unchecked_t fscache_n_object_lookups;
39616+extern atomic_unchecked_t fscache_n_object_lookups_negative;
39617+extern atomic_unchecked_t fscache_n_object_lookups_positive;
39618+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
39619+extern atomic_unchecked_t fscache_n_object_created;
39620+extern atomic_unchecked_t fscache_n_object_avail;
39621+extern atomic_unchecked_t fscache_n_object_dead;
39622+
39623+extern atomic_unchecked_t fscache_n_checkaux_none;
39624+extern atomic_unchecked_t fscache_n_checkaux_okay;
39625+extern atomic_unchecked_t fscache_n_checkaux_update;
39626+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
39627
39628 extern atomic_t fscache_n_cop_alloc_object;
39629 extern atomic_t fscache_n_cop_lookup_object;
39630@@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t
39631 atomic_inc(stat);
39632 }
39633
39634+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
39635+{
39636+ atomic_inc_unchecked(stat);
39637+}
39638+
39639 static inline void fscache_stat_d(atomic_t *stat)
39640 {
39641 atomic_dec(stat);
39642@@ -259,6 +264,7 @@ extern const struct file_operations fsca
39643
39644 #define __fscache_stat(stat) (NULL)
39645 #define fscache_stat(stat) do {} while (0)
39646+#define fscache_stat_unchecked(stat) do {} while (0)
39647 #define fscache_stat_d(stat) do {} while (0)
39648 #endif
39649
39650diff -urNp linux-2.6.32.43/fs/fscache/object.c linux-2.6.32.43/fs/fscache/object.c
39651--- linux-2.6.32.43/fs/fscache/object.c 2011-03-27 14:31:47.000000000 -0400
39652+++ linux-2.6.32.43/fs/fscache/object.c 2011-05-04 17:56:28.000000000 -0400
39653@@ -144,7 +144,7 @@ static void fscache_object_state_machine
39654 /* update the object metadata on disk */
39655 case FSCACHE_OBJECT_UPDATING:
39656 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
39657- fscache_stat(&fscache_n_updates_run);
39658+ fscache_stat_unchecked(&fscache_n_updates_run);
39659 fscache_stat(&fscache_n_cop_update_object);
39660 object->cache->ops->update_object(object);
39661 fscache_stat_d(&fscache_n_cop_update_object);
39662@@ -233,7 +233,7 @@ static void fscache_object_state_machine
39663 spin_lock(&object->lock);
39664 object->state = FSCACHE_OBJECT_DEAD;
39665 spin_unlock(&object->lock);
39666- fscache_stat(&fscache_n_object_dead);
39667+ fscache_stat_unchecked(&fscache_n_object_dead);
39668 goto terminal_transit;
39669
39670 /* handle the parent cache of this object being withdrawn from
39671@@ -248,7 +248,7 @@ static void fscache_object_state_machine
39672 spin_lock(&object->lock);
39673 object->state = FSCACHE_OBJECT_DEAD;
39674 spin_unlock(&object->lock);
39675- fscache_stat(&fscache_n_object_dead);
39676+ fscache_stat_unchecked(&fscache_n_object_dead);
39677 goto terminal_transit;
39678
39679 /* complain about the object being woken up once it is
39680@@ -492,7 +492,7 @@ static void fscache_lookup_object(struct
39681 parent->cookie->def->name, cookie->def->name,
39682 object->cache->tag->name);
39683
39684- fscache_stat(&fscache_n_object_lookups);
39685+ fscache_stat_unchecked(&fscache_n_object_lookups);
39686 fscache_stat(&fscache_n_cop_lookup_object);
39687 ret = object->cache->ops->lookup_object(object);
39688 fscache_stat_d(&fscache_n_cop_lookup_object);
39689@@ -503,7 +503,7 @@ static void fscache_lookup_object(struct
39690 if (ret == -ETIMEDOUT) {
39691 /* probably stuck behind another object, so move this one to
39692 * the back of the queue */
39693- fscache_stat(&fscache_n_object_lookups_timed_out);
39694+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
39695 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
39696 }
39697
39698@@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru
39699
39700 spin_lock(&object->lock);
39701 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
39702- fscache_stat(&fscache_n_object_lookups_negative);
39703+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
39704
39705 /* transit here to allow write requests to begin stacking up
39706 * and read requests to begin returning ENODATA */
39707@@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca
39708 * result, in which case there may be data available */
39709 spin_lock(&object->lock);
39710 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
39711- fscache_stat(&fscache_n_object_lookups_positive);
39712+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
39713
39714 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
39715
39716@@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca
39717 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
39718 } else {
39719 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
39720- fscache_stat(&fscache_n_object_created);
39721+ fscache_stat_unchecked(&fscache_n_object_created);
39722
39723 object->state = FSCACHE_OBJECT_AVAILABLE;
39724 spin_unlock(&object->lock);
39725@@ -633,7 +633,7 @@ static void fscache_object_available(str
39726 fscache_enqueue_dependents(object);
39727
39728 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
39729- fscache_stat(&fscache_n_object_avail);
39730+ fscache_stat_unchecked(&fscache_n_object_avail);
39731
39732 _leave("");
39733 }
39734@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
39735 enum fscache_checkaux result;
39736
39737 if (!object->cookie->def->check_aux) {
39738- fscache_stat(&fscache_n_checkaux_none);
39739+ fscache_stat_unchecked(&fscache_n_checkaux_none);
39740 return FSCACHE_CHECKAUX_OKAY;
39741 }
39742
39743@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
39744 switch (result) {
39745 /* entry okay as is */
39746 case FSCACHE_CHECKAUX_OKAY:
39747- fscache_stat(&fscache_n_checkaux_okay);
39748+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
39749 break;
39750
39751 /* entry requires update */
39752 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
39753- fscache_stat(&fscache_n_checkaux_update);
39754+ fscache_stat_unchecked(&fscache_n_checkaux_update);
39755 break;
39756
39757 /* entry requires deletion */
39758 case FSCACHE_CHECKAUX_OBSOLETE:
39759- fscache_stat(&fscache_n_checkaux_obsolete);
39760+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
39761 break;
39762
39763 default:
39764diff -urNp linux-2.6.32.43/fs/fscache/operation.c linux-2.6.32.43/fs/fscache/operation.c
39765--- linux-2.6.32.43/fs/fscache/operation.c 2011-03-27 14:31:47.000000000 -0400
39766+++ linux-2.6.32.43/fs/fscache/operation.c 2011-05-04 17:56:28.000000000 -0400
39767@@ -16,7 +16,7 @@
39768 #include <linux/seq_file.h>
39769 #include "internal.h"
39770
39771-atomic_t fscache_op_debug_id;
39772+atomic_unchecked_t fscache_op_debug_id;
39773 EXPORT_SYMBOL(fscache_op_debug_id);
39774
39775 /**
39776@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
39777 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
39778 ASSERTCMP(atomic_read(&op->usage), >, 0);
39779
39780- fscache_stat(&fscache_n_op_enqueue);
39781+ fscache_stat_unchecked(&fscache_n_op_enqueue);
39782 switch (op->flags & FSCACHE_OP_TYPE) {
39783 case FSCACHE_OP_FAST:
39784 _debug("queue fast");
39785@@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach
39786 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
39787 if (op->processor)
39788 fscache_enqueue_operation(op);
39789- fscache_stat(&fscache_n_op_run);
39790+ fscache_stat_unchecked(&fscache_n_op_run);
39791 }
39792
39793 /*
39794@@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f
39795 if (object->n_ops > 0) {
39796 atomic_inc(&op->usage);
39797 list_add_tail(&op->pend_link, &object->pending_ops);
39798- fscache_stat(&fscache_n_op_pend);
39799+ fscache_stat_unchecked(&fscache_n_op_pend);
39800 } else if (!list_empty(&object->pending_ops)) {
39801 atomic_inc(&op->usage);
39802 list_add_tail(&op->pend_link, &object->pending_ops);
39803- fscache_stat(&fscache_n_op_pend);
39804+ fscache_stat_unchecked(&fscache_n_op_pend);
39805 fscache_start_operations(object);
39806 } else {
39807 ASSERTCMP(object->n_in_progress, ==, 0);
39808@@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f
39809 object->n_exclusive++; /* reads and writes must wait */
39810 atomic_inc(&op->usage);
39811 list_add_tail(&op->pend_link, &object->pending_ops);
39812- fscache_stat(&fscache_n_op_pend);
39813+ fscache_stat_unchecked(&fscache_n_op_pend);
39814 ret = 0;
39815 } else {
39816 /* not allowed to submit ops in any other state */
39817@@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj
39818 if (object->n_exclusive > 0) {
39819 atomic_inc(&op->usage);
39820 list_add_tail(&op->pend_link, &object->pending_ops);
39821- fscache_stat(&fscache_n_op_pend);
39822+ fscache_stat_unchecked(&fscache_n_op_pend);
39823 } else if (!list_empty(&object->pending_ops)) {
39824 atomic_inc(&op->usage);
39825 list_add_tail(&op->pend_link, &object->pending_ops);
39826- fscache_stat(&fscache_n_op_pend);
39827+ fscache_stat_unchecked(&fscache_n_op_pend);
39828 fscache_start_operations(object);
39829 } else {
39830 ASSERTCMP(object->n_exclusive, ==, 0);
39831@@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj
39832 object->n_ops++;
39833 atomic_inc(&op->usage);
39834 list_add_tail(&op->pend_link, &object->pending_ops);
39835- fscache_stat(&fscache_n_op_pend);
39836+ fscache_stat_unchecked(&fscache_n_op_pend);
39837 ret = 0;
39838 } else if (object->state == FSCACHE_OBJECT_DYING ||
39839 object->state == FSCACHE_OBJECT_LC_DYING ||
39840 object->state == FSCACHE_OBJECT_WITHDRAWING) {
39841- fscache_stat(&fscache_n_op_rejected);
39842+ fscache_stat_unchecked(&fscache_n_op_rejected);
39843 ret = -ENOBUFS;
39844 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
39845 fscache_report_unexpected_submission(object, op, ostate);
39846@@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope
39847
39848 ret = -EBUSY;
39849 if (!list_empty(&op->pend_link)) {
39850- fscache_stat(&fscache_n_op_cancelled);
39851+ fscache_stat_unchecked(&fscache_n_op_cancelled);
39852 list_del_init(&op->pend_link);
39853 object->n_ops--;
39854 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
39855@@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach
39856 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
39857 BUG();
39858
39859- fscache_stat(&fscache_n_op_release);
39860+ fscache_stat_unchecked(&fscache_n_op_release);
39861
39862 if (op->release) {
39863 op->release(op);
39864@@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach
39865 * lock, and defer it otherwise */
39866 if (!spin_trylock(&object->lock)) {
39867 _debug("defer put");
39868- fscache_stat(&fscache_n_op_deferred_release);
39869+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
39870
39871 cache = object->cache;
39872 spin_lock(&cache->op_gc_list_lock);
39873@@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st
39874
39875 _debug("GC DEFERRED REL OBJ%x OP%x",
39876 object->debug_id, op->debug_id);
39877- fscache_stat(&fscache_n_op_gc);
39878+ fscache_stat_unchecked(&fscache_n_op_gc);
39879
39880 ASSERTCMP(atomic_read(&op->usage), ==, 0);
39881
39882diff -urNp linux-2.6.32.43/fs/fscache/page.c linux-2.6.32.43/fs/fscache/page.c
39883--- linux-2.6.32.43/fs/fscache/page.c 2011-03-27 14:31:47.000000000 -0400
39884+++ linux-2.6.32.43/fs/fscache/page.c 2011-05-04 17:56:28.000000000 -0400
39885@@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct
39886 val = radix_tree_lookup(&cookie->stores, page->index);
39887 if (!val) {
39888 rcu_read_unlock();
39889- fscache_stat(&fscache_n_store_vmscan_not_storing);
39890+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
39891 __fscache_uncache_page(cookie, page);
39892 return true;
39893 }
39894@@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct
39895 spin_unlock(&cookie->stores_lock);
39896
39897 if (xpage) {
39898- fscache_stat(&fscache_n_store_vmscan_cancelled);
39899- fscache_stat(&fscache_n_store_radix_deletes);
39900+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
39901+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39902 ASSERTCMP(xpage, ==, page);
39903 } else {
39904- fscache_stat(&fscache_n_store_vmscan_gone);
39905+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
39906 }
39907
39908 wake_up_bit(&cookie->flags, 0);
39909@@ -106,7 +106,7 @@ page_busy:
39910 /* we might want to wait here, but that could deadlock the allocator as
39911 * the slow-work threads writing to the cache may all end up sleeping
39912 * on memory allocation */
39913- fscache_stat(&fscache_n_store_vmscan_busy);
39914+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
39915 return false;
39916 }
39917 EXPORT_SYMBOL(__fscache_maybe_release_page);
39918@@ -130,7 +130,7 @@ static void fscache_end_page_write(struc
39919 FSCACHE_COOKIE_STORING_TAG);
39920 if (!radix_tree_tag_get(&cookie->stores, page->index,
39921 FSCACHE_COOKIE_PENDING_TAG)) {
39922- fscache_stat(&fscache_n_store_radix_deletes);
39923+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
39924 xpage = radix_tree_delete(&cookie->stores, page->index);
39925 }
39926 spin_unlock(&cookie->stores_lock);
39927@@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru
39928
39929 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
39930
39931- fscache_stat(&fscache_n_attr_changed_calls);
39932+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
39933
39934 if (fscache_object_is_active(object)) {
39935 fscache_set_op_state(op, "CallFS");
39936@@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach
39937
39938 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
39939
39940- fscache_stat(&fscache_n_attr_changed);
39941+ fscache_stat_unchecked(&fscache_n_attr_changed);
39942
39943 op = kzalloc(sizeof(*op), GFP_KERNEL);
39944 if (!op) {
39945- fscache_stat(&fscache_n_attr_changed_nomem);
39946+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
39947 _leave(" = -ENOMEM");
39948 return -ENOMEM;
39949 }
39950@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
39951 if (fscache_submit_exclusive_op(object, op) < 0)
39952 goto nobufs;
39953 spin_unlock(&cookie->lock);
39954- fscache_stat(&fscache_n_attr_changed_ok);
39955+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
39956 fscache_put_operation(op);
39957 _leave(" = 0");
39958 return 0;
39959@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
39960 nobufs:
39961 spin_unlock(&cookie->lock);
39962 kfree(op);
39963- fscache_stat(&fscache_n_attr_changed_nobufs);
39964+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
39965 _leave(" = %d", -ENOBUFS);
39966 return -ENOBUFS;
39967 }
39968@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
39969 /* allocate a retrieval operation and attempt to submit it */
39970 op = kzalloc(sizeof(*op), GFP_NOIO);
39971 if (!op) {
39972- fscache_stat(&fscache_n_retrievals_nomem);
39973+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
39974 return NULL;
39975 }
39976
39977@@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo
39978 return 0;
39979 }
39980
39981- fscache_stat(&fscache_n_retrievals_wait);
39982+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
39983
39984 jif = jiffies;
39985 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
39986 fscache_wait_bit_interruptible,
39987 TASK_INTERRUPTIBLE) != 0) {
39988- fscache_stat(&fscache_n_retrievals_intr);
39989+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
39990 _leave(" = -ERESTARTSYS");
39991 return -ERESTARTSYS;
39992 }
39993@@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo
39994 */
39995 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
39996 struct fscache_retrieval *op,
39997- atomic_t *stat_op_waits,
39998- atomic_t *stat_object_dead)
39999+ atomic_unchecked_t *stat_op_waits,
40000+ atomic_unchecked_t *stat_object_dead)
40001 {
40002 int ret;
40003
40004@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac
40005 goto check_if_dead;
40006
40007 _debug(">>> WT");
40008- fscache_stat(stat_op_waits);
40009+ fscache_stat_unchecked(stat_op_waits);
40010 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
40011 fscache_wait_bit_interruptible,
40012 TASK_INTERRUPTIBLE) < 0) {
40013@@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac
40014
40015 check_if_dead:
40016 if (unlikely(fscache_object_is_dead(object))) {
40017- fscache_stat(stat_object_dead);
40018+ fscache_stat_unchecked(stat_object_dead);
40019 return -ENOBUFS;
40020 }
40021 return 0;
40022@@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct
40023
40024 _enter("%p,%p,,,", cookie, page);
40025
40026- fscache_stat(&fscache_n_retrievals);
40027+ fscache_stat_unchecked(&fscache_n_retrievals);
40028
40029 if (hlist_empty(&cookie->backing_objects))
40030 goto nobufs;
40031@@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct
40032 goto nobufs_unlock;
40033 spin_unlock(&cookie->lock);
40034
40035- fscache_stat(&fscache_n_retrieval_ops);
40036+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
40037
40038 /* pin the netfs read context in case we need to do the actual netfs
40039 * read because we've encountered a cache read failure */
40040@@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct
40041
40042 error:
40043 if (ret == -ENOMEM)
40044- fscache_stat(&fscache_n_retrievals_nomem);
40045+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
40046 else if (ret == -ERESTARTSYS)
40047- fscache_stat(&fscache_n_retrievals_intr);
40048+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
40049 else if (ret == -ENODATA)
40050- fscache_stat(&fscache_n_retrievals_nodata);
40051+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
40052 else if (ret < 0)
40053- fscache_stat(&fscache_n_retrievals_nobufs);
40054+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40055 else
40056- fscache_stat(&fscache_n_retrievals_ok);
40057+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
40058
40059 fscache_put_retrieval(op);
40060 _leave(" = %d", ret);
40061@@ -453,7 +453,7 @@ nobufs_unlock:
40062 spin_unlock(&cookie->lock);
40063 kfree(op);
40064 nobufs:
40065- fscache_stat(&fscache_n_retrievals_nobufs);
40066+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40067 _leave(" = -ENOBUFS");
40068 return -ENOBUFS;
40069 }
40070@@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct
40071
40072 _enter("%p,,%d,,,", cookie, *nr_pages);
40073
40074- fscache_stat(&fscache_n_retrievals);
40075+ fscache_stat_unchecked(&fscache_n_retrievals);
40076
40077 if (hlist_empty(&cookie->backing_objects))
40078 goto nobufs;
40079@@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct
40080 goto nobufs_unlock;
40081 spin_unlock(&cookie->lock);
40082
40083- fscache_stat(&fscache_n_retrieval_ops);
40084+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
40085
40086 /* pin the netfs read context in case we need to do the actual netfs
40087 * read because we've encountered a cache read failure */
40088@@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct
40089
40090 error:
40091 if (ret == -ENOMEM)
40092- fscache_stat(&fscache_n_retrievals_nomem);
40093+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
40094 else if (ret == -ERESTARTSYS)
40095- fscache_stat(&fscache_n_retrievals_intr);
40096+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
40097 else if (ret == -ENODATA)
40098- fscache_stat(&fscache_n_retrievals_nodata);
40099+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
40100 else if (ret < 0)
40101- fscache_stat(&fscache_n_retrievals_nobufs);
40102+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40103 else
40104- fscache_stat(&fscache_n_retrievals_ok);
40105+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
40106
40107 fscache_put_retrieval(op);
40108 _leave(" = %d", ret);
40109@@ -570,7 +570,7 @@ nobufs_unlock:
40110 spin_unlock(&cookie->lock);
40111 kfree(op);
40112 nobufs:
40113- fscache_stat(&fscache_n_retrievals_nobufs);
40114+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
40115 _leave(" = -ENOBUFS");
40116 return -ENOBUFS;
40117 }
40118@@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_
40119
40120 _enter("%p,%p,,,", cookie, page);
40121
40122- fscache_stat(&fscache_n_allocs);
40123+ fscache_stat_unchecked(&fscache_n_allocs);
40124
40125 if (hlist_empty(&cookie->backing_objects))
40126 goto nobufs;
40127@@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_
40128 goto nobufs_unlock;
40129 spin_unlock(&cookie->lock);
40130
40131- fscache_stat(&fscache_n_alloc_ops);
40132+ fscache_stat_unchecked(&fscache_n_alloc_ops);
40133
40134 ret = fscache_wait_for_retrieval_activation(
40135 object, op,
40136@@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_
40137
40138 error:
40139 if (ret == -ERESTARTSYS)
40140- fscache_stat(&fscache_n_allocs_intr);
40141+ fscache_stat_unchecked(&fscache_n_allocs_intr);
40142 else if (ret < 0)
40143- fscache_stat(&fscache_n_allocs_nobufs);
40144+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
40145 else
40146- fscache_stat(&fscache_n_allocs_ok);
40147+ fscache_stat_unchecked(&fscache_n_allocs_ok);
40148
40149 fscache_put_retrieval(op);
40150 _leave(" = %d", ret);
40151@@ -651,7 +651,7 @@ nobufs_unlock:
40152 spin_unlock(&cookie->lock);
40153 kfree(op);
40154 nobufs:
40155- fscache_stat(&fscache_n_allocs_nobufs);
40156+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
40157 _leave(" = -ENOBUFS");
40158 return -ENOBUFS;
40159 }
40160@@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca
40161
40162 spin_lock(&cookie->stores_lock);
40163
40164- fscache_stat(&fscache_n_store_calls);
40165+ fscache_stat_unchecked(&fscache_n_store_calls);
40166
40167 /* find a page to store */
40168 page = NULL;
40169@@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca
40170 page = results[0];
40171 _debug("gang %d [%lx]", n, page->index);
40172 if (page->index > op->store_limit) {
40173- fscache_stat(&fscache_n_store_pages_over_limit);
40174+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
40175 goto superseded;
40176 }
40177
40178@@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca
40179
40180 if (page) {
40181 fscache_set_op_state(&op->op, "Store");
40182- fscache_stat(&fscache_n_store_pages);
40183+ fscache_stat_unchecked(&fscache_n_store_pages);
40184 fscache_stat(&fscache_n_cop_write_page);
40185 ret = object->cache->ops->write_page(op, page);
40186 fscache_stat_d(&fscache_n_cop_write_page);
40187@@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_
40188 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
40189 ASSERT(PageFsCache(page));
40190
40191- fscache_stat(&fscache_n_stores);
40192+ fscache_stat_unchecked(&fscache_n_stores);
40193
40194 op = kzalloc(sizeof(*op), GFP_NOIO);
40195 if (!op)
40196@@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_
40197 spin_unlock(&cookie->stores_lock);
40198 spin_unlock(&object->lock);
40199
40200- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
40201+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
40202 op->store_limit = object->store_limit;
40203
40204 if (fscache_submit_op(object, &op->op) < 0)
40205@@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_
40206
40207 spin_unlock(&cookie->lock);
40208 radix_tree_preload_end();
40209- fscache_stat(&fscache_n_store_ops);
40210- fscache_stat(&fscache_n_stores_ok);
40211+ fscache_stat_unchecked(&fscache_n_store_ops);
40212+ fscache_stat_unchecked(&fscache_n_stores_ok);
40213
40214 /* the slow work queue now carries its own ref on the object */
40215 fscache_put_operation(&op->op);
40216@@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_
40217 return 0;
40218
40219 already_queued:
40220- fscache_stat(&fscache_n_stores_again);
40221+ fscache_stat_unchecked(&fscache_n_stores_again);
40222 already_pending:
40223 spin_unlock(&cookie->stores_lock);
40224 spin_unlock(&object->lock);
40225 spin_unlock(&cookie->lock);
40226 radix_tree_preload_end();
40227 kfree(op);
40228- fscache_stat(&fscache_n_stores_ok);
40229+ fscache_stat_unchecked(&fscache_n_stores_ok);
40230 _leave(" = 0");
40231 return 0;
40232
40233@@ -886,14 +886,14 @@ nobufs:
40234 spin_unlock(&cookie->lock);
40235 radix_tree_preload_end();
40236 kfree(op);
40237- fscache_stat(&fscache_n_stores_nobufs);
40238+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
40239 _leave(" = -ENOBUFS");
40240 return -ENOBUFS;
40241
40242 nomem_free:
40243 kfree(op);
40244 nomem:
40245- fscache_stat(&fscache_n_stores_oom);
40246+ fscache_stat_unchecked(&fscache_n_stores_oom);
40247 _leave(" = -ENOMEM");
40248 return -ENOMEM;
40249 }
40250@@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac
40251 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
40252 ASSERTCMP(page, !=, NULL);
40253
40254- fscache_stat(&fscache_n_uncaches);
40255+ fscache_stat_unchecked(&fscache_n_uncaches);
40256
40257 /* cache withdrawal may beat us to it */
40258 if (!PageFsCache(page))
40259@@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs
40260 unsigned long loop;
40261
40262 #ifdef CONFIG_FSCACHE_STATS
40263- atomic_add(pagevec->nr, &fscache_n_marks);
40264+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
40265 #endif
40266
40267 for (loop = 0; loop < pagevec->nr; loop++) {
40268diff -urNp linux-2.6.32.43/fs/fscache/stats.c linux-2.6.32.43/fs/fscache/stats.c
40269--- linux-2.6.32.43/fs/fscache/stats.c 2011-03-27 14:31:47.000000000 -0400
40270+++ linux-2.6.32.43/fs/fscache/stats.c 2011-05-04 17:56:28.000000000 -0400
40271@@ -18,95 +18,95 @@
40272 /*
40273 * operation counters
40274 */
40275-atomic_t fscache_n_op_pend;
40276-atomic_t fscache_n_op_run;
40277-atomic_t fscache_n_op_enqueue;
40278-atomic_t fscache_n_op_requeue;
40279-atomic_t fscache_n_op_deferred_release;
40280-atomic_t fscache_n_op_release;
40281-atomic_t fscache_n_op_gc;
40282-atomic_t fscache_n_op_cancelled;
40283-atomic_t fscache_n_op_rejected;
40284-
40285-atomic_t fscache_n_attr_changed;
40286-atomic_t fscache_n_attr_changed_ok;
40287-atomic_t fscache_n_attr_changed_nobufs;
40288-atomic_t fscache_n_attr_changed_nomem;
40289-atomic_t fscache_n_attr_changed_calls;
40290-
40291-atomic_t fscache_n_allocs;
40292-atomic_t fscache_n_allocs_ok;
40293-atomic_t fscache_n_allocs_wait;
40294-atomic_t fscache_n_allocs_nobufs;
40295-atomic_t fscache_n_allocs_intr;
40296-atomic_t fscache_n_allocs_object_dead;
40297-atomic_t fscache_n_alloc_ops;
40298-atomic_t fscache_n_alloc_op_waits;
40299-
40300-atomic_t fscache_n_retrievals;
40301-atomic_t fscache_n_retrievals_ok;
40302-atomic_t fscache_n_retrievals_wait;
40303-atomic_t fscache_n_retrievals_nodata;
40304-atomic_t fscache_n_retrievals_nobufs;
40305-atomic_t fscache_n_retrievals_intr;
40306-atomic_t fscache_n_retrievals_nomem;
40307-atomic_t fscache_n_retrievals_object_dead;
40308-atomic_t fscache_n_retrieval_ops;
40309-atomic_t fscache_n_retrieval_op_waits;
40310-
40311-atomic_t fscache_n_stores;
40312-atomic_t fscache_n_stores_ok;
40313-atomic_t fscache_n_stores_again;
40314-atomic_t fscache_n_stores_nobufs;
40315-atomic_t fscache_n_stores_oom;
40316-atomic_t fscache_n_store_ops;
40317-atomic_t fscache_n_store_calls;
40318-atomic_t fscache_n_store_pages;
40319-atomic_t fscache_n_store_radix_deletes;
40320-atomic_t fscache_n_store_pages_over_limit;
40321-
40322-atomic_t fscache_n_store_vmscan_not_storing;
40323-atomic_t fscache_n_store_vmscan_gone;
40324-atomic_t fscache_n_store_vmscan_busy;
40325-atomic_t fscache_n_store_vmscan_cancelled;
40326-
40327-atomic_t fscache_n_marks;
40328-atomic_t fscache_n_uncaches;
40329-
40330-atomic_t fscache_n_acquires;
40331-atomic_t fscache_n_acquires_null;
40332-atomic_t fscache_n_acquires_no_cache;
40333-atomic_t fscache_n_acquires_ok;
40334-atomic_t fscache_n_acquires_nobufs;
40335-atomic_t fscache_n_acquires_oom;
40336-
40337-atomic_t fscache_n_updates;
40338-atomic_t fscache_n_updates_null;
40339-atomic_t fscache_n_updates_run;
40340-
40341-atomic_t fscache_n_relinquishes;
40342-atomic_t fscache_n_relinquishes_null;
40343-atomic_t fscache_n_relinquishes_waitcrt;
40344-atomic_t fscache_n_relinquishes_retire;
40345-
40346-atomic_t fscache_n_cookie_index;
40347-atomic_t fscache_n_cookie_data;
40348-atomic_t fscache_n_cookie_special;
40349-
40350-atomic_t fscache_n_object_alloc;
40351-atomic_t fscache_n_object_no_alloc;
40352-atomic_t fscache_n_object_lookups;
40353-atomic_t fscache_n_object_lookups_negative;
40354-atomic_t fscache_n_object_lookups_positive;
40355-atomic_t fscache_n_object_lookups_timed_out;
40356-atomic_t fscache_n_object_created;
40357-atomic_t fscache_n_object_avail;
40358-atomic_t fscache_n_object_dead;
40359-
40360-atomic_t fscache_n_checkaux_none;
40361-atomic_t fscache_n_checkaux_okay;
40362-atomic_t fscache_n_checkaux_update;
40363-atomic_t fscache_n_checkaux_obsolete;
40364+atomic_unchecked_t fscache_n_op_pend;
40365+atomic_unchecked_t fscache_n_op_run;
40366+atomic_unchecked_t fscache_n_op_enqueue;
40367+atomic_unchecked_t fscache_n_op_requeue;
40368+atomic_unchecked_t fscache_n_op_deferred_release;
40369+atomic_unchecked_t fscache_n_op_release;
40370+atomic_unchecked_t fscache_n_op_gc;
40371+atomic_unchecked_t fscache_n_op_cancelled;
40372+atomic_unchecked_t fscache_n_op_rejected;
40373+
40374+atomic_unchecked_t fscache_n_attr_changed;
40375+atomic_unchecked_t fscache_n_attr_changed_ok;
40376+atomic_unchecked_t fscache_n_attr_changed_nobufs;
40377+atomic_unchecked_t fscache_n_attr_changed_nomem;
40378+atomic_unchecked_t fscache_n_attr_changed_calls;
40379+
40380+atomic_unchecked_t fscache_n_allocs;
40381+atomic_unchecked_t fscache_n_allocs_ok;
40382+atomic_unchecked_t fscache_n_allocs_wait;
40383+atomic_unchecked_t fscache_n_allocs_nobufs;
40384+atomic_unchecked_t fscache_n_allocs_intr;
40385+atomic_unchecked_t fscache_n_allocs_object_dead;
40386+atomic_unchecked_t fscache_n_alloc_ops;
40387+atomic_unchecked_t fscache_n_alloc_op_waits;
40388+
40389+atomic_unchecked_t fscache_n_retrievals;
40390+atomic_unchecked_t fscache_n_retrievals_ok;
40391+atomic_unchecked_t fscache_n_retrievals_wait;
40392+atomic_unchecked_t fscache_n_retrievals_nodata;
40393+atomic_unchecked_t fscache_n_retrievals_nobufs;
40394+atomic_unchecked_t fscache_n_retrievals_intr;
40395+atomic_unchecked_t fscache_n_retrievals_nomem;
40396+atomic_unchecked_t fscache_n_retrievals_object_dead;
40397+atomic_unchecked_t fscache_n_retrieval_ops;
40398+atomic_unchecked_t fscache_n_retrieval_op_waits;
40399+
40400+atomic_unchecked_t fscache_n_stores;
40401+atomic_unchecked_t fscache_n_stores_ok;
40402+atomic_unchecked_t fscache_n_stores_again;
40403+atomic_unchecked_t fscache_n_stores_nobufs;
40404+atomic_unchecked_t fscache_n_stores_oom;
40405+atomic_unchecked_t fscache_n_store_ops;
40406+atomic_unchecked_t fscache_n_store_calls;
40407+atomic_unchecked_t fscache_n_store_pages;
40408+atomic_unchecked_t fscache_n_store_radix_deletes;
40409+atomic_unchecked_t fscache_n_store_pages_over_limit;
40410+
40411+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
40412+atomic_unchecked_t fscache_n_store_vmscan_gone;
40413+atomic_unchecked_t fscache_n_store_vmscan_busy;
40414+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
40415+
40416+atomic_unchecked_t fscache_n_marks;
40417+atomic_unchecked_t fscache_n_uncaches;
40418+
40419+atomic_unchecked_t fscache_n_acquires;
40420+atomic_unchecked_t fscache_n_acquires_null;
40421+atomic_unchecked_t fscache_n_acquires_no_cache;
40422+atomic_unchecked_t fscache_n_acquires_ok;
40423+atomic_unchecked_t fscache_n_acquires_nobufs;
40424+atomic_unchecked_t fscache_n_acquires_oom;
40425+
40426+atomic_unchecked_t fscache_n_updates;
40427+atomic_unchecked_t fscache_n_updates_null;
40428+atomic_unchecked_t fscache_n_updates_run;
40429+
40430+atomic_unchecked_t fscache_n_relinquishes;
40431+atomic_unchecked_t fscache_n_relinquishes_null;
40432+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
40433+atomic_unchecked_t fscache_n_relinquishes_retire;
40434+
40435+atomic_unchecked_t fscache_n_cookie_index;
40436+atomic_unchecked_t fscache_n_cookie_data;
40437+atomic_unchecked_t fscache_n_cookie_special;
40438+
40439+atomic_unchecked_t fscache_n_object_alloc;
40440+atomic_unchecked_t fscache_n_object_no_alloc;
40441+atomic_unchecked_t fscache_n_object_lookups;
40442+atomic_unchecked_t fscache_n_object_lookups_negative;
40443+atomic_unchecked_t fscache_n_object_lookups_positive;
40444+atomic_unchecked_t fscache_n_object_lookups_timed_out;
40445+atomic_unchecked_t fscache_n_object_created;
40446+atomic_unchecked_t fscache_n_object_avail;
40447+atomic_unchecked_t fscache_n_object_dead;
40448+
40449+atomic_unchecked_t fscache_n_checkaux_none;
40450+atomic_unchecked_t fscache_n_checkaux_okay;
40451+atomic_unchecked_t fscache_n_checkaux_update;
40452+atomic_unchecked_t fscache_n_checkaux_obsolete;
40453
40454 atomic_t fscache_n_cop_alloc_object;
40455 atomic_t fscache_n_cop_lookup_object;
40456@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
40457 seq_puts(m, "FS-Cache statistics\n");
40458
40459 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
40460- atomic_read(&fscache_n_cookie_index),
40461- atomic_read(&fscache_n_cookie_data),
40462- atomic_read(&fscache_n_cookie_special));
40463+ atomic_read_unchecked(&fscache_n_cookie_index),
40464+ atomic_read_unchecked(&fscache_n_cookie_data),
40465+ atomic_read_unchecked(&fscache_n_cookie_special));
40466
40467 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
40468- atomic_read(&fscache_n_object_alloc),
40469- atomic_read(&fscache_n_object_no_alloc),
40470- atomic_read(&fscache_n_object_avail),
40471- atomic_read(&fscache_n_object_dead));
40472+ atomic_read_unchecked(&fscache_n_object_alloc),
40473+ atomic_read_unchecked(&fscache_n_object_no_alloc),
40474+ atomic_read_unchecked(&fscache_n_object_avail),
40475+ atomic_read_unchecked(&fscache_n_object_dead));
40476 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
40477- atomic_read(&fscache_n_checkaux_none),
40478- atomic_read(&fscache_n_checkaux_okay),
40479- atomic_read(&fscache_n_checkaux_update),
40480- atomic_read(&fscache_n_checkaux_obsolete));
40481+ atomic_read_unchecked(&fscache_n_checkaux_none),
40482+ atomic_read_unchecked(&fscache_n_checkaux_okay),
40483+ atomic_read_unchecked(&fscache_n_checkaux_update),
40484+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
40485
40486 seq_printf(m, "Pages : mrk=%u unc=%u\n",
40487- atomic_read(&fscache_n_marks),
40488- atomic_read(&fscache_n_uncaches));
40489+ atomic_read_unchecked(&fscache_n_marks),
40490+ atomic_read_unchecked(&fscache_n_uncaches));
40491
40492 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
40493 " oom=%u\n",
40494- atomic_read(&fscache_n_acquires),
40495- atomic_read(&fscache_n_acquires_null),
40496- atomic_read(&fscache_n_acquires_no_cache),
40497- atomic_read(&fscache_n_acquires_ok),
40498- atomic_read(&fscache_n_acquires_nobufs),
40499- atomic_read(&fscache_n_acquires_oom));
40500+ atomic_read_unchecked(&fscache_n_acquires),
40501+ atomic_read_unchecked(&fscache_n_acquires_null),
40502+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
40503+ atomic_read_unchecked(&fscache_n_acquires_ok),
40504+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
40505+ atomic_read_unchecked(&fscache_n_acquires_oom));
40506
40507 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
40508- atomic_read(&fscache_n_object_lookups),
40509- atomic_read(&fscache_n_object_lookups_negative),
40510- atomic_read(&fscache_n_object_lookups_positive),
40511- atomic_read(&fscache_n_object_lookups_timed_out),
40512- atomic_read(&fscache_n_object_created));
40513+ atomic_read_unchecked(&fscache_n_object_lookups),
40514+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
40515+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
40516+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
40517+ atomic_read_unchecked(&fscache_n_object_created));
40518
40519 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
40520- atomic_read(&fscache_n_updates),
40521- atomic_read(&fscache_n_updates_null),
40522- atomic_read(&fscache_n_updates_run));
40523+ atomic_read_unchecked(&fscache_n_updates),
40524+ atomic_read_unchecked(&fscache_n_updates_null),
40525+ atomic_read_unchecked(&fscache_n_updates_run));
40526
40527 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
40528- atomic_read(&fscache_n_relinquishes),
40529- atomic_read(&fscache_n_relinquishes_null),
40530- atomic_read(&fscache_n_relinquishes_waitcrt),
40531- atomic_read(&fscache_n_relinquishes_retire));
40532+ atomic_read_unchecked(&fscache_n_relinquishes),
40533+ atomic_read_unchecked(&fscache_n_relinquishes_null),
40534+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
40535+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
40536
40537 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
40538- atomic_read(&fscache_n_attr_changed),
40539- atomic_read(&fscache_n_attr_changed_ok),
40540- atomic_read(&fscache_n_attr_changed_nobufs),
40541- atomic_read(&fscache_n_attr_changed_nomem),
40542- atomic_read(&fscache_n_attr_changed_calls));
40543+ atomic_read_unchecked(&fscache_n_attr_changed),
40544+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
40545+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
40546+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
40547+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
40548
40549 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
40550- atomic_read(&fscache_n_allocs),
40551- atomic_read(&fscache_n_allocs_ok),
40552- atomic_read(&fscache_n_allocs_wait),
40553- atomic_read(&fscache_n_allocs_nobufs),
40554- atomic_read(&fscache_n_allocs_intr));
40555+ atomic_read_unchecked(&fscache_n_allocs),
40556+ atomic_read_unchecked(&fscache_n_allocs_ok),
40557+ atomic_read_unchecked(&fscache_n_allocs_wait),
40558+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
40559+ atomic_read_unchecked(&fscache_n_allocs_intr));
40560 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
40561- atomic_read(&fscache_n_alloc_ops),
40562- atomic_read(&fscache_n_alloc_op_waits),
40563- atomic_read(&fscache_n_allocs_object_dead));
40564+ atomic_read_unchecked(&fscache_n_alloc_ops),
40565+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
40566+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
40567
40568 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
40569 " int=%u oom=%u\n",
40570- atomic_read(&fscache_n_retrievals),
40571- atomic_read(&fscache_n_retrievals_ok),
40572- atomic_read(&fscache_n_retrievals_wait),
40573- atomic_read(&fscache_n_retrievals_nodata),
40574- atomic_read(&fscache_n_retrievals_nobufs),
40575- atomic_read(&fscache_n_retrievals_intr),
40576- atomic_read(&fscache_n_retrievals_nomem));
40577+ atomic_read_unchecked(&fscache_n_retrievals),
40578+ atomic_read_unchecked(&fscache_n_retrievals_ok),
40579+ atomic_read_unchecked(&fscache_n_retrievals_wait),
40580+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
40581+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
40582+ atomic_read_unchecked(&fscache_n_retrievals_intr),
40583+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
40584 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
40585- atomic_read(&fscache_n_retrieval_ops),
40586- atomic_read(&fscache_n_retrieval_op_waits),
40587- atomic_read(&fscache_n_retrievals_object_dead));
40588+ atomic_read_unchecked(&fscache_n_retrieval_ops),
40589+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
40590+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
40591
40592 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
40593- atomic_read(&fscache_n_stores),
40594- atomic_read(&fscache_n_stores_ok),
40595- atomic_read(&fscache_n_stores_again),
40596- atomic_read(&fscache_n_stores_nobufs),
40597- atomic_read(&fscache_n_stores_oom));
40598+ atomic_read_unchecked(&fscache_n_stores),
40599+ atomic_read_unchecked(&fscache_n_stores_ok),
40600+ atomic_read_unchecked(&fscache_n_stores_again),
40601+ atomic_read_unchecked(&fscache_n_stores_nobufs),
40602+ atomic_read_unchecked(&fscache_n_stores_oom));
40603 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
40604- atomic_read(&fscache_n_store_ops),
40605- atomic_read(&fscache_n_store_calls),
40606- atomic_read(&fscache_n_store_pages),
40607- atomic_read(&fscache_n_store_radix_deletes),
40608- atomic_read(&fscache_n_store_pages_over_limit));
40609+ atomic_read_unchecked(&fscache_n_store_ops),
40610+ atomic_read_unchecked(&fscache_n_store_calls),
40611+ atomic_read_unchecked(&fscache_n_store_pages),
40612+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
40613+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
40614
40615 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
40616- atomic_read(&fscache_n_store_vmscan_not_storing),
40617- atomic_read(&fscache_n_store_vmscan_gone),
40618- atomic_read(&fscache_n_store_vmscan_busy),
40619- atomic_read(&fscache_n_store_vmscan_cancelled));
40620+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
40621+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
40622+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
40623+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
40624
40625 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
40626- atomic_read(&fscache_n_op_pend),
40627- atomic_read(&fscache_n_op_run),
40628- atomic_read(&fscache_n_op_enqueue),
40629- atomic_read(&fscache_n_op_cancelled),
40630- atomic_read(&fscache_n_op_rejected));
40631+ atomic_read_unchecked(&fscache_n_op_pend),
40632+ atomic_read_unchecked(&fscache_n_op_run),
40633+ atomic_read_unchecked(&fscache_n_op_enqueue),
40634+ atomic_read_unchecked(&fscache_n_op_cancelled),
40635+ atomic_read_unchecked(&fscache_n_op_rejected));
40636 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
40637- atomic_read(&fscache_n_op_deferred_release),
40638- atomic_read(&fscache_n_op_release),
40639- atomic_read(&fscache_n_op_gc));
40640+ atomic_read_unchecked(&fscache_n_op_deferred_release),
40641+ atomic_read_unchecked(&fscache_n_op_release),
40642+ atomic_read_unchecked(&fscache_n_op_gc));
40643
40644 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
40645 atomic_read(&fscache_n_cop_alloc_object),
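The fs/fscache/stats.c hunk above switches every statistics counter from atomic_t to atomic_unchecked_t and each reader to atomic_read_unchecked(). In this patch series the split exists because the REFCOUNT hardening traps when an atomic_t overflows; counters that merely count events may wrap harmlessly, so they are moved to the "unchecked" variant that the overflow check ignores. A minimal user-space sketch of the idea, with fallback definitions that are illustrative rather than the real PaX ones:

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative stand-in: with overflow detection disabled, the
 * "unchecked" type can simply wrap the ordinary atomic counter. */
typedef struct { atomic_int counter; } atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
    atomic_fetch_add_explicit(&v->counter, 1, memory_order_relaxed);
}

static inline int atomic_read_unchecked(atomic_unchecked_t *v)
{
    return atomic_load_explicit(&v->counter, memory_order_relaxed);
}

/* A pure statistics counter: wrap-around is harmless, no trap wanted. */
static atomic_unchecked_t n_marks;

int main(void)
{
    atomic_inc_unchecked(&n_marks);
    atomic_inc_unchecked(&n_marks);
    printf("marks=%d\n", atomic_read_unchecked(&n_marks));
    return 0;
}

The same counter split recurs later in this section for nlm_cookie in fs/lockd/clntproc.c and for nfs_attr_generation_counter in fs/nfs/inode.c.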
40646diff -urNp linux-2.6.32.43/fs/fs_struct.c linux-2.6.32.43/fs/fs_struct.c
40647--- linux-2.6.32.43/fs/fs_struct.c 2011-03-27 14:31:47.000000000 -0400
40648+++ linux-2.6.32.43/fs/fs_struct.c 2011-04-17 15:56:46.000000000 -0400
40649@@ -4,6 +4,7 @@
40650 #include <linux/path.h>
40651 #include <linux/slab.h>
40652 #include <linux/fs_struct.h>
40653+#include <linux/grsecurity.h>
40654
40655 /*
40656 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
40657@@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
40658 old_root = fs->root;
40659 fs->root = *path;
40660 path_get(path);
40661+ gr_set_chroot_entries(current, path);
40662 write_unlock(&fs->lock);
40663 if (old_root.dentry)
40664 path_put(&old_root);
40665@@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
40666 && fs->root.mnt == old_root->mnt) {
40667 path_get(new_root);
40668 fs->root = *new_root;
40669+ gr_set_chroot_entries(p, new_root);
40670 count++;
40671 }
40672 if (fs->pwd.dentry == old_root->dentry
40673@@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
40674 task_lock(tsk);
40675 write_lock(&fs->lock);
40676 tsk->fs = NULL;
40677- kill = !--fs->users;
40678+ gr_clear_chroot_entries(tsk);
40679+ kill = !atomic_dec_return(&fs->users);
40680 write_unlock(&fs->lock);
40681 task_unlock(tsk);
40682 if (kill)
40683@@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
40684 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
40685 /* We don't need to lock fs - think why ;-) */
40686 if (fs) {
40687- fs->users = 1;
40688+ atomic_set(&fs->users, 1);
40689 fs->in_exec = 0;
40690 rwlock_init(&fs->lock);
40691 fs->umask = old->umask;
40692@@ -127,8 +131,9 @@ int unshare_fs_struct(void)
40693
40694 task_lock(current);
40695 write_lock(&fs->lock);
40696- kill = !--fs->users;
40697+ kill = !atomic_dec_return(&fs->users);
40698 current->fs = new_fs;
40699+ gr_set_chroot_entries(current, &new_fs->root);
40700 write_unlock(&fs->lock);
40701 task_unlock(current);
40702
40703@@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
40704
40705 /* to be mentioned only in INIT_TASK */
40706 struct fs_struct init_fs = {
40707- .users = 1,
40708+ .users = ATOMIC_INIT(1),
40709 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
40710 .umask = 0022,
40711 };
40712@@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
40713 task_lock(current);
40714
40715 write_lock(&init_fs.lock);
40716- init_fs.users++;
40717+ atomic_inc(&init_fs.users);
40718 write_unlock(&init_fs.lock);
40719
40720 write_lock(&fs->lock);
40721 current->fs = &init_fs;
40722- kill = !--fs->users;
40723+ gr_set_chroot_entries(current, &current->fs->root);
40724+ kill = !atomic_dec_return(&fs->users);
40725 write_unlock(&fs->lock);
40726
40727 task_unlock(current);
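In fs/fs_struct.c the reference count fs->users goes from a plain int manipulated under fs->lock to an atomic_t: every `!--fs->users` becomes `!atomic_dec_return(&fs->users)` and the static initializer becomes ATOMIC_INIT(1), while the gr_set_chroot_entries()/gr_clear_chroot_entries() calls are grsecurity's chroot bookkeeping hooks. A small compilable sketch of the refcount pattern itself (mock structure, field names follow the patch):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct fs_struct_demo {
    atomic_int users;                      /* was: int users */
    int umask;
};

static void get_fs_demo(struct fs_struct_demo *fs)
{
    atomic_fetch_add(&fs->users, 1);       /* atomic_inc(&fs->users) */
}

static void put_fs_demo(struct fs_struct_demo *fs)
{
    /* kill = !atomic_dec_return(&fs->users): a previous value of 1
     * means this caller dropped the last reference. */
    if (atomic_fetch_sub(&fs->users, 1) == 1)
        free(fs);
}

int main(void)
{
    struct fs_struct_demo *fs = malloc(sizeof(*fs));

    if (!fs)
        return 1;
    atomic_init(&fs->users, 1);            /* .users = ATOMIC_INIT(1) */
    fs->umask = 022;
    get_fs_demo(fs);
    put_fs_demo(fs);
    put_fs_demo(fs);                       /* last reference, object freed */
    printf("done\n");
    return 0;
}

The surrounding write_lock(&fs->lock) sections remain in the patched code; the atomic type additionally keeps the count itself consistent if it is ever updated outside them.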
40728diff -urNp linux-2.6.32.43/fs/fuse/cuse.c linux-2.6.32.43/fs/fuse/cuse.c
40729--- linux-2.6.32.43/fs/fuse/cuse.c 2011-03-27 14:31:47.000000000 -0400
40730+++ linux-2.6.32.43/fs/fuse/cuse.c 2011-04-17 15:56:46.000000000 -0400
40731@@ -528,8 +528,18 @@ static int cuse_channel_release(struct i
40732 return rc;
40733 }
40734
40735-static struct file_operations cuse_channel_fops; /* initialized during init */
40736-
40737+static const struct file_operations cuse_channel_fops = { /* initialized during init */
40738+ .owner = THIS_MODULE,
40739+ .llseek = no_llseek,
40740+ .read = do_sync_read,
40741+ .aio_read = fuse_dev_read,
40742+ .write = do_sync_write,
40743+ .aio_write = fuse_dev_write,
40744+ .poll = fuse_dev_poll,
40745+ .open = cuse_channel_open,
40746+ .release = cuse_channel_release,
40747+ .fasync = fuse_dev_fasync,
40748+};
40749
40750 /**************************************************************************
40751 * Misc stuff and module initializatiion
40752@@ -575,12 +585,6 @@ static int __init cuse_init(void)
40753 for (i = 0; i < CUSE_CONNTBL_LEN; i++)
40754 INIT_LIST_HEAD(&cuse_conntbl[i]);
40755
40756- /* inherit and extend fuse_dev_operations */
40757- cuse_channel_fops = fuse_dev_operations;
40758- cuse_channel_fops.owner = THIS_MODULE;
40759- cuse_channel_fops.open = cuse_channel_open;
40760- cuse_channel_fops.release = cuse_channel_release;
40761-
40762 cuse_class = class_create(THIS_MODULE, "cuse");
40763 if (IS_ERR(cuse_class))
40764 return PTR_ERR(cuse_class);
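Rather than declaring cuse_channel_fops as a writable structure and filling it in at module init by copying fuse_dev_operations, the cuse.c hunk writes the whole table out as `static const`, so the function pointers can be placed in read-only memory and never change after link time; the fuse_dev_read/write/poll/fasync members it now names directly are made non-static and exported by the fs/fuse/dev.c hunk that follows. A compilable mock of the constification pattern (the struct below is a stand-in, not the kernel's file_operations):

#include <stdio.h>

/* Mock ops table made of function pointers. */
struct demo_fops {
    int (*open)(const char *name);
    int (*release)(const char *name);
};

static int demo_open(const char *name)
{
    return printf("open %s\n", name) < 0 ? -1 : 0;
}

static int demo_release(const char *name)
{
    return printf("release %s\n", name) < 0 ? -1 : 0;
}

/* Fully initialized up front and const: the table lands in .rodata,
 * so a stray (or attacker-controlled) kernel write cannot retarget
 * the pointers at runtime. */
static const struct demo_fops channel_fops = {
    .open    = demo_open,
    .release = demo_release,
};

int main(void)
{
    channel_fops.open("cuse");
    channel_fops.release("cuse");
    return 0;
}

The same constification shows up below for gfs2_attr_ops, gfs2_uevent_ops and nfsd_nlm_ops; note that the "initialized during init" comment kept on the new definition is stale, since nothing is initialized at init time any more.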
40765diff -urNp linux-2.6.32.43/fs/fuse/dev.c linux-2.6.32.43/fs/fuse/dev.c
40766--- linux-2.6.32.43/fs/fuse/dev.c 2011-03-27 14:31:47.000000000 -0400
40767+++ linux-2.6.32.43/fs/fuse/dev.c 2011-04-17 15:56:46.000000000 -0400
40768@@ -745,7 +745,7 @@ __releases(&fc->lock)
40769 * request_end(). Otherwise add it to the processing list, and set
40770 * the 'sent' flag.
40771 */
40772-static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
40773+ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
40774 unsigned long nr_segs, loff_t pos)
40775 {
40776 int err;
40777@@ -827,6 +827,7 @@ static ssize_t fuse_dev_read(struct kioc
40778 spin_unlock(&fc->lock);
40779 return err;
40780 }
40781+EXPORT_SYMBOL_GPL(fuse_dev_read);
40782
40783 static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
40784 struct fuse_copy_state *cs)
40785@@ -885,7 +886,7 @@ static int fuse_notify_inval_entry(struc
40786 {
40787 struct fuse_notify_inval_entry_out outarg;
40788 int err = -EINVAL;
40789- char buf[FUSE_NAME_MAX+1];
40790+ char *buf = NULL;
40791 struct qstr name;
40792
40793 if (size < sizeof(outarg))
40794@@ -899,6 +900,11 @@ static int fuse_notify_inval_entry(struc
40795 if (outarg.namelen > FUSE_NAME_MAX)
40796 goto err;
40797
40798+ err = -ENOMEM;
40799+ buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
40800+ if (!buf)
40801+ goto err;
40802+
40803 name.name = buf;
40804 name.len = outarg.namelen;
40805 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
40806@@ -910,17 +916,15 @@ static int fuse_notify_inval_entry(struc
40807
40808 down_read(&fc->killsb);
40809 err = -ENOENT;
40810- if (!fc->sb)
40811- goto err_unlock;
40812-
40813- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
40814-
40815-err_unlock:
40816+ if (fc->sb)
40817+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
40818 up_read(&fc->killsb);
40819+ kfree(buf);
40820 return err;
40821
40822 err:
40823 fuse_copy_finish(cs);
40824+ kfree(buf);
40825 return err;
40826 }
40827
40828@@ -987,7 +991,7 @@ static int copy_out_args(struct fuse_cop
40829 * it from the list and copy the rest of the buffer to the request.
40830 * The request is finished by calling request_end()
40831 */
40832-static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40833+ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40834 unsigned long nr_segs, loff_t pos)
40835 {
40836 int err;
40837@@ -1083,8 +1087,9 @@ static ssize_t fuse_dev_write(struct kio
40838 fuse_copy_finish(&cs);
40839 return err;
40840 }
40841+EXPORT_SYMBOL_GPL(fuse_dev_write);
40842
40843-static unsigned fuse_dev_poll(struct file *file, poll_table *wait)
40844+unsigned fuse_dev_poll(struct file *file, poll_table *wait)
40845 {
40846 unsigned mask = POLLOUT | POLLWRNORM;
40847 struct fuse_conn *fc = fuse_get_conn(file);
40848@@ -1102,6 +1107,7 @@ static unsigned fuse_dev_poll(struct fil
40849
40850 return mask;
40851 }
40852+EXPORT_SYMBOL_GPL(fuse_dev_poll);
40853
40854 /*
40855 * Abort all requests on the given list (pending or processing)
40856@@ -1218,7 +1224,7 @@ int fuse_dev_release(struct inode *inode
40857 }
40858 EXPORT_SYMBOL_GPL(fuse_dev_release);
40859
40860-static int fuse_dev_fasync(int fd, struct file *file, int on)
40861+int fuse_dev_fasync(int fd, struct file *file, int on)
40862 {
40863 struct fuse_conn *fc = fuse_get_conn(file);
40864 if (!fc)
40865@@ -1227,6 +1233,7 @@ static int fuse_dev_fasync(int fd, struc
40866 /* No locking - fasync_helper does its own locking */
40867 return fasync_helper(fd, file, on, &fc->fasync);
40868 }
40869+EXPORT_SYMBOL_GPL(fuse_dev_fasync);
40870
40871 const struct file_operations fuse_dev_operations = {
40872 .owner = THIS_MODULE,
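Two things happen in the fs/fuse/dev.c hunk: fuse_dev_read(), fuse_dev_write(), fuse_dev_poll() and fuse_dev_fasync() lose their static and gain EXPORT_SYMBOL_GPL() so the const cuse_channel_fops above can reference them, and fuse_notify_inval_entry() stops keeping a roughly 1 KiB name buffer on the kernel stack, allocating it with kmalloc() and freeing it on every exit path instead. A user-space sketch of that allocation pattern (names and error numbers are illustrative):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define NAME_MAX_DEMO 1024           /* stand-in for FUSE_NAME_MAX */

static int handle_name(size_t namelen, const char *src)
{
    char *buf = NULL;
    int err = -22;                   /* -EINVAL */

    if (namelen > NAME_MAX_DEMO)
        goto err;

    err = -12;                       /* -ENOMEM */
    buf = malloc(NAME_MAX_DEMO + 1); /* was: char buf[FUSE_NAME_MAX+1] on the stack */
    if (!buf)
        goto err;

    memcpy(buf, src, namelen);
    buf[namelen] = '\0';
    printf("invalidate entry \"%s\"\n", buf);
    free(buf);
    return 0;

err:
    free(buf);                       /* free(NULL) is a no-op, like kfree(NULL) */
    return err;
}

int main(void)
{
    return handle_name(4, "name") ? 1 : 0;
}

Kernel stacks are only a few pages deep, so functions reachable partway down a call chain avoid large automatic arrays; the kfree() on both the success and error paths mirrors the patch.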
40873diff -urNp linux-2.6.32.43/fs/fuse/dir.c linux-2.6.32.43/fs/fuse/dir.c
40874--- linux-2.6.32.43/fs/fuse/dir.c 2011-03-27 14:31:47.000000000 -0400
40875+++ linux-2.6.32.43/fs/fuse/dir.c 2011-04-17 15:56:46.000000000 -0400
40876@@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
40877 return link;
40878 }
40879
40880-static void free_link(char *link)
40881+static void free_link(const char *link)
40882 {
40883 if (!IS_ERR(link))
40884 free_page((unsigned long) link);
40885diff -urNp linux-2.6.32.43/fs/fuse/fuse_i.h linux-2.6.32.43/fs/fuse/fuse_i.h
40886--- linux-2.6.32.43/fs/fuse/fuse_i.h 2011-03-27 14:31:47.000000000 -0400
40887+++ linux-2.6.32.43/fs/fuse/fuse_i.h 2011-04-17 15:56:46.000000000 -0400
40888@@ -525,6 +525,16 @@ extern const struct file_operations fuse
40889
40890 extern const struct dentry_operations fuse_dentry_operations;
40891
40892+extern ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov,
40893+ unsigned long nr_segs, loff_t pos);
40894+
40895+extern ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
40896+ unsigned long nr_segs, loff_t pos);
40897+
40898+extern unsigned fuse_dev_poll(struct file *file, poll_table *wait);
40899+
40900+extern int fuse_dev_fasync(int fd, struct file *file, int on);
40901+
40902 /**
40903 * Inode to nodeid comparison.
40904 */
40905diff -urNp linux-2.6.32.43/fs/gfs2/ops_inode.c linux-2.6.32.43/fs/gfs2/ops_inode.c
40906--- linux-2.6.32.43/fs/gfs2/ops_inode.c 2011-03-27 14:31:47.000000000 -0400
40907+++ linux-2.6.32.43/fs/gfs2/ops_inode.c 2011-05-16 21:46:57.000000000 -0400
40908@@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odi
40909 unsigned int x;
40910 int error;
40911
40912+ pax_track_stack();
40913+
40914 if (ndentry->d_inode) {
40915 nip = GFS2_I(ndentry->d_inode);
40916 if (ip == nip)
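This gfs2_rename() hunk is the first of many in this section (hfsplus, jbd, jffs2, lockd, ncpfs, nfsd, and the renameat path in fs/namei.c) that insert a bare pax_track_stack() call at the top of functions with large stack frames. The call belongs to the PAX_MEMORY_STACKLEAK side of the patch: annotated functions record how deep the kernel stack gets so the used region can be cleared before returning to user space, keeping stale stack contents from leaking into later system calls. The real definition lives elsewhere in the patch; the fallback below is only a sketch showing that the annotation costs nothing when the feature is compiled out:

#include <stdio.h>

/* Hypothetical fallback: with the instrumentation disabled the call
 * must compile away entirely. */
#ifndef CONFIG_PAX_MEMORY_STACKLEAK
#define pax_track_stack() do { } while (0)
#endif

static int rename_demo(void)
{
    char scratch[2048];              /* the kind of frame that gets annotated */

    pax_track_stack();

    snprintf(scratch, sizeof(scratch), "working buffer");
    return printf("%s\n", scratch) < 0 ? -1 : 0;
}

int main(void)
{
    return rename_demo();
}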
40917diff -urNp linux-2.6.32.43/fs/gfs2/sys.c linux-2.6.32.43/fs/gfs2/sys.c
40918--- linux-2.6.32.43/fs/gfs2/sys.c 2011-03-27 14:31:47.000000000 -0400
40919+++ linux-2.6.32.43/fs/gfs2/sys.c 2011-04-17 15:56:46.000000000 -0400
40920@@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct ko
40921 return a->store ? a->store(sdp, buf, len) : len;
40922 }
40923
40924-static struct sysfs_ops gfs2_attr_ops = {
40925+static const struct sysfs_ops gfs2_attr_ops = {
40926 .show = gfs2_attr_show,
40927 .store = gfs2_attr_store,
40928 };
40929@@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset
40930 return 0;
40931 }
40932
40933-static struct kset_uevent_ops gfs2_uevent_ops = {
40934+static const struct kset_uevent_ops gfs2_uevent_ops = {
40935 .uevent = gfs2_uevent,
40936 };
40937
40938diff -urNp linux-2.6.32.43/fs/hfsplus/catalog.c linux-2.6.32.43/fs/hfsplus/catalog.c
40939--- linux-2.6.32.43/fs/hfsplus/catalog.c 2011-03-27 14:31:47.000000000 -0400
40940+++ linux-2.6.32.43/fs/hfsplus/catalog.c 2011-05-16 21:46:57.000000000 -0400
40941@@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block
40942 int err;
40943 u16 type;
40944
40945+ pax_track_stack();
40946+
40947 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
40948 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
40949 if (err)
40950@@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct
40951 int entry_size;
40952 int err;
40953
40954+ pax_track_stack();
40955+
40956 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
40957 sb = dir->i_sb;
40958 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
40959@@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
40960 int entry_size, type;
40961 int err = 0;
40962
40963+ pax_track_stack();
40964+
40965 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
40966 dst_dir->i_ino, dst_name->name);
40967 sb = src_dir->i_sb;
40968diff -urNp linux-2.6.32.43/fs/hfsplus/dir.c linux-2.6.32.43/fs/hfsplus/dir.c
40969--- linux-2.6.32.43/fs/hfsplus/dir.c 2011-03-27 14:31:47.000000000 -0400
40970+++ linux-2.6.32.43/fs/hfsplus/dir.c 2011-05-16 21:46:57.000000000 -0400
40971@@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *
40972 struct hfsplus_readdir_data *rd;
40973 u16 type;
40974
40975+ pax_track_stack();
40976+
40977 if (filp->f_pos >= inode->i_size)
40978 return 0;
40979
40980diff -urNp linux-2.6.32.43/fs/hfsplus/inode.c linux-2.6.32.43/fs/hfsplus/inode.c
40981--- linux-2.6.32.43/fs/hfsplus/inode.c 2011-03-27 14:31:47.000000000 -0400
40982+++ linux-2.6.32.43/fs/hfsplus/inode.c 2011-05-16 21:46:57.000000000 -0400
40983@@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode
40984 int res = 0;
40985 u16 type;
40986
40987+ pax_track_stack();
40988+
40989 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
40990
40991 HFSPLUS_I(inode).dev = 0;
40992@@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode
40993 struct hfs_find_data fd;
40994 hfsplus_cat_entry entry;
40995
40996+ pax_track_stack();
40997+
40998 if (HFSPLUS_IS_RSRC(inode))
40999 main_inode = HFSPLUS_I(inode).rsrc_inode;
41000
41001diff -urNp linux-2.6.32.43/fs/hfsplus/ioctl.c linux-2.6.32.43/fs/hfsplus/ioctl.c
41002--- linux-2.6.32.43/fs/hfsplus/ioctl.c 2011-03-27 14:31:47.000000000 -0400
41003+++ linux-2.6.32.43/fs/hfsplus/ioctl.c 2011-05-16 21:46:57.000000000 -0400
41004@@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dent
41005 struct hfsplus_cat_file *file;
41006 int res;
41007
41008+ pax_track_stack();
41009+
41010 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
41011 return -EOPNOTSUPP;
41012
41013@@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *
41014 struct hfsplus_cat_file *file;
41015 ssize_t res = 0;
41016
41017+ pax_track_stack();
41018+
41019 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
41020 return -EOPNOTSUPP;
41021
41022diff -urNp linux-2.6.32.43/fs/hfsplus/super.c linux-2.6.32.43/fs/hfsplus/super.c
41023--- linux-2.6.32.43/fs/hfsplus/super.c 2011-03-27 14:31:47.000000000 -0400
41024+++ linux-2.6.32.43/fs/hfsplus/super.c 2011-05-16 21:46:57.000000000 -0400
41025@@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct sup
41026 struct nls_table *nls = NULL;
41027 int err = -EINVAL;
41028
41029+ pax_track_stack();
41030+
41031 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
41032 if (!sbi)
41033 return -ENOMEM;
41034diff -urNp linux-2.6.32.43/fs/hugetlbfs/inode.c linux-2.6.32.43/fs/hugetlbfs/inode.c
41035--- linux-2.6.32.43/fs/hugetlbfs/inode.c 2011-03-27 14:31:47.000000000 -0400
41036+++ linux-2.6.32.43/fs/hugetlbfs/inode.c 2011-04-17 15:56:46.000000000 -0400
41037@@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs
41038 .kill_sb = kill_litter_super,
41039 };
41040
41041-static struct vfsmount *hugetlbfs_vfsmount;
41042+struct vfsmount *hugetlbfs_vfsmount;
41043
41044 static int can_do_hugetlb_shm(void)
41045 {
41046diff -urNp linux-2.6.32.43/fs/ioctl.c linux-2.6.32.43/fs/ioctl.c
41047--- linux-2.6.32.43/fs/ioctl.c 2011-03-27 14:31:47.000000000 -0400
41048+++ linux-2.6.32.43/fs/ioctl.c 2011-04-17 15:56:46.000000000 -0400
41049@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
41050 u64 phys, u64 len, u32 flags)
41051 {
41052 struct fiemap_extent extent;
41053- struct fiemap_extent *dest = fieinfo->fi_extents_start;
41054+ struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
41055
41056 /* only count the extents */
41057 if (fieinfo->fi_extents_max == 0) {
41058@@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
41059
41060 fieinfo.fi_flags = fiemap.fm_flags;
41061 fieinfo.fi_extents_max = fiemap.fm_extent_count;
41062- fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
41063+ fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
41064
41065 if (fiemap.fm_extent_count != 0 &&
41066 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
41067@@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
41068 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
41069 fiemap.fm_flags = fieinfo.fi_flags;
41070 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
41071- if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
41072+ if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
41073 error = -EFAULT;
41074
41075 return error;
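The fs/ioctl.c hunk changes no generated code at all: it annotates fi_extents_start as a __user pointer and marks the integer-to-pointer conversions with __force, which exists purely for the sparse static checker. With the annotations in place, sparse will complain if a user-space pointer is ever dereferenced directly instead of going through copy_to_user()/copy_from_user()-style accessors. A sketch of how such annotations behave (the attribute definitions mirror the sparse-only ones; under a normal compiler they expand to nothing):

#include <stdio.h>
#include <string.h>

#ifdef __CHECKER__
# define __user  __attribute__((noderef, address_space(1)))
# define __force __attribute__((force))
#else
# define __user
# define __force
#endif

struct fiemap_demo { unsigned long long start, len; };

/* Only accessors like this one are supposed to touch __user pointers. */
static int copy_to_user_demo(void __user *dst, const void *src, size_t n)
{
    memcpy((void __force *)dst, src, n);   /* kernel: copy_to_user() */
    return 0;
}

int main(void)
{
    struct fiemap_demo out = { 0, 4096 };
    struct fiemap_demo userbuf;            /* pretend this lives in user space */
    struct fiemap_demo __user *dest = (struct fiemap_demo __user *)&userbuf;

    copy_to_user_demo(dest, &out, sizeof(out));
    printf("len=%llu\n", userbuf.len);
    return 0;
}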
41076diff -urNp linux-2.6.32.43/fs/jbd/checkpoint.c linux-2.6.32.43/fs/jbd/checkpoint.c
41077--- linux-2.6.32.43/fs/jbd/checkpoint.c 2011-03-27 14:31:47.000000000 -0400
41078+++ linux-2.6.32.43/fs/jbd/checkpoint.c 2011-05-16 21:46:57.000000000 -0400
41079@@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal
41080 tid_t this_tid;
41081 int result;
41082
41083+ pax_track_stack();
41084+
41085 jbd_debug(1, "Start checkpoint\n");
41086
41087 /*
41088diff -urNp linux-2.6.32.43/fs/jffs2/compr_rtime.c linux-2.6.32.43/fs/jffs2/compr_rtime.c
41089--- linux-2.6.32.43/fs/jffs2/compr_rtime.c 2011-03-27 14:31:47.000000000 -0400
41090+++ linux-2.6.32.43/fs/jffs2/compr_rtime.c 2011-05-16 21:46:57.000000000 -0400
41091@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
41092 int outpos = 0;
41093 int pos=0;
41094
41095+ pax_track_stack();
41096+
41097 memset(positions,0,sizeof(positions));
41098
41099 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
41100@@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsign
41101 int outpos = 0;
41102 int pos=0;
41103
41104+ pax_track_stack();
41105+
41106 memset(positions,0,sizeof(positions));
41107
41108 while (outpos<destlen) {
41109diff -urNp linux-2.6.32.43/fs/jffs2/compr_rubin.c linux-2.6.32.43/fs/jffs2/compr_rubin.c
41110--- linux-2.6.32.43/fs/jffs2/compr_rubin.c 2011-03-27 14:31:47.000000000 -0400
41111+++ linux-2.6.32.43/fs/jffs2/compr_rubin.c 2011-05-16 21:46:57.000000000 -0400
41112@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
41113 int ret;
41114 uint32_t mysrclen, mydstlen;
41115
41116+ pax_track_stack();
41117+
41118 mysrclen = *sourcelen;
41119 mydstlen = *dstlen - 8;
41120
41121diff -urNp linux-2.6.32.43/fs/jffs2/erase.c linux-2.6.32.43/fs/jffs2/erase.c
41122--- linux-2.6.32.43/fs/jffs2/erase.c 2011-03-27 14:31:47.000000000 -0400
41123+++ linux-2.6.32.43/fs/jffs2/erase.c 2011-04-17 15:56:46.000000000 -0400
41124@@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
41125 struct jffs2_unknown_node marker = {
41126 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
41127 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
41128- .totlen = cpu_to_je32(c->cleanmarker_size)
41129+ .totlen = cpu_to_je32(c->cleanmarker_size),
41130+ .hdr_crc = cpu_to_je32(0)
41131 };
41132
41133 jffs2_prealloc_raw_node_refs(c, jeb, 1);
41134diff -urNp linux-2.6.32.43/fs/jffs2/wbuf.c linux-2.6.32.43/fs/jffs2/wbuf.c
41135--- linux-2.6.32.43/fs/jffs2/wbuf.c 2011-03-27 14:31:47.000000000 -0400
41136+++ linux-2.6.32.43/fs/jffs2/wbuf.c 2011-04-17 15:56:46.000000000 -0400
41137@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
41138 {
41139 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
41140 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
41141- .totlen = constant_cpu_to_je32(8)
41142+ .totlen = constant_cpu_to_je32(8),
41143+ .hdr_crc = constant_cpu_to_je32(0)
41144 };
41145
41146 /*
41147diff -urNp linux-2.6.32.43/fs/jffs2/xattr.c linux-2.6.32.43/fs/jffs2/xattr.c
41148--- linux-2.6.32.43/fs/jffs2/xattr.c 2011-03-27 14:31:47.000000000 -0400
41149+++ linux-2.6.32.43/fs/jffs2/xattr.c 2011-05-16 21:46:57.000000000 -0400
41150@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
41151
41152 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
41153
41154+ pax_track_stack();
41155+
41156 /* Phase.1 : Merge same xref */
41157 for (i=0; i < XREF_TMPHASH_SIZE; i++)
41158 xref_tmphash[i] = NULL;
41159diff -urNp linux-2.6.32.43/fs/jfs/super.c linux-2.6.32.43/fs/jfs/super.c
41160--- linux-2.6.32.43/fs/jfs/super.c 2011-03-27 14:31:47.000000000 -0400
41161+++ linux-2.6.32.43/fs/jfs/super.c 2011-06-07 18:06:04.000000000 -0400
41162@@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
41163
41164 jfs_inode_cachep =
41165 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
41166- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
41167+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
41168 init_once);
41169 if (jfs_inode_cachep == NULL)
41170 return -ENOMEM;
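The jfs hunk adds SLAB_USERCOPY to the jfs_ip inode cache. In this patch series that flag acts as the PAX_USERCOPY whitelist: copy_to_user()/copy_from_user() on slab-backed objects is only permitted from caches that carry it, and only within the bounds of a single object, so every other cache becomes off-limits to user copies. A simplified, hypothetical sketch of the check such a copy path could perform (the flag value and helper are illustrative, not the patch's implementation):

#include <stdbool.h>
#include <stdio.h>

#define SLAB_USERCOPY_DEMO 0x1u

struct cache_demo {
    const char  *name;
    size_t       object_size;
    unsigned int flags;
};

/* Hypothetical gate in the user-copy path: refuse caches that are not
 * whitelisted, and refuse copies that run past the object's end. */
static bool usercopy_allowed(const struct cache_demo *c, size_t off, size_t n)
{
    if (!(c->flags & SLAB_USERCOPY_DEMO))
        return false;
    return n <= c->object_size && off <= c->object_size - n;
}

int main(void)
{
    struct cache_demo jfs_ip = { "jfs_ip", 256, SLAB_USERCOPY_DEMO };
    struct cache_demo other  = { "dentry", 192, 0 };

    printf("jfs_ip 0..64: %s\n", usercopy_allowed(&jfs_ip, 0, 64) ? "ok" : "blocked");
    printf("dentry 0..64: %s\n", usercopy_allowed(&other, 0, 64) ? "ok" : "blocked");
    return 0;
}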
41171diff -urNp linux-2.6.32.43/fs/Kconfig.binfmt linux-2.6.32.43/fs/Kconfig.binfmt
41172--- linux-2.6.32.43/fs/Kconfig.binfmt 2011-03-27 14:31:47.000000000 -0400
41173+++ linux-2.6.32.43/fs/Kconfig.binfmt 2011-04-17 15:56:46.000000000 -0400
41174@@ -86,7 +86,7 @@ config HAVE_AOUT
41175
41176 config BINFMT_AOUT
41177 tristate "Kernel support for a.out and ECOFF binaries"
41178- depends on HAVE_AOUT
41179+ depends on HAVE_AOUT && BROKEN
41180 ---help---
41181 A.out (Assembler.OUTput) is a set of formats for libraries and
41182 executables used in the earliest versions of UNIX. Linux used
41183diff -urNp linux-2.6.32.43/fs/libfs.c linux-2.6.32.43/fs/libfs.c
41184--- linux-2.6.32.43/fs/libfs.c 2011-03-27 14:31:47.000000000 -0400
41185+++ linux-2.6.32.43/fs/libfs.c 2011-05-11 18:25:15.000000000 -0400
41186@@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, v
41187
41188 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
41189 struct dentry *next;
41190+ char d_name[sizeof(next->d_iname)];
41191+ const unsigned char *name;
41192+
41193 next = list_entry(p, struct dentry, d_u.d_child);
41194 if (d_unhashed(next) || !next->d_inode)
41195 continue;
41196
41197 spin_unlock(&dcache_lock);
41198- if (filldir(dirent, next->d_name.name,
41199+ name = next->d_name.name;
41200+ if (name == next->d_iname) {
41201+ memcpy(d_name, name, next->d_name.len);
41202+ name = d_name;
41203+ }
41204+ if (filldir(dirent, name,
41205 next->d_name.len, filp->f_pos,
41206 next->d_inode->i_ino,
41207 dt_type(next->d_inode)) < 0)
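dcache_readdir() in fs/libfs.c is changed to snapshot names that are stored inline in dentry->d_iname into a local buffer before handing them to filldir(). filldir() copies to user space and can block, and an inline name (unlike an external one) is rewritten in place by a concurrent rename, so passing the callback a private copy keeps it from reading a name that changes underneath it. A compilable sketch of the snapshot pattern (the structure is a stand-in for struct dentry):

#include <stdio.h>
#include <string.h>

#define INLINE_NAME_LEN 32           /* stand-in for sizeof(dentry->d_iname) */

struct dentry_demo {
    char d_iname[INLINE_NAME_LEN];   /* short names are stored inline */
    const char *name;                /* points at d_iname or at a heap copy */
    unsigned int len;
};

/* Stand-in for filldir(): may block while copying to user space. */
static int filldir_demo(const char *name, unsigned int len)
{
    return printf("entry: %.*s\n", (int)len, name) < 0 ? -1 : 0;
}

static int emit_entry(const struct dentry_demo *next)
{
    char d_name[INLINE_NAME_LEN];
    const char *name = next->name;

    if (name == next->d_iname) {
        /* Inline names can be rewritten by a concurrent rename while
         * the callback blocks, so give it a private snapshot. */
        memcpy(d_name, name, next->len);
        name = d_name;
    }
    return filldir_demo(name, next->len);
}

int main(void)
{
    struct dentry_demo d = { .len = 5 };

    memcpy(d.d_iname, "short", 5);
    d.name = d.d_iname;
    return emit_entry(&d) ? 1 : 0;
}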
41208diff -urNp linux-2.6.32.43/fs/lockd/clntproc.c linux-2.6.32.43/fs/lockd/clntproc.c
41209--- linux-2.6.32.43/fs/lockd/clntproc.c 2011-03-27 14:31:47.000000000 -0400
41210+++ linux-2.6.32.43/fs/lockd/clntproc.c 2011-05-16 21:46:57.000000000 -0400
41211@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
41212 /*
41213 * Cookie counter for NLM requests
41214 */
41215-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
41216+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
41217
41218 void nlmclnt_next_cookie(struct nlm_cookie *c)
41219 {
41220- u32 cookie = atomic_inc_return(&nlm_cookie);
41221+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
41222
41223 memcpy(c->data, &cookie, 4);
41224 c->len=4;
41225@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
41226 struct nlm_rqst reqst, *req;
41227 int status;
41228
41229+ pax_track_stack();
41230+
41231 req = &reqst;
41232 memset(req, 0, sizeof(*req));
41233 locks_init_lock(&req->a_args.lock.fl);
41234diff -urNp linux-2.6.32.43/fs/lockd/svc.c linux-2.6.32.43/fs/lockd/svc.c
41235--- linux-2.6.32.43/fs/lockd/svc.c 2011-03-27 14:31:47.000000000 -0400
41236+++ linux-2.6.32.43/fs/lockd/svc.c 2011-04-17 15:56:46.000000000 -0400
41237@@ -43,7 +43,7 @@
41238
41239 static struct svc_program nlmsvc_program;
41240
41241-struct nlmsvc_binding * nlmsvc_ops;
41242+const struct nlmsvc_binding * nlmsvc_ops;
41243 EXPORT_SYMBOL_GPL(nlmsvc_ops);
41244
41245 static DEFINE_MUTEX(nlmsvc_mutex);
41246diff -urNp linux-2.6.32.43/fs/locks.c linux-2.6.32.43/fs/locks.c
41247--- linux-2.6.32.43/fs/locks.c 2011-03-27 14:31:47.000000000 -0400
41248+++ linux-2.6.32.43/fs/locks.c 2011-07-06 19:47:11.000000000 -0400
41249@@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
41250
41251 static struct kmem_cache *filelock_cache __read_mostly;
41252
41253+static void locks_init_lock_always(struct file_lock *fl)
41254+{
41255+ fl->fl_next = NULL;
41256+ fl->fl_fasync = NULL;
41257+ fl->fl_owner = NULL;
41258+ fl->fl_pid = 0;
41259+ fl->fl_nspid = NULL;
41260+ fl->fl_file = NULL;
41261+ fl->fl_flags = 0;
41262+ fl->fl_type = 0;
41263+ fl->fl_start = fl->fl_end = 0;
41264+}
41265+
41266 /* Allocate an empty lock structure. */
41267 static struct file_lock *locks_alloc_lock(void)
41268 {
41269- return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
41270+ struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
41271+
41272+ if (fl)
41273+ locks_init_lock_always(fl);
41274+
41275+ return fl;
41276 }
41277
41278 void locks_release_private(struct file_lock *fl)
41279@@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *f
41280 INIT_LIST_HEAD(&fl->fl_link);
41281 INIT_LIST_HEAD(&fl->fl_block);
41282 init_waitqueue_head(&fl->fl_wait);
41283- fl->fl_next = NULL;
41284- fl->fl_fasync = NULL;
41285- fl->fl_owner = NULL;
41286- fl->fl_pid = 0;
41287- fl->fl_nspid = NULL;
41288- fl->fl_file = NULL;
41289- fl->fl_flags = 0;
41290- fl->fl_type = 0;
41291- fl->fl_start = fl->fl_end = 0;
41292 fl->fl_ops = NULL;
41293 fl->fl_lmops = NULL;
41294+ locks_init_lock_always(fl);
41295 }
41296
41297 EXPORT_SYMBOL(locks_init_lock);
41298@@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *fil
41299 return;
41300
41301 if (filp->f_op && filp->f_op->flock) {
41302- struct file_lock fl = {
41303+ struct file_lock flock = {
41304 .fl_pid = current->tgid,
41305 .fl_file = filp,
41306 .fl_flags = FL_FLOCK,
41307 .fl_type = F_UNLCK,
41308 .fl_end = OFFSET_MAX,
41309 };
41310- filp->f_op->flock(filp, F_SETLKW, &fl);
41311- if (fl.fl_ops && fl.fl_ops->fl_release_private)
41312- fl.fl_ops->fl_release_private(&fl);
41313+ filp->f_op->flock(filp, F_SETLKW, &flock);
41314+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
41315+ flock.fl_ops->fl_release_private(&flock);
41316 }
41317
41318 lock_kernel();
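The fs/locks.c change pulls the field-zeroing half of locks_init_lock() out into locks_init_lock_always() and calls it from locks_alloc_lock() as well, so a file_lock obtained from the slab cache never carries whatever its previous user left in fl_owner, fl_pid, fl_flags and friends; the second hunk simply renames the on-stack lock in locks_remove_flock() from fl to flock. A small sketch of the shared-initializer pattern (mock structure and allocator):

#include <stdio.h>
#include <stdlib.h>

struct file_lock_demo {
    struct file_lock_demo *fl_next;
    void *fl_owner;
    int fl_pid;
    unsigned int fl_flags;
    long long fl_start, fl_end;
};

/* Shared by the init and alloc paths, so a recycled object never leaks
 * stale values into code that only partially fills it in. */
static void lock_init_always(struct file_lock_demo *fl)
{
    fl->fl_next = NULL;
    fl->fl_owner = NULL;
    fl->fl_pid = 0;
    fl->fl_flags = 0;
    fl->fl_start = fl->fl_end = 0;
}

static struct file_lock_demo *lock_alloc(void)
{
    /* Like kmem_cache_alloc() without __GFP_ZERO, malloc() returns
     * uninitialized memory. */
    struct file_lock_demo *fl = malloc(sizeof(*fl));

    if (fl)
        lock_init_always(fl);
    return fl;
}

int main(void)
{
    struct file_lock_demo *fl = lock_alloc();

    if (!fl)
        return 1;
    printf("pid=%d flags=%u\n", fl->fl_pid, fl->fl_flags);
    free(fl);
    return 0;
}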
41319diff -urNp linux-2.6.32.43/fs/namei.c linux-2.6.32.43/fs/namei.c
41320--- linux-2.6.32.43/fs/namei.c 2011-03-27 14:31:47.000000000 -0400
41321+++ linux-2.6.32.43/fs/namei.c 2011-05-16 21:46:57.000000000 -0400
41322@@ -224,14 +224,6 @@ int generic_permission(struct inode *ino
41323 return ret;
41324
41325 /*
41326- * Read/write DACs are always overridable.
41327- * Executable DACs are overridable if at least one exec bit is set.
41328- */
41329- if (!(mask & MAY_EXEC) || execute_ok(inode))
41330- if (capable(CAP_DAC_OVERRIDE))
41331- return 0;
41332-
41333- /*
41334 * Searching includes executable on directories, else just read.
41335 */
41336 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
41337@@ -239,6 +231,14 @@ int generic_permission(struct inode *ino
41338 if (capable(CAP_DAC_READ_SEARCH))
41339 return 0;
41340
41341+ /*
41342+ * Read/write DACs are always overridable.
41343+ * Executable DACs are overridable if at least one exec bit is set.
41344+ */
41345+ if (!(mask & MAY_EXEC) || execute_ok(inode))
41346+ if (capable(CAP_DAC_OVERRIDE))
41347+ return 0;
41348+
41349 return -EACCES;
41350 }
41351
41352@@ -458,7 +458,8 @@ static int exec_permission_lite(struct i
41353 if (!ret)
41354 goto ok;
41355
41356- if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
41357+ if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
41358+ capable(CAP_DAC_OVERRIDE))
41359 goto ok;
41360
41361 return ret;
41362@@ -638,7 +639,7 @@ static __always_inline int __do_follow_l
41363 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
41364 error = PTR_ERR(cookie);
41365 if (!IS_ERR(cookie)) {
41366- char *s = nd_get_link(nd);
41367+ const char *s = nd_get_link(nd);
41368 error = 0;
41369 if (s)
41370 error = __vfs_follow_link(nd, s);
41371@@ -669,6 +670,13 @@ static inline int do_follow_link(struct
41372 err = security_inode_follow_link(path->dentry, nd);
41373 if (err)
41374 goto loop;
41375+
41376+ if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
41377+ path->dentry->d_inode, path->dentry, nd->path.mnt)) {
41378+ err = -EACCES;
41379+ goto loop;
41380+ }
41381+
41382 current->link_count++;
41383 current->total_link_count++;
41384 nd->depth++;
41385@@ -1016,11 +1024,18 @@ return_reval:
41386 break;
41387 }
41388 return_base:
41389+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
41390+ path_put(&nd->path);
41391+ return -ENOENT;
41392+ }
41393 return 0;
41394 out_dput:
41395 path_put_conditional(&next, nd);
41396 break;
41397 }
41398+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
41399+ err = -ENOENT;
41400+
41401 path_put(&nd->path);
41402 return_err:
41403 return err;
41404@@ -1091,13 +1106,20 @@ static int do_path_lookup(int dfd, const
41405 int retval = path_init(dfd, name, flags, nd);
41406 if (!retval)
41407 retval = path_walk(name, nd);
41408- if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
41409- nd->path.dentry->d_inode))
41410- audit_inode(name, nd->path.dentry);
41411+
41412+ if (likely(!retval)) {
41413+ if (nd->path.dentry && nd->path.dentry->d_inode) {
41414+ if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
41415+ retval = -ENOENT;
41416+ if (!audit_dummy_context())
41417+ audit_inode(name, nd->path.dentry);
41418+ }
41419+ }
41420 if (nd->root.mnt) {
41421 path_put(&nd->root);
41422 nd->root.mnt = NULL;
41423 }
41424+
41425 return retval;
41426 }
41427
41428@@ -1576,6 +1598,20 @@ int may_open(struct path *path, int acc_
41429 if (error)
41430 goto err_out;
41431
41432+
41433+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
41434+ error = -EPERM;
41435+ goto err_out;
41436+ }
41437+ if (gr_handle_rawio(inode)) {
41438+ error = -EPERM;
41439+ goto err_out;
41440+ }
41441+ if (!gr_acl_handle_open(dentry, path->mnt, flag)) {
41442+ error = -EACCES;
41443+ goto err_out;
41444+ }
41445+
41446 if (flag & O_TRUNC) {
41447 error = get_write_access(inode);
41448 if (error)
41449@@ -1621,12 +1657,19 @@ static int __open_namei_create(struct na
41450 int error;
41451 struct dentry *dir = nd->path.dentry;
41452
41453+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, flag, mode)) {
41454+ error = -EACCES;
41455+ goto out_unlock;
41456+ }
41457+
41458 if (!IS_POSIXACL(dir->d_inode))
41459 mode &= ~current_umask();
41460 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
41461 if (error)
41462 goto out_unlock;
41463 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
41464+ if (!error)
41465+ gr_handle_create(path->dentry, nd->path.mnt);
41466 out_unlock:
41467 mutex_unlock(&dir->d_inode->i_mutex);
41468 dput(nd->path.dentry);
41469@@ -1709,6 +1752,22 @@ struct file *do_filp_open(int dfd, const
41470 &nd, flag);
41471 if (error)
41472 return ERR_PTR(error);
41473+
41474+ if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
41475+ error = -EPERM;
41476+ goto exit;
41477+ }
41478+
41479+ if (gr_handle_rawio(nd.path.dentry->d_inode)) {
41480+ error = -EPERM;
41481+ goto exit;
41482+ }
41483+
41484+ if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
41485+ error = -EACCES;
41486+ goto exit;
41487+ }
41488+
41489 goto ok;
41490 }
41491
41492@@ -1795,6 +1854,14 @@ do_last:
41493 /*
41494 * It already exists.
41495 */
41496+
41497+ /* only check if O_CREAT is specified, all other checks need
41498+ to go into may_open */
41499+ if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
41500+ error = -EACCES;
41501+ goto exit_mutex_unlock;
41502+ }
41503+
41504 mutex_unlock(&dir->d_inode->i_mutex);
41505 audit_inode(pathname, path.dentry);
41506
41507@@ -1887,6 +1954,13 @@ do_link:
41508 error = security_inode_follow_link(path.dentry, &nd);
41509 if (error)
41510 goto exit_dput;
41511+
41512+ if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
41513+ path.dentry, nd.path.mnt)) {
41514+ error = -EACCES;
41515+ goto exit_dput;
41516+ }
41517+
41518 error = __do_follow_link(&path, &nd);
41519 if (error) {
41520 /* Does someone understand code flow here? Or it is only
41521@@ -2061,6 +2135,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
41522 error = may_mknod(mode);
41523 if (error)
41524 goto out_dput;
41525+
41526+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
41527+ error = -EPERM;
41528+ goto out_dput;
41529+ }
41530+
41531+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
41532+ error = -EACCES;
41533+ goto out_dput;
41534+ }
41535+
41536 error = mnt_want_write(nd.path.mnt);
41537 if (error)
41538 goto out_dput;
41539@@ -2081,6 +2166,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
41540 }
41541 out_drop_write:
41542 mnt_drop_write(nd.path.mnt);
41543+
41544+ if (!error)
41545+ gr_handle_create(dentry, nd.path.mnt);
41546 out_dput:
41547 dput(dentry);
41548 out_unlock:
41549@@ -2134,6 +2222,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
41550 if (IS_ERR(dentry))
41551 goto out_unlock;
41552
41553+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
41554+ error = -EACCES;
41555+ goto out_dput;
41556+ }
41557+
41558 if (!IS_POSIXACL(nd.path.dentry->d_inode))
41559 mode &= ~current_umask();
41560 error = mnt_want_write(nd.path.mnt);
41561@@ -2145,6 +2238,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
41562 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
41563 out_drop_write:
41564 mnt_drop_write(nd.path.mnt);
41565+
41566+ if (!error)
41567+ gr_handle_create(dentry, nd.path.mnt);
41568+
41569 out_dput:
41570 dput(dentry);
41571 out_unlock:
41572@@ -2226,6 +2323,8 @@ static long do_rmdir(int dfd, const char
41573 char * name;
41574 struct dentry *dentry;
41575 struct nameidata nd;
41576+ ino_t saved_ino = 0;
41577+ dev_t saved_dev = 0;
41578
41579 error = user_path_parent(dfd, pathname, &nd, &name);
41580 if (error)
41581@@ -2250,6 +2349,19 @@ static long do_rmdir(int dfd, const char
41582 error = PTR_ERR(dentry);
41583 if (IS_ERR(dentry))
41584 goto exit2;
41585+
41586+ if (dentry->d_inode != NULL) {
41587+ if (dentry->d_inode->i_nlink <= 1) {
41588+ saved_ino = dentry->d_inode->i_ino;
41589+ saved_dev = gr_get_dev_from_dentry(dentry);
41590+ }
41591+
41592+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
41593+ error = -EACCES;
41594+ goto exit3;
41595+ }
41596+ }
41597+
41598 error = mnt_want_write(nd.path.mnt);
41599 if (error)
41600 goto exit3;
41601@@ -2257,6 +2369,8 @@ static long do_rmdir(int dfd, const char
41602 if (error)
41603 goto exit4;
41604 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
41605+ if (!error && (saved_dev || saved_ino))
41606+ gr_handle_delete(saved_ino, saved_dev);
41607 exit4:
41608 mnt_drop_write(nd.path.mnt);
41609 exit3:
41610@@ -2318,6 +2432,8 @@ static long do_unlinkat(int dfd, const c
41611 struct dentry *dentry;
41612 struct nameidata nd;
41613 struct inode *inode = NULL;
41614+ ino_t saved_ino = 0;
41615+ dev_t saved_dev = 0;
41616
41617 error = user_path_parent(dfd, pathname, &nd, &name);
41618 if (error)
41619@@ -2337,8 +2453,19 @@ static long do_unlinkat(int dfd, const c
41620 if (nd.last.name[nd.last.len])
41621 goto slashes;
41622 inode = dentry->d_inode;
41623- if (inode)
41624+ if (inode) {
41625+ if (inode->i_nlink <= 1) {
41626+ saved_ino = inode->i_ino;
41627+ saved_dev = gr_get_dev_from_dentry(dentry);
41628+ }
41629+
41630 atomic_inc(&inode->i_count);
41631+
41632+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
41633+ error = -EACCES;
41634+ goto exit2;
41635+ }
41636+ }
41637 error = mnt_want_write(nd.path.mnt);
41638 if (error)
41639 goto exit2;
41640@@ -2346,6 +2473,8 @@ static long do_unlinkat(int dfd, const c
41641 if (error)
41642 goto exit3;
41643 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
41644+ if (!error && (saved_ino || saved_dev))
41645+ gr_handle_delete(saved_ino, saved_dev);
41646 exit3:
41647 mnt_drop_write(nd.path.mnt);
41648 exit2:
41649@@ -2424,6 +2553,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
41650 if (IS_ERR(dentry))
41651 goto out_unlock;
41652
41653+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
41654+ error = -EACCES;
41655+ goto out_dput;
41656+ }
41657+
41658 error = mnt_want_write(nd.path.mnt);
41659 if (error)
41660 goto out_dput;
41661@@ -2431,6 +2565,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
41662 if (error)
41663 goto out_drop_write;
41664 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
41665+ if (!error)
41666+ gr_handle_create(dentry, nd.path.mnt);
41667 out_drop_write:
41668 mnt_drop_write(nd.path.mnt);
41669 out_dput:
41670@@ -2524,6 +2660,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
41671 error = PTR_ERR(new_dentry);
41672 if (IS_ERR(new_dentry))
41673 goto out_unlock;
41674+
41675+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
41676+ old_path.dentry->d_inode,
41677+ old_path.dentry->d_inode->i_mode, to)) {
41678+ error = -EACCES;
41679+ goto out_dput;
41680+ }
41681+
41682+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
41683+ old_path.dentry, old_path.mnt, to)) {
41684+ error = -EACCES;
41685+ goto out_dput;
41686+ }
41687+
41688 error = mnt_want_write(nd.path.mnt);
41689 if (error)
41690 goto out_dput;
41691@@ -2531,6 +2681,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
41692 if (error)
41693 goto out_drop_write;
41694 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
41695+ if (!error)
41696+ gr_handle_create(new_dentry, nd.path.mnt);
41697 out_drop_write:
41698 mnt_drop_write(nd.path.mnt);
41699 out_dput:
41700@@ -2708,6 +2860,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41701 char *to;
41702 int error;
41703
41704+ pax_track_stack();
41705+
41706 error = user_path_parent(olddfd, oldname, &oldnd, &from);
41707 if (error)
41708 goto exit;
41709@@ -2764,6 +2918,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41710 if (new_dentry == trap)
41711 goto exit5;
41712
41713+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
41714+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
41715+ to);
41716+ if (error)
41717+ goto exit5;
41718+
41719 error = mnt_want_write(oldnd.path.mnt);
41720 if (error)
41721 goto exit5;
41722@@ -2773,6 +2933,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
41723 goto exit6;
41724 error = vfs_rename(old_dir->d_inode, old_dentry,
41725 new_dir->d_inode, new_dentry);
41726+ if (!error)
41727+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
41728+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
41729 exit6:
41730 mnt_drop_write(oldnd.path.mnt);
41731 exit5:
41732@@ -2798,6 +2961,8 @@ SYSCALL_DEFINE2(rename, const char __use
41733
41734 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
41735 {
41736+ char tmpbuf[64];
41737+ const char *newlink;
41738 int len;
41739
41740 len = PTR_ERR(link);
41741@@ -2807,7 +2972,14 @@ int vfs_readlink(struct dentry *dentry,
41742 len = strlen(link);
41743 if (len > (unsigned) buflen)
41744 len = buflen;
41745- if (copy_to_user(buffer, link, len))
41746+
41747+ if (len < sizeof(tmpbuf)) {
41748+ memcpy(tmpbuf, link, len);
41749+ newlink = tmpbuf;
41750+ } else
41751+ newlink = link;
41752+
41753+ if (copy_to_user(buffer, newlink, len))
41754 len = -EFAULT;
41755 out:
41756 return len;
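Most of the fs/namei.c delta wires grsecurity's ACL and chroot checks (gr_acl_handle_open, gr_handle_follow_link, gr_handle_fifo, gr_acl_handle_rename and so on) into the lookup, open, mknod, mkdir, rmdir, unlink, symlink, link and rename paths, and reorders generic_permission() so CAP_DAC_OVERRIDE is consulted after CAP_DAC_READ_SEARCH. The one change that is not a policy hook is in vfs_readlink(): link targets shorter than 64 bytes are bounced through an on-stack tmpbuf before copy_to_user(), so the source of the user copy is a plain local buffer rather than whatever slab or inode-internal storage the target string lives in, which is the likely motivation given the user-copy restrictions elsewhere in this patch. A user-space sketch of that bounce-buffer pattern:

#include <stdio.h>
#include <string.h>

/* Stand-in for copy_to_user(); in the kernel this is where a hardened
 * user-copy check would inspect the source object. */
static int copy_to_user_demo(char *dst, const char *src, size_t len)
{
    memcpy(dst, src, len);
    return 0;
}

static int readlink_demo(char *buffer, int buflen, const char *link)
{
    char tmpbuf[64];
    const char *newlink;
    int len = (int)strlen(link);

    if (len > buflen)
        len = buflen;

    if (len < (int)sizeof(tmpbuf)) {
        /* Short targets: copy out of a plain stack buffer instead. */
        memcpy(tmpbuf, link, (size_t)len);
        newlink = tmpbuf;
    } else {
        newlink = link;
    }

    return copy_to_user_demo(buffer, newlink, (size_t)len) ? -14 /* -EFAULT */ : len;
}

int main(void)
{
    char out[128];
    int n = readlink_demo(out, (int)sizeof(out), "/usr/lib/libdemo.so");

    if (n < 0)
        return 1;
    printf("%.*s (%d bytes)\n", n, out, n);
    return 0;
}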
41757diff -urNp linux-2.6.32.43/fs/namespace.c linux-2.6.32.43/fs/namespace.c
41758--- linux-2.6.32.43/fs/namespace.c 2011-03-27 14:31:47.000000000 -0400
41759+++ linux-2.6.32.43/fs/namespace.c 2011-04-17 15:56:46.000000000 -0400
41760@@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
41761 if (!(sb->s_flags & MS_RDONLY))
41762 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
41763 up_write(&sb->s_umount);
41764+
41765+ gr_log_remount(mnt->mnt_devname, retval);
41766+
41767 return retval;
41768 }
41769
41770@@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
41771 security_sb_umount_busy(mnt);
41772 up_write(&namespace_sem);
41773 release_mounts(&umount_list);
41774+
41775+ gr_log_unmount(mnt->mnt_devname, retval);
41776+
41777 return retval;
41778 }
41779
41780@@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_
41781 if (retval)
41782 goto dput_out;
41783
41784+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
41785+ retval = -EPERM;
41786+ goto dput_out;
41787+ }
41788+
41789+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
41790+ retval = -EPERM;
41791+ goto dput_out;
41792+ }
41793+
41794 if (flags & MS_REMOUNT)
41795 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
41796 data_page);
41797@@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_
41798 dev_name, data_page);
41799 dput_out:
41800 path_put(&path);
41801+
41802+ gr_log_mount(dev_name, dir_name, retval);
41803+
41804 return retval;
41805 }
41806
41807@@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
41808 goto out1;
41809 }
41810
41811+ if (gr_handle_chroot_pivot()) {
41812+ error = -EPERM;
41813+ path_put(&old);
41814+ goto out1;
41815+ }
41816+
41817 read_lock(&current->fs->lock);
41818 root = current->fs->root;
41819 path_get(&current->fs->root);
41820diff -urNp linux-2.6.32.43/fs/ncpfs/dir.c linux-2.6.32.43/fs/ncpfs/dir.c
41821--- linux-2.6.32.43/fs/ncpfs/dir.c 2011-03-27 14:31:47.000000000 -0400
41822+++ linux-2.6.32.43/fs/ncpfs/dir.c 2011-05-16 21:46:57.000000000 -0400
41823@@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *den
41824 int res, val = 0, len;
41825 __u8 __name[NCP_MAXPATHLEN + 1];
41826
41827+ pax_track_stack();
41828+
41829 parent = dget_parent(dentry);
41830 dir = parent->d_inode;
41831
41832@@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct
41833 int error, res, len;
41834 __u8 __name[NCP_MAXPATHLEN + 1];
41835
41836+ pax_track_stack();
41837+
41838 lock_kernel();
41839 error = -EIO;
41840 if (!ncp_conn_valid(server))
41841@@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, st
41842 int error, result, len;
41843 int opmode;
41844 __u8 __name[NCP_MAXPATHLEN + 1];
41845-
41846+
41847 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
41848 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
41849
41850+ pax_track_stack();
41851+
41852 error = -EIO;
41853 lock_kernel();
41854 if (!ncp_conn_valid(server))
41855@@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir,
41856 int error, len;
41857 __u8 __name[NCP_MAXPATHLEN + 1];
41858
41859+ pax_track_stack();
41860+
41861 DPRINTK("ncp_mkdir: making %s/%s\n",
41862 dentry->d_parent->d_name.name, dentry->d_name.name);
41863
41864@@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir,
41865 if (!ncp_conn_valid(server))
41866 goto out;
41867
41868+ pax_track_stack();
41869+
41870 ncp_age_dentry(server, dentry);
41871 len = sizeof(__name);
41872 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
41873@@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_
41874 int old_len, new_len;
41875 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
41876
41877+ pax_track_stack();
41878+
41879 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
41880 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
41881 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
41882diff -urNp linux-2.6.32.43/fs/ncpfs/inode.c linux-2.6.32.43/fs/ncpfs/inode.c
41883--- linux-2.6.32.43/fs/ncpfs/inode.c 2011-03-27 14:31:47.000000000 -0400
41884+++ linux-2.6.32.43/fs/ncpfs/inode.c 2011-05-16 21:46:57.000000000 -0400
41885@@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_b
41886 #endif
41887 struct ncp_entry_info finfo;
41888
41889+ pax_track_stack();
41890+
41891 data.wdog_pid = NULL;
41892 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
41893 if (!server)
41894diff -urNp linux-2.6.32.43/fs/nfs/inode.c linux-2.6.32.43/fs/nfs/inode.c
41895--- linux-2.6.32.43/fs/nfs/inode.c 2011-05-10 22:12:01.000000000 -0400
41896+++ linux-2.6.32.43/fs/nfs/inode.c 2011-07-06 19:53:33.000000000 -0400
41897@@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct
41898 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
41899 nfsi->attrtimeo_timestamp = jiffies;
41900
41901- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
41902+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
41903 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
41904 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
41905 else
41906@@ -973,16 +973,16 @@ static int nfs_size_need_update(const st
41907 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
41908 }
41909
41910-static atomic_long_t nfs_attr_generation_counter;
41911+static atomic_long_unchecked_t nfs_attr_generation_counter;
41912
41913 static unsigned long nfs_read_attr_generation_counter(void)
41914 {
41915- return atomic_long_read(&nfs_attr_generation_counter);
41916+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
41917 }
41918
41919 unsigned long nfs_inc_attr_generation_counter(void)
41920 {
41921- return atomic_long_inc_return(&nfs_attr_generation_counter);
41922+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
41923 }
41924
41925 void nfs_fattr_init(struct nfs_fattr *fattr)
41926diff -urNp linux-2.6.32.43/fs/nfsd/lockd.c linux-2.6.32.43/fs/nfsd/lockd.c
41927--- linux-2.6.32.43/fs/nfsd/lockd.c 2011-04-17 17:00:52.000000000 -0400
41928+++ linux-2.6.32.43/fs/nfsd/lockd.c 2011-04-17 17:03:15.000000000 -0400
41929@@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
41930 fput(filp);
41931 }
41932
41933-static struct nlmsvc_binding nfsd_nlm_ops = {
41934+static const struct nlmsvc_binding nfsd_nlm_ops = {
41935 .fopen = nlm_fopen, /* open file for locking */
41936 .fclose = nlm_fclose, /* close file */
41937 };
41938diff -urNp linux-2.6.32.43/fs/nfsd/nfs4state.c linux-2.6.32.43/fs/nfsd/nfs4state.c
41939--- linux-2.6.32.43/fs/nfsd/nfs4state.c 2011-03-27 14:31:47.000000000 -0400
41940+++ linux-2.6.32.43/fs/nfsd/nfs4state.c 2011-05-16 21:46:57.000000000 -0400
41941@@ -3457,6 +3457,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
41942 unsigned int cmd;
41943 int err;
41944
41945+ pax_track_stack();
41946+
41947 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
41948 (long long) lock->lk_offset,
41949 (long long) lock->lk_length);
41950diff -urNp linux-2.6.32.43/fs/nfsd/nfs4xdr.c linux-2.6.32.43/fs/nfsd/nfs4xdr.c
41951--- linux-2.6.32.43/fs/nfsd/nfs4xdr.c 2011-03-27 14:31:47.000000000 -0400
41952+++ linux-2.6.32.43/fs/nfsd/nfs4xdr.c 2011-05-16 21:46:57.000000000 -0400
41953@@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
41954 struct nfsd4_compoundres *resp = rqstp->rq_resp;
41955 u32 minorversion = resp->cstate.minorversion;
41956
41957+ pax_track_stack();
41958+
41959 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
41960 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
41961 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
41962diff -urNp linux-2.6.32.43/fs/nfsd/vfs.c linux-2.6.32.43/fs/nfsd/vfs.c
41963--- linux-2.6.32.43/fs/nfsd/vfs.c 2011-05-10 22:12:01.000000000 -0400
41964+++ linux-2.6.32.43/fs/nfsd/vfs.c 2011-05-10 22:12:33.000000000 -0400
41965@@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
41966 } else {
41967 oldfs = get_fs();
41968 set_fs(KERNEL_DS);
41969- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
41970+ host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
41971 set_fs(oldfs);
41972 }
41973
41974@@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
41975
41976 /* Write the data. */
41977 oldfs = get_fs(); set_fs(KERNEL_DS);
41978- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
41979+ host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
41980 set_fs(oldfs);
41981 if (host_err < 0)
41982 goto out_nfserr;
41983@@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
41984 */
41985
41986 oldfs = get_fs(); set_fs(KERNEL_DS);
41987- host_err = inode->i_op->readlink(dentry, buf, *lenp);
41988+ host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
41989 set_fs(oldfs);
41990
41991 if (host_err < 0)
41992diff -urNp linux-2.6.32.43/fs/nilfs2/ioctl.c linux-2.6.32.43/fs/nilfs2/ioctl.c
41993--- linux-2.6.32.43/fs/nilfs2/ioctl.c 2011-03-27 14:31:47.000000000 -0400
41994+++ linux-2.6.32.43/fs/nilfs2/ioctl.c 2011-05-04 17:56:28.000000000 -0400
41995@@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(st
41996 unsigned int cmd, void __user *argp)
41997 {
41998 struct nilfs_argv argv[5];
41999- const static size_t argsz[5] = {
42000+ static const size_t argsz[5] = {
42001 sizeof(struct nilfs_vdesc),
42002 sizeof(struct nilfs_period),
42003 sizeof(__u64),
42004diff -urNp linux-2.6.32.43/fs/notify/dnotify/dnotify.c linux-2.6.32.43/fs/notify/dnotify/dnotify.c
42005--- linux-2.6.32.43/fs/notify/dnotify/dnotify.c 2011-03-27 14:31:47.000000000 -0400
42006+++ linux-2.6.32.43/fs/notify/dnotify/dnotify.c 2011-04-17 15:56:46.000000000 -0400
42007@@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsn
42008 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
42009 }
42010
42011-static struct fsnotify_ops dnotify_fsnotify_ops = {
42012+static const struct fsnotify_ops dnotify_fsnotify_ops = {
42013 .handle_event = dnotify_handle_event,
42014 .should_send_event = dnotify_should_send_event,
42015 .free_group_priv = NULL,
42016diff -urNp linux-2.6.32.43/fs/notify/notification.c linux-2.6.32.43/fs/notify/notification.c
42017--- linux-2.6.32.43/fs/notify/notification.c 2011-03-27 14:31:47.000000000 -0400
42018+++ linux-2.6.32.43/fs/notify/notification.c 2011-05-04 17:56:28.000000000 -0400
42019@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
42020 * get set to 0 so it will never get 'freed'
42021 */
42022 static struct fsnotify_event q_overflow_event;
42023-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
42024+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
42025
42026 /**
42027 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
42028@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
42029 */
42030 u32 fsnotify_get_cookie(void)
42031 {
42032- return atomic_inc_return(&fsnotify_sync_cookie);
42033+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
42034 }
42035 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
42036
42037diff -urNp linux-2.6.32.43/fs/ntfs/dir.c linux-2.6.32.43/fs/ntfs/dir.c
42038--- linux-2.6.32.43/fs/ntfs/dir.c 2011-03-27 14:31:47.000000000 -0400
42039+++ linux-2.6.32.43/fs/ntfs/dir.c 2011-04-17 15:56:46.000000000 -0400
42040@@ -1328,7 +1328,7 @@ find_next_index_buffer:
42041 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
42042 ~(s64)(ndir->itype.index.block_size - 1)));
42043 /* Bounds checks. */
42044- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
42045+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
42046 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
42047 "inode 0x%lx or driver bug.", vdir->i_ino);
42048 goto err_out;
42049diff -urNp linux-2.6.32.43/fs/ntfs/file.c linux-2.6.32.43/fs/ntfs/file.c
42050--- linux-2.6.32.43/fs/ntfs/file.c 2011-03-27 14:31:47.000000000 -0400
42051+++ linux-2.6.32.43/fs/ntfs/file.c 2011-04-17 15:56:46.000000000 -0400
42052@@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_
42053 #endif /* NTFS_RW */
42054 };
42055
42056-const struct file_operations ntfs_empty_file_ops = {};
42057+const struct file_operations ntfs_empty_file_ops __read_only;
42058
42059-const struct inode_operations ntfs_empty_inode_ops = {};
42060+const struct inode_operations ntfs_empty_inode_ops __read_only;
42061diff -urNp linux-2.6.32.43/fs/ocfs2/cluster/masklog.c linux-2.6.32.43/fs/ocfs2/cluster/masklog.c
42062--- linux-2.6.32.43/fs/ocfs2/cluster/masklog.c 2011-03-27 14:31:47.000000000 -0400
42063+++ linux-2.6.32.43/fs/ocfs2/cluster/masklog.c 2011-04-17 15:56:46.000000000 -0400
42064@@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject
42065 return mlog_mask_store(mlog_attr->mask, buf, count);
42066 }
42067
42068-static struct sysfs_ops mlog_attr_ops = {
42069+static const struct sysfs_ops mlog_attr_ops = {
42070 .show = mlog_show,
42071 .store = mlog_store,
42072 };
42073diff -urNp linux-2.6.32.43/fs/ocfs2/localalloc.c linux-2.6.32.43/fs/ocfs2/localalloc.c
42074--- linux-2.6.32.43/fs/ocfs2/localalloc.c 2011-03-27 14:31:47.000000000 -0400
42075+++ linux-2.6.32.43/fs/ocfs2/localalloc.c 2011-04-17 15:56:46.000000000 -0400
42076@@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo
42077 goto bail;
42078 }
42079
42080- atomic_inc(&osb->alloc_stats.moves);
42081+ atomic_inc_unchecked(&osb->alloc_stats.moves);
42082
42083 status = 0;
42084 bail:
42085diff -urNp linux-2.6.32.43/fs/ocfs2/namei.c linux-2.6.32.43/fs/ocfs2/namei.c
42086--- linux-2.6.32.43/fs/ocfs2/namei.c 2011-03-27 14:31:47.000000000 -0400
42087+++ linux-2.6.32.43/fs/ocfs2/namei.c 2011-05-16 21:46:57.000000000 -0400
42088@@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *ol
42089 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
42090 struct ocfs2_dir_lookup_result target_insert = { NULL, };
42091
42092+ pax_track_stack();
42093+
42094 /* At some point it might be nice to break this function up a
42095 * bit. */
42096
42097diff -urNp linux-2.6.32.43/fs/ocfs2/ocfs2.h linux-2.6.32.43/fs/ocfs2/ocfs2.h
42098--- linux-2.6.32.43/fs/ocfs2/ocfs2.h 2011-03-27 14:31:47.000000000 -0400
42099+++ linux-2.6.32.43/fs/ocfs2/ocfs2.h 2011-04-17 15:56:46.000000000 -0400
42100@@ -217,11 +217,11 @@ enum ocfs2_vol_state
42101
42102 struct ocfs2_alloc_stats
42103 {
42104- atomic_t moves;
42105- atomic_t local_data;
42106- atomic_t bitmap_data;
42107- atomic_t bg_allocs;
42108- atomic_t bg_extends;
42109+ atomic_unchecked_t moves;
42110+ atomic_unchecked_t local_data;
42111+ atomic_unchecked_t bitmap_data;
42112+ atomic_unchecked_t bg_allocs;
42113+ atomic_unchecked_t bg_extends;
42114 };
42115
42116 enum ocfs2_local_alloc_state
42117diff -urNp linux-2.6.32.43/fs/ocfs2/suballoc.c linux-2.6.32.43/fs/ocfs2/suballoc.c
42118--- linux-2.6.32.43/fs/ocfs2/suballoc.c 2011-03-27 14:31:47.000000000 -0400
42119+++ linux-2.6.32.43/fs/ocfs2/suballoc.c 2011-04-17 15:56:46.000000000 -0400
42120@@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s
42121 mlog_errno(status);
42122 goto bail;
42123 }
42124- atomic_inc(&osb->alloc_stats.bg_extends);
42125+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
42126
42127 /* You should never ask for this much metadata */
42128 BUG_ON(bits_wanted >
42129@@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
42130 mlog_errno(status);
42131 goto bail;
42132 }
42133- atomic_inc(&osb->alloc_stats.bg_allocs);
42134+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
42135
42136 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
42137 ac->ac_bits_given += (*num_bits);
42138@@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
42139 mlog_errno(status);
42140 goto bail;
42141 }
42142- atomic_inc(&osb->alloc_stats.bg_allocs);
42143+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
42144
42145 BUG_ON(num_bits != 1);
42146
42147@@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
42148 cluster_start,
42149 num_clusters);
42150 if (!status)
42151- atomic_inc(&osb->alloc_stats.local_data);
42152+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
42153 } else {
42154 if (min_clusters > (osb->bitmap_cpg - 1)) {
42155 /* The only paths asking for contiguousness
42156@@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
42157 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
42158 bg_blkno,
42159 bg_bit_off);
42160- atomic_inc(&osb->alloc_stats.bitmap_data);
42161+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
42162 }
42163 }
42164 if (status < 0) {
42165diff -urNp linux-2.6.32.43/fs/ocfs2/super.c linux-2.6.32.43/fs/ocfs2/super.c
42166--- linux-2.6.32.43/fs/ocfs2/super.c 2011-03-27 14:31:47.000000000 -0400
42167+++ linux-2.6.32.43/fs/ocfs2/super.c 2011-04-17 15:56:46.000000000 -0400
42168@@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
42169 "%10s => GlobalAllocs: %d LocalAllocs: %d "
42170 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
42171 "Stats",
42172- atomic_read(&osb->alloc_stats.bitmap_data),
42173- atomic_read(&osb->alloc_stats.local_data),
42174- atomic_read(&osb->alloc_stats.bg_allocs),
42175- atomic_read(&osb->alloc_stats.moves),
42176- atomic_read(&osb->alloc_stats.bg_extends));
42177+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
42178+ atomic_read_unchecked(&osb->alloc_stats.local_data),
42179+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
42180+ atomic_read_unchecked(&osb->alloc_stats.moves),
42181+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
42182
42183 out += snprintf(buf + out, len - out,
42184 "%10s => State: %u Descriptor: %llu Size: %u bits "
42185@@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct
42186 spin_lock_init(&osb->osb_xattr_lock);
42187 ocfs2_init_inode_steal_slot(osb);
42188
42189- atomic_set(&osb->alloc_stats.moves, 0);
42190- atomic_set(&osb->alloc_stats.local_data, 0);
42191- atomic_set(&osb->alloc_stats.bitmap_data, 0);
42192- atomic_set(&osb->alloc_stats.bg_allocs, 0);
42193- atomic_set(&osb->alloc_stats.bg_extends, 0);
42194+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
42195+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
42196+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
42197+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
42198+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
42199
42200 /* Copy the blockcheck stats from the superblock probe */
42201 osb->osb_ecc_stats = *stats;
42202diff -urNp linux-2.6.32.43/fs/open.c linux-2.6.32.43/fs/open.c
42203--- linux-2.6.32.43/fs/open.c 2011-03-27 14:31:47.000000000 -0400
42204+++ linux-2.6.32.43/fs/open.c 2011-04-17 15:56:46.000000000 -0400
42205@@ -275,6 +275,10 @@ static long do_sys_truncate(const char _
42206 error = locks_verify_truncate(inode, NULL, length);
42207 if (!error)
42208 error = security_path_truncate(&path, length, 0);
42209+
42210+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
42211+ error = -EACCES;
42212+
42213 if (!error) {
42214 vfs_dq_init(inode);
42215 error = do_truncate(path.dentry, length, 0, NULL);
42216@@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
42217 if (__mnt_is_readonly(path.mnt))
42218 res = -EROFS;
42219
42220+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
42221+ res = -EACCES;
42222+
42223 out_path_release:
42224 path_put(&path);
42225 out:
42226@@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user
42227 if (error)
42228 goto dput_and_out;
42229
42230+ gr_log_chdir(path.dentry, path.mnt);
42231+
42232 set_fs_pwd(current->fs, &path);
42233
42234 dput_and_out:
42235@@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
42236 goto out_putf;
42237
42238 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
42239+
42240+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
42241+ error = -EPERM;
42242+
42243+ if (!error)
42244+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
42245+
42246 if (!error)
42247 set_fs_pwd(current->fs, &file->f_path);
42248 out_putf:
42249@@ -588,7 +604,18 @@ SYSCALL_DEFINE1(chroot, const char __use
42250 if (!capable(CAP_SYS_CHROOT))
42251 goto dput_and_out;
42252
42253+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
42254+ goto dput_and_out;
42255+
42256+ if (gr_handle_chroot_caps(&path)) {
42257+ error = -ENOMEM;
42258+ goto dput_and_out;
42259+ }
42260+
42261 set_fs_root(current->fs, &path);
42262+
42263+ gr_handle_chroot_chdir(&path);
42264+
42265 error = 0;
42266 dput_and_out:
42267 path_put(&path);
42268@@ -616,12 +643,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
42269 err = mnt_want_write_file(file);
42270 if (err)
42271 goto out_putf;
42272+
42273 mutex_lock(&inode->i_mutex);
42274+
42275+ if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
42276+ err = -EACCES;
42277+ goto out_unlock;
42278+ }
42279+
42280 if (mode == (mode_t) -1)
42281 mode = inode->i_mode;
42282+
42283+ if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
42284+ err = -EPERM;
42285+ goto out_unlock;
42286+ }
42287+
42288 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
42289 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
42290 err = notify_change(dentry, &newattrs);
42291+
42292+out_unlock:
42293 mutex_unlock(&inode->i_mutex);
42294 mnt_drop_write(file->f_path.mnt);
42295 out_putf:
42296@@ -645,12 +687,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
42297 error = mnt_want_write(path.mnt);
42298 if (error)
42299 goto dput_and_out;
42300+
42301 mutex_lock(&inode->i_mutex);
42302+
42303+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
42304+ error = -EACCES;
42305+ goto out_unlock;
42306+ }
42307+
42308 if (mode == (mode_t) -1)
42309 mode = inode->i_mode;
42310+
42311+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
42312+ error = -EACCES;
42313+ goto out_unlock;
42314+ }
42315+
42316 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
42317 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
42318 error = notify_change(path.dentry, &newattrs);
42319+
42320+out_unlock:
42321 mutex_unlock(&inode->i_mutex);
42322 mnt_drop_write(path.mnt);
42323 dput_and_out:
42324@@ -664,12 +721,15 @@ SYSCALL_DEFINE2(chmod, const char __user
42325 return sys_fchmodat(AT_FDCWD, filename, mode);
42326 }
42327
42328-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
42329+static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
42330 {
42331 struct inode *inode = dentry->d_inode;
42332 int error;
42333 struct iattr newattrs;
42334
42335+ if (!gr_acl_handle_chown(dentry, mnt))
42336+ return -EACCES;
42337+
42338 newattrs.ia_valid = ATTR_CTIME;
42339 if (user != (uid_t) -1) {
42340 newattrs.ia_valid |= ATTR_UID;
42341@@ -700,7 +760,7 @@ SYSCALL_DEFINE3(chown, const char __user
42342 error = mnt_want_write(path.mnt);
42343 if (error)
42344 goto out_release;
42345- error = chown_common(path.dentry, user, group);
42346+ error = chown_common(path.dentry, user, group, path.mnt);
42347 mnt_drop_write(path.mnt);
42348 out_release:
42349 path_put(&path);
42350@@ -725,7 +785,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
42351 error = mnt_want_write(path.mnt);
42352 if (error)
42353 goto out_release;
42354- error = chown_common(path.dentry, user, group);
42355+ error = chown_common(path.dentry, user, group, path.mnt);
42356 mnt_drop_write(path.mnt);
42357 out_release:
42358 path_put(&path);
42359@@ -744,7 +804,7 @@ SYSCALL_DEFINE3(lchown, const char __use
42360 error = mnt_want_write(path.mnt);
42361 if (error)
42362 goto out_release;
42363- error = chown_common(path.dentry, user, group);
42364+ error = chown_common(path.dentry, user, group, path.mnt);
42365 mnt_drop_write(path.mnt);
42366 out_release:
42367 path_put(&path);
42368@@ -767,7 +827,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
42369 goto out_fput;
42370 dentry = file->f_path.dentry;
42371 audit_inode(NULL, dentry);
42372- error = chown_common(dentry, user, group);
42373+ error = chown_common(dentry, user, group, file->f_path.mnt);
42374 mnt_drop_write(file->f_path.mnt);
42375 out_fput:
42376 fput(file);
42377@@ -1036,7 +1096,10 @@ long do_sys_open(int dfd, const char __u
42378 if (!IS_ERR(tmp)) {
42379 fd = get_unused_fd_flags(flags);
42380 if (fd >= 0) {
42381- struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
42382+ struct file *f;
42383+ /* don't allow to be set by userland */
42384+ flags &= ~FMODE_GREXEC;
42385+ f = do_filp_open(dfd, tmp, flags, mode, 0);
42386 if (IS_ERR(f)) {
42387 put_unused_fd(fd);
42388 fd = PTR_ERR(f);
42389diff -urNp linux-2.6.32.43/fs/partitions/ldm.c linux-2.6.32.43/fs/partitions/ldm.c
42390--- linux-2.6.32.43/fs/partitions/ldm.c 2011-06-25 12:55:34.000000000 -0400
42391+++ linux-2.6.32.43/fs/partitions/ldm.c 2011-06-25 12:56:37.000000000 -0400
42392@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
42393 ldm_error ("A VBLK claims to have %d parts.", num);
42394 return false;
42395 }
42396+
42397 if (rec >= num) {
42398 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
42399 return false;
42400@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
42401 goto found;
42402 }
42403
42404- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
42405+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
42406 if (!f) {
42407 ldm_crit ("Out of memory.");
42408 return false;
42409diff -urNp linux-2.6.32.43/fs/partitions/mac.c linux-2.6.32.43/fs/partitions/mac.c
42410--- linux-2.6.32.43/fs/partitions/mac.c 2011-03-27 14:31:47.000000000 -0400
42411+++ linux-2.6.32.43/fs/partitions/mac.c 2011-04-17 15:56:46.000000000 -0400
42412@@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitio
42413 return 0; /* not a MacOS disk */
42414 }
42415 blocks_in_map = be32_to_cpu(part->map_count);
42416+ printk(" [mac]");
42417 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
42418 put_dev_sector(sect);
42419 return 0;
42420 }
42421- printk(" [mac]");
42422 for (slot = 1; slot <= blocks_in_map; ++slot) {
42423 int pos = slot * secsize;
42424 put_dev_sector(sect);
42425diff -urNp linux-2.6.32.43/fs/pipe.c linux-2.6.32.43/fs/pipe.c
42426--- linux-2.6.32.43/fs/pipe.c 2011-03-27 14:31:47.000000000 -0400
42427+++ linux-2.6.32.43/fs/pipe.c 2011-04-23 13:37:17.000000000 -0400
42428@@ -401,9 +401,9 @@ redo:
42429 }
42430 if (bufs) /* More to do? */
42431 continue;
42432- if (!pipe->writers)
42433+ if (!atomic_read(&pipe->writers))
42434 break;
42435- if (!pipe->waiting_writers) {
42436+ if (!atomic_read(&pipe->waiting_writers)) {
42437 /* syscall merging: Usually we must not sleep
42438 * if O_NONBLOCK is set, or if we got some data.
42439 * But if a writer sleeps in kernel space, then
42440@@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
42441 mutex_lock(&inode->i_mutex);
42442 pipe = inode->i_pipe;
42443
42444- if (!pipe->readers) {
42445+ if (!atomic_read(&pipe->readers)) {
42446 send_sig(SIGPIPE, current, 0);
42447 ret = -EPIPE;
42448 goto out;
42449@@ -511,7 +511,7 @@ redo1:
42450 for (;;) {
42451 int bufs;
42452
42453- if (!pipe->readers) {
42454+ if (!atomic_read(&pipe->readers)) {
42455 send_sig(SIGPIPE, current, 0);
42456 if (!ret)
42457 ret = -EPIPE;
42458@@ -597,9 +597,9 @@ redo2:
42459 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
42460 do_wakeup = 0;
42461 }
42462- pipe->waiting_writers++;
42463+ atomic_inc(&pipe->waiting_writers);
42464 pipe_wait(pipe);
42465- pipe->waiting_writers--;
42466+ atomic_dec(&pipe->waiting_writers);
42467 }
42468 out:
42469 mutex_unlock(&inode->i_mutex);
42470@@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
42471 mask = 0;
42472 if (filp->f_mode & FMODE_READ) {
42473 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
42474- if (!pipe->writers && filp->f_version != pipe->w_counter)
42475+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
42476 mask |= POLLHUP;
42477 }
42478
42479@@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
42480 * Most Unices do not set POLLERR for FIFOs but on Linux they
42481 * behave exactly like pipes for poll().
42482 */
42483- if (!pipe->readers)
42484+ if (!atomic_read(&pipe->readers))
42485 mask |= POLLERR;
42486 }
42487
42488@@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
42489
42490 mutex_lock(&inode->i_mutex);
42491 pipe = inode->i_pipe;
42492- pipe->readers -= decr;
42493- pipe->writers -= decw;
42494+ atomic_sub(decr, &pipe->readers);
42495+ atomic_sub(decw, &pipe->writers);
42496
42497- if (!pipe->readers && !pipe->writers) {
42498+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
42499 free_pipe_info(inode);
42500 } else {
42501 wake_up_interruptible_sync(&pipe->wait);
42502@@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
42503
42504 if (inode->i_pipe) {
42505 ret = 0;
42506- inode->i_pipe->readers++;
42507+ atomic_inc(&inode->i_pipe->readers);
42508 }
42509
42510 mutex_unlock(&inode->i_mutex);
42511@@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
42512
42513 if (inode->i_pipe) {
42514 ret = 0;
42515- inode->i_pipe->writers++;
42516+ atomic_inc(&inode->i_pipe->writers);
42517 }
42518
42519 mutex_unlock(&inode->i_mutex);
42520@@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
42521 if (inode->i_pipe) {
42522 ret = 0;
42523 if (filp->f_mode & FMODE_READ)
42524- inode->i_pipe->readers++;
42525+ atomic_inc(&inode->i_pipe->readers);
42526 if (filp->f_mode & FMODE_WRITE)
42527- inode->i_pipe->writers++;
42528+ atomic_inc(&inode->i_pipe->writers);
42529 }
42530
42531 mutex_unlock(&inode->i_mutex);
42532@@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
42533 inode->i_pipe = NULL;
42534 }
42535
42536-static struct vfsmount *pipe_mnt __read_mostly;
42537+struct vfsmount *pipe_mnt __read_mostly;
42538 static int pipefs_delete_dentry(struct dentry *dentry)
42539 {
42540 /*
42541@@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
42542 goto fail_iput;
42543 inode->i_pipe = pipe;
42544
42545- pipe->readers = pipe->writers = 1;
42546+ atomic_set(&pipe->readers, 1);
42547+ atomic_set(&pipe->writers, 1);
42548 inode->i_fop = &rdwr_pipefifo_fops;
42549
42550 /*
42551diff -urNp linux-2.6.32.43/fs/proc/array.c linux-2.6.32.43/fs/proc/array.c
42552--- linux-2.6.32.43/fs/proc/array.c 2011-03-27 14:31:47.000000000 -0400
42553+++ linux-2.6.32.43/fs/proc/array.c 2011-05-16 21:46:57.000000000 -0400
42554@@ -60,6 +60,7 @@
42555 #include <linux/tty.h>
42556 #include <linux/string.h>
42557 #include <linux/mman.h>
42558+#include <linux/grsecurity.h>
42559 #include <linux/proc_fs.h>
42560 #include <linux/ioport.h>
42561 #include <linux/uaccess.h>
42562@@ -321,6 +322,21 @@ static inline void task_context_switch_c
42563 p->nivcsw);
42564 }
42565
42566+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42567+static inline void task_pax(struct seq_file *m, struct task_struct *p)
42568+{
42569+ if (p->mm)
42570+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
42571+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
42572+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
42573+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
42574+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
42575+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
42576+ else
42577+ seq_printf(m, "PaX:\t-----\n");
42578+}
42579+#endif
42580+
42581 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
42582 struct pid *pid, struct task_struct *task)
42583 {
42584@@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m,
42585 task_cap(m, task);
42586 cpuset_task_status_allowed(m, task);
42587 task_context_switch_counts(m, task);
42588+
42589+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
42590+ task_pax(m, task);
42591+#endif
42592+
42593+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
42594+ task_grsec_rbac(m, task);
42595+#endif
42596+
42597 return 0;
42598 }
42599
42600+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42601+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42602+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
42603+ _mm->pax_flags & MF_PAX_SEGMEXEC))
42604+#endif
42605+
42606 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
42607 struct pid *pid, struct task_struct *task, int whole)
42608 {
42609@@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file
42610 cputime_t cutime, cstime, utime, stime;
42611 cputime_t cgtime, gtime;
42612 unsigned long rsslim = 0;
42613- char tcomm[sizeof(task->comm)];
42614+ char tcomm[sizeof(task->comm)] = { 0 };
42615 unsigned long flags;
42616
42617+ pax_track_stack();
42618+
42619 state = *get_task_state(task);
42620 vsize = eip = esp = 0;
42621 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
42622@@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file
42623 gtime = task_gtime(task);
42624 }
42625
42626+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42627+ if (PAX_RAND_FLAGS(mm)) {
42628+ eip = 0;
42629+ esp = 0;
42630+ wchan = 0;
42631+ }
42632+#endif
42633+#ifdef CONFIG_GRKERNSEC_HIDESYM
42634+ wchan = 0;
42635+ eip =0;
42636+ esp =0;
42637+#endif
42638+
42639 /* scale priority and nice values from timeslices to -20..20 */
42640 /* to make it look like a "normal" Unix priority/nice value */
42641 priority = task_prio(task);
42642@@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file
42643 vsize,
42644 mm ? get_mm_rss(mm) : 0,
42645 rsslim,
42646+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42647+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
42648+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
42649+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
42650+#else
42651 mm ? (permitted ? mm->start_code : 1) : 0,
42652 mm ? (permitted ? mm->end_code : 1) : 0,
42653 (permitted && mm) ? mm->start_stack : 0,
42654+#endif
42655 esp,
42656 eip,
42657 /* The signal information here is obsolete.
42658@@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, s
42659
42660 return 0;
42661 }
42662+
42663+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
42664+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
42665+{
42666+ u32 curr_ip = 0;
42667+ unsigned long flags;
42668+
42669+ if (lock_task_sighand(task, &flags)) {
42670+ curr_ip = task->signal->curr_ip;
42671+ unlock_task_sighand(task, &flags);
42672+ }
42673+
42674+ return sprintf(buffer, "%pI4\n", &curr_ip);
42675+}
42676+#endif
42677diff -urNp linux-2.6.32.43/fs/proc/base.c linux-2.6.32.43/fs/proc/base.c
42678--- linux-2.6.32.43/fs/proc/base.c 2011-04-22 19:16:29.000000000 -0400
42679+++ linux-2.6.32.43/fs/proc/base.c 2011-06-04 21:20:50.000000000 -0400
42680@@ -102,6 +102,22 @@ struct pid_entry {
42681 union proc_op op;
42682 };
42683
42684+struct getdents_callback {
42685+ struct linux_dirent __user * current_dir;
42686+ struct linux_dirent __user * previous;
42687+ struct file * file;
42688+ int count;
42689+ int error;
42690+};
42691+
42692+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
42693+ loff_t offset, u64 ino, unsigned int d_type)
42694+{
42695+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
42696+ buf->error = -EINVAL;
42697+ return 0;
42698+}
42699+
42700 #define NOD(NAME, MODE, IOP, FOP, OP) { \
42701 .name = (NAME), \
42702 .len = sizeof(NAME) - 1, \
42703@@ -213,6 +229,9 @@ static int check_mem_permission(struct t
42704 if (task == current)
42705 return 0;
42706
42707+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
42708+ return -EPERM;
42709+
42710 /*
42711 * If current is actively ptrace'ing, and would also be
42712 * permitted to freshly attach with ptrace now, permit it.
42713@@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_
42714 if (!mm->arg_end)
42715 goto out_mm; /* Shh! No looking before we're done */
42716
42717+ if (gr_acl_handle_procpidmem(task))
42718+ goto out_mm;
42719+
42720 len = mm->arg_end - mm->arg_start;
42721
42722 if (len > PAGE_SIZE)
42723@@ -287,12 +309,28 @@ out:
42724 return res;
42725 }
42726
42727+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42728+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
42729+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
42730+ _mm->pax_flags & MF_PAX_SEGMEXEC))
42731+#endif
42732+
42733 static int proc_pid_auxv(struct task_struct *task, char *buffer)
42734 {
42735 int res = 0;
42736 struct mm_struct *mm = get_task_mm(task);
42737 if (mm) {
42738 unsigned int nwords = 0;
42739+
42740+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
42741+ /* allow if we're currently ptracing this task */
42742+ if (PAX_RAND_FLAGS(mm) &&
42743+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
42744+ mmput(mm);
42745+ return res;
42746+ }
42747+#endif
42748+
42749 do {
42750 nwords += 2;
42751 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
42752@@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_str
42753 }
42754
42755
42756-#ifdef CONFIG_KALLSYMS
42757+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42758 /*
42759 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
42760 * Returns the resolved symbol. If that fails, simply return the address.
42761@@ -328,7 +366,7 @@ static int proc_pid_wchan(struct task_st
42762 }
42763 #endif /* CONFIG_KALLSYMS */
42764
42765-#ifdef CONFIG_STACKTRACE
42766+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
42767
42768 #define MAX_STACK_TRACE_DEPTH 64
42769
42770@@ -522,7 +560,7 @@ static int proc_pid_limits(struct task_s
42771 return count;
42772 }
42773
42774-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42775+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42776 static int proc_pid_syscall(struct task_struct *task, char *buffer)
42777 {
42778 long nr;
42779@@ -547,7 +585,7 @@ static int proc_pid_syscall(struct task_
42780 /************************************************************************/
42781
42782 /* permission checks */
42783-static int proc_fd_access_allowed(struct inode *inode)
42784+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
42785 {
42786 struct task_struct *task;
42787 int allowed = 0;
42788@@ -557,7 +595,10 @@ static int proc_fd_access_allowed(struct
42789 */
42790 task = get_proc_task(inode);
42791 if (task) {
42792- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
42793+ if (log)
42794+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
42795+ else
42796+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
42797 put_task_struct(task);
42798 }
42799 return allowed;
42800@@ -936,6 +977,9 @@ static ssize_t environ_read(struct file
42801 if (!task)
42802 goto out_no_task;
42803
42804+ if (gr_acl_handle_procpidmem(task))
42805+ goto out;
42806+
42807 if (!ptrace_may_access(task, PTRACE_MODE_READ))
42808 goto out;
42809
42810@@ -1350,7 +1394,7 @@ static void *proc_pid_follow_link(struct
42811 path_put(&nd->path);
42812
42813 /* Are we allowed to snoop on the tasks file descriptors? */
42814- if (!proc_fd_access_allowed(inode))
42815+ if (!proc_fd_access_allowed(inode,0))
42816 goto out;
42817
42818 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
42819@@ -1390,8 +1434,18 @@ static int proc_pid_readlink(struct dent
42820 struct path path;
42821
42822 /* Are we allowed to snoop on the tasks file descriptors? */
42823- if (!proc_fd_access_allowed(inode))
42824- goto out;
42825+ /* logging this is needed for learning on chromium to work properly,
42826+ but we don't want to flood the logs from 'ps' which does a readlink
42827+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
42828+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
42829+ */
42830+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
42831+ if (!proc_fd_access_allowed(inode,0))
42832+ goto out;
42833+ } else {
42834+ if (!proc_fd_access_allowed(inode,1))
42835+ goto out;
42836+ }
42837
42838 error = PROC_I(inode)->op.proc_get_link(inode, &path);
42839 if (error)
42840@@ -1456,7 +1510,11 @@ static struct inode *proc_pid_make_inode
42841 rcu_read_lock();
42842 cred = __task_cred(task);
42843 inode->i_uid = cred->euid;
42844+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42845+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42846+#else
42847 inode->i_gid = cred->egid;
42848+#endif
42849 rcu_read_unlock();
42850 }
42851 security_task_to_inode(task, inode);
42852@@ -1474,6 +1532,9 @@ static int pid_getattr(struct vfsmount *
42853 struct inode *inode = dentry->d_inode;
42854 struct task_struct *task;
42855 const struct cred *cred;
42856+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42857+ const struct cred *tmpcred = current_cred();
42858+#endif
42859
42860 generic_fillattr(inode, stat);
42861
42862@@ -1481,13 +1542,41 @@ static int pid_getattr(struct vfsmount *
42863 stat->uid = 0;
42864 stat->gid = 0;
42865 task = pid_task(proc_pid(inode), PIDTYPE_PID);
42866+
42867+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
42868+ rcu_read_unlock();
42869+ return -ENOENT;
42870+ }
42871+
42872 if (task) {
42873+ cred = __task_cred(task);
42874+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42875+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
42876+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42877+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
42878+#endif
42879+ ) {
42880+#endif
42881 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42882+#ifdef CONFIG_GRKERNSEC_PROC_USER
42883+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42884+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42885+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42886+#endif
42887 task_dumpable(task)) {
42888- cred = __task_cred(task);
42889 stat->uid = cred->euid;
42890+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42891+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
42892+#else
42893 stat->gid = cred->egid;
42894+#endif
42895 }
42896+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42897+ } else {
42898+ rcu_read_unlock();
42899+ return -ENOENT;
42900+ }
42901+#endif
42902 }
42903 rcu_read_unlock();
42904 return 0;
42905@@ -1518,11 +1607,20 @@ static int pid_revalidate(struct dentry
42906
42907 if (task) {
42908 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
42909+#ifdef CONFIG_GRKERNSEC_PROC_USER
42910+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
42911+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
42912+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
42913+#endif
42914 task_dumpable(task)) {
42915 rcu_read_lock();
42916 cred = __task_cred(task);
42917 inode->i_uid = cred->euid;
42918+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
42919+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
42920+#else
42921 inode->i_gid = cred->egid;
42922+#endif
42923 rcu_read_unlock();
42924 } else {
42925 inode->i_uid = 0;
42926@@ -1643,7 +1741,8 @@ static int proc_fd_info(struct inode *in
42927 int fd = proc_fd(inode);
42928
42929 if (task) {
42930- files = get_files_struct(task);
42931+ if (!gr_acl_handle_procpidmem(task))
42932+ files = get_files_struct(task);
42933 put_task_struct(task);
42934 }
42935 if (files) {
42936@@ -1895,12 +1994,22 @@ static const struct file_operations proc
42937 static int proc_fd_permission(struct inode *inode, int mask)
42938 {
42939 int rv;
42940+ struct task_struct *task;
42941
42942 rv = generic_permission(inode, mask, NULL);
42943- if (rv == 0)
42944- return 0;
42945+
42946 if (task_pid(current) == proc_pid(inode))
42947 rv = 0;
42948+
42949+ task = get_proc_task(inode);
42950+ if (task == NULL)
42951+ return rv;
42952+
42953+ if (gr_acl_handle_procpidmem(task))
42954+ rv = -EACCES;
42955+
42956+ put_task_struct(task);
42957+
42958 return rv;
42959 }
42960
42961@@ -2009,6 +2118,9 @@ static struct dentry *proc_pident_lookup
42962 if (!task)
42963 goto out_no_task;
42964
42965+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42966+ goto out;
42967+
42968 /*
42969 * Yes, it does not scale. And it should not. Don't add
42970 * new entries into /proc/<tgid>/ without very good reasons.
42971@@ -2053,6 +2165,9 @@ static int proc_pident_readdir(struct fi
42972 if (!task)
42973 goto out_no_task;
42974
42975+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
42976+ goto out;
42977+
42978 ret = 0;
42979 i = filp->f_pos;
42980 switch (i) {
42981@@ -2320,7 +2435,7 @@ static void *proc_self_follow_link(struc
42982 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
42983 void *cookie)
42984 {
42985- char *s = nd_get_link(nd);
42986+ const char *s = nd_get_link(nd);
42987 if (!IS_ERR(s))
42988 __putname(s);
42989 }
42990@@ -2519,7 +2634,7 @@ static const struct pid_entry tgid_base_
42991 #ifdef CONFIG_SCHED_DEBUG
42992 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
42993 #endif
42994-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
42995+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
42996 INF("syscall", S_IRUSR, proc_pid_syscall),
42997 #endif
42998 INF("cmdline", S_IRUGO, proc_pid_cmdline),
42999@@ -2544,10 +2659,10 @@ static const struct pid_entry tgid_base_
43000 #ifdef CONFIG_SECURITY
43001 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
43002 #endif
43003-#ifdef CONFIG_KALLSYMS
43004+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
43005 INF("wchan", S_IRUGO, proc_pid_wchan),
43006 #endif
43007-#ifdef CONFIG_STACKTRACE
43008+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
43009 ONE("stack", S_IRUSR, proc_pid_stack),
43010 #endif
43011 #ifdef CONFIG_SCHEDSTATS
43012@@ -2577,6 +2692,9 @@ static const struct pid_entry tgid_base_
43013 #ifdef CONFIG_TASK_IO_ACCOUNTING
43014 INF("io", S_IRUGO, proc_tgid_io_accounting),
43015 #endif
43016+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
43017+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
43018+#endif
43019 };
43020
43021 static int proc_tgid_base_readdir(struct file * filp,
43022@@ -2701,7 +2819,14 @@ static struct dentry *proc_pid_instantia
43023 if (!inode)
43024 goto out;
43025
43026+#ifdef CONFIG_GRKERNSEC_PROC_USER
43027+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
43028+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43029+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
43030+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
43031+#else
43032 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
43033+#endif
43034 inode->i_op = &proc_tgid_base_inode_operations;
43035 inode->i_fop = &proc_tgid_base_operations;
43036 inode->i_flags|=S_IMMUTABLE;
43037@@ -2743,7 +2868,11 @@ struct dentry *proc_pid_lookup(struct in
43038 if (!task)
43039 goto out;
43040
43041+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
43042+ goto out_put_task;
43043+
43044 result = proc_pid_instantiate(dir, dentry, task, NULL);
43045+out_put_task:
43046 put_task_struct(task);
43047 out:
43048 return result;
43049@@ -2808,6 +2937,11 @@ int proc_pid_readdir(struct file * filp,
43050 {
43051 unsigned int nr;
43052 struct task_struct *reaper;
43053+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43054+ const struct cred *tmpcred = current_cred();
43055+ const struct cred *itercred;
43056+#endif
43057+ filldir_t __filldir = filldir;
43058 struct tgid_iter iter;
43059 struct pid_namespace *ns;
43060
43061@@ -2831,8 +2965,27 @@ int proc_pid_readdir(struct file * filp,
43062 for (iter = next_tgid(ns, iter);
43063 iter.task;
43064 iter.tgid += 1, iter = next_tgid(ns, iter)) {
43065+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43066+ rcu_read_lock();
43067+ itercred = __task_cred(iter.task);
43068+#endif
43069+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
43070+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43071+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
43072+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
43073+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
43074+#endif
43075+ )
43076+#endif
43077+ )
43078+ __filldir = &gr_fake_filldir;
43079+ else
43080+ __filldir = filldir;
43081+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43082+ rcu_read_unlock();
43083+#endif
43084 filp->f_pos = iter.tgid + TGID_OFFSET;
43085- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
43086+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
43087 put_task_struct(iter.task);
43088 goto out;
43089 }
43090@@ -2858,7 +3011,7 @@ static const struct pid_entry tid_base_s
43091 #ifdef CONFIG_SCHED_DEBUG
43092 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
43093 #endif
43094-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
43095+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
43096 INF("syscall", S_IRUSR, proc_pid_syscall),
43097 #endif
43098 INF("cmdline", S_IRUGO, proc_pid_cmdline),
43099@@ -2882,10 +3035,10 @@ static const struct pid_entry tid_base_s
43100 #ifdef CONFIG_SECURITY
43101 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
43102 #endif
43103-#ifdef CONFIG_KALLSYMS
43104+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
43105 INF("wchan", S_IRUGO, proc_pid_wchan),
43106 #endif
43107-#ifdef CONFIG_STACKTRACE
43108+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
43109 ONE("stack", S_IRUSR, proc_pid_stack),
43110 #endif
43111 #ifdef CONFIG_SCHEDSTATS
43112diff -urNp linux-2.6.32.43/fs/proc/cmdline.c linux-2.6.32.43/fs/proc/cmdline.c
43113--- linux-2.6.32.43/fs/proc/cmdline.c 2011-03-27 14:31:47.000000000 -0400
43114+++ linux-2.6.32.43/fs/proc/cmdline.c 2011-04-17 15:56:46.000000000 -0400
43115@@ -23,7 +23,11 @@ static const struct file_operations cmdl
43116
43117 static int __init proc_cmdline_init(void)
43118 {
43119+#ifdef CONFIG_GRKERNSEC_PROC_ADD
43120+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
43121+#else
43122 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
43123+#endif
43124 return 0;
43125 }
43126 module_init(proc_cmdline_init);
43127diff -urNp linux-2.6.32.43/fs/proc/devices.c linux-2.6.32.43/fs/proc/devices.c
43128--- linux-2.6.32.43/fs/proc/devices.c 2011-03-27 14:31:47.000000000 -0400
43129+++ linux-2.6.32.43/fs/proc/devices.c 2011-04-17 15:56:46.000000000 -0400
43130@@ -64,7 +64,11 @@ static const struct file_operations proc
43131
43132 static int __init proc_devices_init(void)
43133 {
43134+#ifdef CONFIG_GRKERNSEC_PROC_ADD
43135+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
43136+#else
43137 proc_create("devices", 0, NULL, &proc_devinfo_operations);
43138+#endif
43139 return 0;
43140 }
43141 module_init(proc_devices_init);
43142diff -urNp linux-2.6.32.43/fs/proc/inode.c linux-2.6.32.43/fs/proc/inode.c
43143--- linux-2.6.32.43/fs/proc/inode.c 2011-03-27 14:31:47.000000000 -0400
43144+++ linux-2.6.32.43/fs/proc/inode.c 2011-04-17 15:56:46.000000000 -0400
43145@@ -457,7 +457,11 @@ struct inode *proc_get_inode(struct supe
43146 if (de->mode) {
43147 inode->i_mode = de->mode;
43148 inode->i_uid = de->uid;
43149+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
43150+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
43151+#else
43152 inode->i_gid = de->gid;
43153+#endif
43154 }
43155 if (de->size)
43156 inode->i_size = de->size;
43157diff -urNp linux-2.6.32.43/fs/proc/internal.h linux-2.6.32.43/fs/proc/internal.h
43158--- linux-2.6.32.43/fs/proc/internal.h 2011-03-27 14:31:47.000000000 -0400
43159+++ linux-2.6.32.43/fs/proc/internal.h 2011-04-17 15:56:46.000000000 -0400
43160@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
43161 struct pid *pid, struct task_struct *task);
43162 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
43163 struct pid *pid, struct task_struct *task);
43164+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
43165+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
43166+#endif
43167 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
43168
43169 extern const struct file_operations proc_maps_operations;
43170diff -urNp linux-2.6.32.43/fs/proc/Kconfig linux-2.6.32.43/fs/proc/Kconfig
43171--- linux-2.6.32.43/fs/proc/Kconfig 2011-03-27 14:31:47.000000000 -0400
43172+++ linux-2.6.32.43/fs/proc/Kconfig 2011-04-17 15:56:46.000000000 -0400
43173@@ -30,12 +30,12 @@ config PROC_FS
43174
43175 config PROC_KCORE
43176 bool "/proc/kcore support" if !ARM
43177- depends on PROC_FS && MMU
43178+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
43179
43180 config PROC_VMCORE
43181 bool "/proc/vmcore support (EXPERIMENTAL)"
43182- depends on PROC_FS && CRASH_DUMP
43183- default y
43184+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
43185+ default n
43186 help
43187 Exports the dump image of crashed kernel in ELF format.
43188
43189@@ -59,8 +59,8 @@ config PROC_SYSCTL
43190 limited in memory.
43191
43192 config PROC_PAGE_MONITOR
43193- default y
43194- depends on PROC_FS && MMU
43195+ default n
43196+ depends on PROC_FS && MMU && !GRKERNSEC
43197 bool "Enable /proc page monitoring" if EMBEDDED
43198 help
43199 Various /proc files exist to monitor process memory utilization:
43200diff -urNp linux-2.6.32.43/fs/proc/kcore.c linux-2.6.32.43/fs/proc/kcore.c
43201--- linux-2.6.32.43/fs/proc/kcore.c 2011-03-27 14:31:47.000000000 -0400
43202+++ linux-2.6.32.43/fs/proc/kcore.c 2011-05-16 21:46:57.000000000 -0400
43203@@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bu
43204 off_t offset = 0;
43205 struct kcore_list *m;
43206
43207+ pax_track_stack();
43208+
43209 /* setup ELF header */
43210 elf = (struct elfhdr *) bufp;
43211 bufp += sizeof(struct elfhdr);
43212@@ -477,9 +479,10 @@ read_kcore(struct file *file, char __use
43213 * the addresses in the elf_phdr on our list.
43214 */
43215 start = kc_offset_to_vaddr(*fpos - elf_buflen);
43216- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
43217+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
43218+ if (tsz > buflen)
43219 tsz = buflen;
43220-
43221+
43222 while (buflen) {
43223 struct kcore_list *m;
43224
43225@@ -508,20 +511,23 @@ read_kcore(struct file *file, char __use
43226 kfree(elf_buf);
43227 } else {
43228 if (kern_addr_valid(start)) {
43229- unsigned long n;
43230+ char *elf_buf;
43231+ mm_segment_t oldfs;
43232
43233- n = copy_to_user(buffer, (char *)start, tsz);
43234- /*
43235- * We cannot distingush between fault on source
43236- * and fault on destination. When this happens
43237- * we clear too and hope it will trigger the
43238- * EFAULT again.
43239- */
43240- if (n) {
43241- if (clear_user(buffer + tsz - n,
43242- n))
43243+ elf_buf = kmalloc(tsz, GFP_KERNEL);
43244+ if (!elf_buf)
43245+ return -ENOMEM;
43246+ oldfs = get_fs();
43247+ set_fs(KERNEL_DS);
43248+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
43249+ set_fs(oldfs);
43250+ if (copy_to_user(buffer, elf_buf, tsz)) {
43251+ kfree(elf_buf);
43252 return -EFAULT;
43253+ }
43254 }
43255+ set_fs(oldfs);
43256+ kfree(elf_buf);
43257 } else {
43258 if (clear_user(buffer, tsz))
43259 return -EFAULT;
43260@@ -541,6 +547,9 @@ read_kcore(struct file *file, char __use
43261
43262 static int open_kcore(struct inode *inode, struct file *filp)
43263 {
43264+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
43265+ return -EPERM;
43266+#endif
43267 if (!capable(CAP_SYS_RAWIO))
43268 return -EPERM;
43269 if (kcore_need_update)
43270diff -urNp linux-2.6.32.43/fs/proc/meminfo.c linux-2.6.32.43/fs/proc/meminfo.c
43271--- linux-2.6.32.43/fs/proc/meminfo.c 2011-03-27 14:31:47.000000000 -0400
43272+++ linux-2.6.32.43/fs/proc/meminfo.c 2011-05-16 21:46:57.000000000 -0400
43273@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
43274 unsigned long pages[NR_LRU_LISTS];
43275 int lru;
43276
43277+ pax_track_stack();
43278+
43279 /*
43280 * display in kilobytes.
43281 */
43282@@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_
43283 vmi.used >> 10,
43284 vmi.largest_chunk >> 10
43285 #ifdef CONFIG_MEMORY_FAILURE
43286- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
43287+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
43288 #endif
43289 );
43290
43291diff -urNp linux-2.6.32.43/fs/proc/nommu.c linux-2.6.32.43/fs/proc/nommu.c
43292--- linux-2.6.32.43/fs/proc/nommu.c 2011-03-27 14:31:47.000000000 -0400
43293+++ linux-2.6.32.43/fs/proc/nommu.c 2011-04-17 15:56:46.000000000 -0400
43294@@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
43295 if (len < 1)
43296 len = 1;
43297 seq_printf(m, "%*c", len, ' ');
43298- seq_path(m, &file->f_path, "");
43299+ seq_path(m, &file->f_path, "\n\\");
43300 }
43301
43302 seq_putc(m, '\n');
43303diff -urNp linux-2.6.32.43/fs/proc/proc_net.c linux-2.6.32.43/fs/proc/proc_net.c
43304--- linux-2.6.32.43/fs/proc/proc_net.c 2011-03-27 14:31:47.000000000 -0400
43305+++ linux-2.6.32.43/fs/proc/proc_net.c 2011-04-17 15:56:46.000000000 -0400
43306@@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
43307 struct task_struct *task;
43308 struct nsproxy *ns;
43309 struct net *net = NULL;
43310+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43311+ const struct cred *cred = current_cred();
43312+#endif
43313+
43314+#ifdef CONFIG_GRKERNSEC_PROC_USER
43315+ if (cred->fsuid)
43316+ return net;
43317+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43318+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
43319+ return net;
43320+#endif
43321
43322 rcu_read_lock();
43323 task = pid_task(proc_pid(dir), PIDTYPE_PID);
43324diff -urNp linux-2.6.32.43/fs/proc/proc_sysctl.c linux-2.6.32.43/fs/proc/proc_sysctl.c
43325--- linux-2.6.32.43/fs/proc/proc_sysctl.c 2011-03-27 14:31:47.000000000 -0400
43326+++ linux-2.6.32.43/fs/proc/proc_sysctl.c 2011-04-17 15:56:46.000000000 -0400
43327@@ -7,6 +7,8 @@
43328 #include <linux/security.h>
43329 #include "internal.h"
43330
43331+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
43332+
43333 static const struct dentry_operations proc_sys_dentry_operations;
43334 static const struct file_operations proc_sys_file_operations;
43335 static const struct inode_operations proc_sys_inode_operations;
43336@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
43337 if (!p)
43338 goto out;
43339
43340+ if (gr_handle_sysctl(p, MAY_EXEC))
43341+ goto out;
43342+
43343 err = ERR_PTR(-ENOMEM);
43344 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
43345 if (h)
43346@@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
43347 if (*pos < file->f_pos)
43348 continue;
43349
43350+ if (gr_handle_sysctl(table, 0))
43351+ continue;
43352+
43353 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
43354 if (res)
43355 return res;
43356@@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
43357 if (IS_ERR(head))
43358 return PTR_ERR(head);
43359
43360+ if (table && gr_handle_sysctl(table, MAY_EXEC))
43361+ return -ENOENT;
43362+
43363 generic_fillattr(inode, stat);
43364 if (table)
43365 stat->mode = (stat->mode & S_IFMT) | table->mode;
43366diff -urNp linux-2.6.32.43/fs/proc/root.c linux-2.6.32.43/fs/proc/root.c
43367--- linux-2.6.32.43/fs/proc/root.c 2011-03-27 14:31:47.000000000 -0400
43368+++ linux-2.6.32.43/fs/proc/root.c 2011-04-17 15:56:46.000000000 -0400
43369@@ -134,7 +134,15 @@ void __init proc_root_init(void)
43370 #ifdef CONFIG_PROC_DEVICETREE
43371 proc_device_tree_init();
43372 #endif
43373+#ifdef CONFIG_GRKERNSEC_PROC_ADD
43374+#ifdef CONFIG_GRKERNSEC_PROC_USER
43375+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
43376+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
43377+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
43378+#endif
43379+#else
43380 proc_mkdir("bus", NULL);
43381+#endif
43382 proc_sys_init();
43383 }
43384
43385diff -urNp linux-2.6.32.43/fs/proc/task_mmu.c linux-2.6.32.43/fs/proc/task_mmu.c
43386--- linux-2.6.32.43/fs/proc/task_mmu.c 2011-03-27 14:31:47.000000000 -0400
43387+++ linux-2.6.32.43/fs/proc/task_mmu.c 2011-04-23 13:38:09.000000000 -0400
43388@@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
43389 "VmStk:\t%8lu kB\n"
43390 "VmExe:\t%8lu kB\n"
43391 "VmLib:\t%8lu kB\n"
43392- "VmPTE:\t%8lu kB\n",
43393- hiwater_vm << (PAGE_SHIFT-10),
43394+ "VmPTE:\t%8lu kB\n"
43395+
43396+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43397+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
43398+#endif
43399+
43400+ ,hiwater_vm << (PAGE_SHIFT-10),
43401 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
43402 mm->locked_vm << (PAGE_SHIFT-10),
43403 hiwater_rss << (PAGE_SHIFT-10),
43404 total_rss << (PAGE_SHIFT-10),
43405 data << (PAGE_SHIFT-10),
43406 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
43407- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
43408+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
43409+
43410+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
43411+ , mm->context.user_cs_base, mm->context.user_cs_limit
43412+#endif
43413+
43414+ );
43415 }
43416
43417 unsigned long task_vsize(struct mm_struct *mm)
43418@@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, v
43419 struct proc_maps_private *priv = m->private;
43420 struct vm_area_struct *vma = v;
43421
43422- vma_stop(priv, vma);
43423+ if (!IS_ERR(vma))
43424+ vma_stop(priv, vma);
43425 if (priv->task)
43426 put_task_struct(priv->task);
43427 }
43428@@ -199,6 +211,12 @@ static int do_maps_open(struct inode *in
43429 return ret;
43430 }
43431
43432+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43433+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
43434+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
43435+ _mm->pax_flags & MF_PAX_SEGMEXEC))
43436+#endif
43437+
43438 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
43439 {
43440 struct mm_struct *mm = vma->vm_mm;
43441@@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file
43442 int flags = vma->vm_flags;
43443 unsigned long ino = 0;
43444 unsigned long long pgoff = 0;
43445- unsigned long start;
43446 dev_t dev = 0;
43447 int len;
43448
43449@@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file
43450 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
43451 }
43452
43453- /* We don't show the stack guard page in /proc/maps */
43454- start = vma->vm_start;
43455- if (vma->vm_flags & VM_GROWSDOWN)
43456- if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
43457- start += PAGE_SIZE;
43458-
43459 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
43460- start,
43461+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43462+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
43463+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
43464+#else
43465+ vma->vm_start,
43466 vma->vm_end,
43467+#endif
43468 flags & VM_READ ? 'r' : '-',
43469 flags & VM_WRITE ? 'w' : '-',
43470 flags & VM_EXEC ? 'x' : '-',
43471 flags & VM_MAYSHARE ? 's' : 'p',
43472+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43473+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
43474+#else
43475 pgoff,
43476+#endif
43477 MAJOR(dev), MINOR(dev), ino, &len);
43478
43479 /*
43480@@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file
43481 */
43482 if (file) {
43483 pad_len_spaces(m, len);
43484- seq_path(m, &file->f_path, "\n");
43485+ seq_path(m, &file->f_path, "\n\\");
43486 } else {
43487 const char *name = arch_vma_name(vma);
43488 if (!name) {
43489@@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file
43490 if (vma->vm_start <= mm->brk &&
43491 vma->vm_end >= mm->start_brk) {
43492 name = "[heap]";
43493- } else if (vma->vm_start <= mm->start_stack &&
43494- vma->vm_end >= mm->start_stack) {
43495+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
43496+ (vma->vm_start <= mm->start_stack &&
43497+ vma->vm_end >= mm->start_stack)) {
43498 name = "[stack]";
43499 }
43500 } else {
43501@@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m,
43502 };
43503
43504 memset(&mss, 0, sizeof mss);
43505- mss.vma = vma;
43506- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
43507- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
43508+
43509+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43510+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
43511+#endif
43512+ mss.vma = vma;
43513+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
43514+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
43515+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43516+ }
43517+#endif
43518
43519 show_map_vma(m, vma);
43520
43521@@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m,
43522 "Swap: %8lu kB\n"
43523 "KernelPageSize: %8lu kB\n"
43524 "MMUPageSize: %8lu kB\n",
43525+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
43526+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
43527+#else
43528 (vma->vm_end - vma->vm_start) >> 10,
43529+#endif
43530 mss.resident >> 10,
43531 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
43532 mss.shared_clean >> 10,
43533diff -urNp linux-2.6.32.43/fs/proc/task_nommu.c linux-2.6.32.43/fs/proc/task_nommu.c
43534--- linux-2.6.32.43/fs/proc/task_nommu.c 2011-03-27 14:31:47.000000000 -0400
43535+++ linux-2.6.32.43/fs/proc/task_nommu.c 2011-04-17 15:56:46.000000000 -0400
43536@@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
43537 else
43538 bytes += kobjsize(mm);
43539
43540- if (current->fs && current->fs->users > 1)
43541+ if (current->fs && atomic_read(&current->fs->users) > 1)
43542 sbytes += kobjsize(current->fs);
43543 else
43544 bytes += kobjsize(current->fs);
43545@@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
43546 if (len < 1)
43547 len = 1;
43548 seq_printf(m, "%*c", len, ' ');
43549- seq_path(m, &file->f_path, "");
43550+ seq_path(m, &file->f_path, "\n\\");
43551 }
43552
43553 seq_putc(m, '\n');
43554diff -urNp linux-2.6.32.43/fs/readdir.c linux-2.6.32.43/fs/readdir.c
43555--- linux-2.6.32.43/fs/readdir.c 2011-03-27 14:31:47.000000000 -0400
43556+++ linux-2.6.32.43/fs/readdir.c 2011-04-17 15:56:46.000000000 -0400
43557@@ -16,6 +16,7 @@
43558 #include <linux/security.h>
43559 #include <linux/syscalls.h>
43560 #include <linux/unistd.h>
43561+#include <linux/namei.h>
43562
43563 #include <asm/uaccess.h>
43564
43565@@ -67,6 +68,7 @@ struct old_linux_dirent {
43566
43567 struct readdir_callback {
43568 struct old_linux_dirent __user * dirent;
43569+ struct file * file;
43570 int result;
43571 };
43572
43573@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
43574 buf->result = -EOVERFLOW;
43575 return -EOVERFLOW;
43576 }
43577+
43578+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43579+ return 0;
43580+
43581 buf->result++;
43582 dirent = buf->dirent;
43583 if (!access_ok(VERIFY_WRITE, dirent,
43584@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
43585
43586 buf.result = 0;
43587 buf.dirent = dirent;
43588+ buf.file = file;
43589
43590 error = vfs_readdir(file, fillonedir, &buf);
43591 if (buf.result)
43592@@ -142,6 +149,7 @@ struct linux_dirent {
43593 struct getdents_callback {
43594 struct linux_dirent __user * current_dir;
43595 struct linux_dirent __user * previous;
43596+ struct file * file;
43597 int count;
43598 int error;
43599 };
43600@@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
43601 buf->error = -EOVERFLOW;
43602 return -EOVERFLOW;
43603 }
43604+
43605+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43606+ return 0;
43607+
43608 dirent = buf->previous;
43609 if (dirent) {
43610 if (__put_user(offset, &dirent->d_off))
43611@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
43612 buf.previous = NULL;
43613 buf.count = count;
43614 buf.error = 0;
43615+ buf.file = file;
43616
43617 error = vfs_readdir(file, filldir, &buf);
43618 if (error >= 0)
43619@@ -228,6 +241,7 @@ out:
43620 struct getdents_callback64 {
43621 struct linux_dirent64 __user * current_dir;
43622 struct linux_dirent64 __user * previous;
43623+ struct file *file;
43624 int count;
43625 int error;
43626 };
43627@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
43628 buf->error = -EINVAL; /* only used if we fail.. */
43629 if (reclen > buf->count)
43630 return -EINVAL;
43631+
43632+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
43633+ return 0;
43634+
43635 dirent = buf->previous;
43636 if (dirent) {
43637 if (__put_user(offset, &dirent->d_off))
43638@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
43639
43640 buf.current_dir = dirent;
43641 buf.previous = NULL;
43642+ buf.file = file;
43643 buf.count = count;
43644 buf.error = 0;
43645
43646diff -urNp linux-2.6.32.43/fs/reiserfs/dir.c linux-2.6.32.43/fs/reiserfs/dir.c
43647--- linux-2.6.32.43/fs/reiserfs/dir.c 2011-03-27 14:31:47.000000000 -0400
43648+++ linux-2.6.32.43/fs/reiserfs/dir.c 2011-05-16 21:46:57.000000000 -0400
43649@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
43650 struct reiserfs_dir_entry de;
43651 int ret = 0;
43652
43653+ pax_track_stack();
43654+
43655 reiserfs_write_lock(inode->i_sb);
43656
43657 reiserfs_check_lock_depth(inode->i_sb, "readdir");
43658diff -urNp linux-2.6.32.43/fs/reiserfs/do_balan.c linux-2.6.32.43/fs/reiserfs/do_balan.c
43659--- linux-2.6.32.43/fs/reiserfs/do_balan.c 2011-03-27 14:31:47.000000000 -0400
43660+++ linux-2.6.32.43/fs/reiserfs/do_balan.c 2011-04-17 15:56:46.000000000 -0400
43661@@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
43662 return;
43663 }
43664
43665- atomic_inc(&(fs_generation(tb->tb_sb)));
43666+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
43667 do_balance_starts(tb);
43668
43669 /* balance leaf returns 0 except if combining L R and S into
43670diff -urNp linux-2.6.32.43/fs/reiserfs/item_ops.c linux-2.6.32.43/fs/reiserfs/item_ops.c
43671--- linux-2.6.32.43/fs/reiserfs/item_ops.c 2011-03-27 14:31:47.000000000 -0400
43672+++ linux-2.6.32.43/fs/reiserfs/item_ops.c 2011-04-17 15:56:46.000000000 -0400
43673@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
43674 vi->vi_index, vi->vi_type, vi->vi_ih);
43675 }
43676
43677-static struct item_operations stat_data_ops = {
43678+static const struct item_operations stat_data_ops = {
43679 .bytes_number = sd_bytes_number,
43680 .decrement_key = sd_decrement_key,
43681 .is_left_mergeable = sd_is_left_mergeable,
43682@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
43683 vi->vi_index, vi->vi_type, vi->vi_ih);
43684 }
43685
43686-static struct item_operations direct_ops = {
43687+static const struct item_operations direct_ops = {
43688 .bytes_number = direct_bytes_number,
43689 .decrement_key = direct_decrement_key,
43690 .is_left_mergeable = direct_is_left_mergeable,
43691@@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
43692 vi->vi_index, vi->vi_type, vi->vi_ih);
43693 }
43694
43695-static struct item_operations indirect_ops = {
43696+static const struct item_operations indirect_ops = {
43697 .bytes_number = indirect_bytes_number,
43698 .decrement_key = indirect_decrement_key,
43699 .is_left_mergeable = indirect_is_left_mergeable,
43700@@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
43701 printk("\n");
43702 }
43703
43704-static struct item_operations direntry_ops = {
43705+static const struct item_operations direntry_ops = {
43706 .bytes_number = direntry_bytes_number,
43707 .decrement_key = direntry_decrement_key,
43708 .is_left_mergeable = direntry_is_left_mergeable,
43709@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
43710 "Invalid item type observed, run fsck ASAP");
43711 }
43712
43713-static struct item_operations errcatch_ops = {
43714+static const struct item_operations errcatch_ops = {
43715 errcatch_bytes_number,
43716 errcatch_decrement_key,
43717 errcatch_is_left_mergeable,
43718@@ -746,7 +746,7 @@ static struct item_operations errcatch_o
43719 #error Item types must use disk-format assigned values.
43720 #endif
43721
43722-struct item_operations *item_ops[TYPE_ANY + 1] = {
43723+const struct item_operations * const item_ops[TYPE_ANY + 1] = {
43724 &stat_data_ops,
43725 &indirect_ops,
43726 &direct_ops,
43727diff -urNp linux-2.6.32.43/fs/reiserfs/journal.c linux-2.6.32.43/fs/reiserfs/journal.c
43728--- linux-2.6.32.43/fs/reiserfs/journal.c 2011-03-27 14:31:47.000000000 -0400
43729+++ linux-2.6.32.43/fs/reiserfs/journal.c 2011-05-16 21:46:57.000000000 -0400
43730@@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_brea
43731 struct buffer_head *bh;
43732 int i, j;
43733
43734+ pax_track_stack();
43735+
43736 bh = __getblk(dev, block, bufsize);
43737 if (buffer_uptodate(bh))
43738 return (bh);
43739diff -urNp linux-2.6.32.43/fs/reiserfs/namei.c linux-2.6.32.43/fs/reiserfs/namei.c
43740--- linux-2.6.32.43/fs/reiserfs/namei.c 2011-03-27 14:31:47.000000000 -0400
43741+++ linux-2.6.32.43/fs/reiserfs/namei.c 2011-05-16 21:46:57.000000000 -0400
43742@@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode
43743 unsigned long savelink = 1;
43744 struct timespec ctime;
43745
43746+ pax_track_stack();
43747+
43748 /* three balancings: (1) old name removal, (2) new name insertion
43749 and (3) maybe "save" link insertion
43750 stat data updates: (1) old directory,
43751diff -urNp linux-2.6.32.43/fs/reiserfs/procfs.c linux-2.6.32.43/fs/reiserfs/procfs.c
43752--- linux-2.6.32.43/fs/reiserfs/procfs.c 2011-03-27 14:31:47.000000000 -0400
43753+++ linux-2.6.32.43/fs/reiserfs/procfs.c 2011-05-16 21:46:57.000000000 -0400
43754@@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
43755 "SMALL_TAILS " : "NO_TAILS ",
43756 replay_only(sb) ? "REPLAY_ONLY " : "",
43757 convert_reiserfs(sb) ? "CONV " : "",
43758- atomic_read(&r->s_generation_counter),
43759+ atomic_read_unchecked(&r->s_generation_counter),
43760 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
43761 SF(s_do_balance), SF(s_unneeded_left_neighbor),
43762 SF(s_good_search_by_key_reada), SF(s_bmaps),
43763@@ -309,6 +309,8 @@ static int show_journal(struct seq_file
43764 struct journal_params *jp = &rs->s_v1.s_journal;
43765 char b[BDEVNAME_SIZE];
43766
43767+ pax_track_stack();
43768+
43769 seq_printf(m, /* on-disk fields */
43770 "jp_journal_1st_block: \t%i\n"
43771 "jp_journal_dev: \t%s[%x]\n"
43772diff -urNp linux-2.6.32.43/fs/reiserfs/stree.c linux-2.6.32.43/fs/reiserfs/stree.c
43773--- linux-2.6.32.43/fs/reiserfs/stree.c 2011-03-27 14:31:47.000000000 -0400
43774+++ linux-2.6.32.43/fs/reiserfs/stree.c 2011-05-16 21:46:57.000000000 -0400
43775@@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs
43776 int iter = 0;
43777 #endif
43778
43779+ pax_track_stack();
43780+
43781 BUG_ON(!th->t_trans_id);
43782
43783 init_tb_struct(th, &s_del_balance, sb, path,
43784@@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct r
43785 int retval;
43786 int quota_cut_bytes = 0;
43787
43788+ pax_track_stack();
43789+
43790 BUG_ON(!th->t_trans_id);
43791
43792 le_key2cpu_key(&cpu_key, key);
43793@@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiser
43794 int quota_cut_bytes;
43795 loff_t tail_pos = 0;
43796
43797+ pax_track_stack();
43798+
43799 BUG_ON(!th->t_trans_id);
43800
43801 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
43802@@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reis
43803 int retval;
43804 int fs_gen;
43805
43806+ pax_track_stack();
43807+
43808 BUG_ON(!th->t_trans_id);
43809
43810 fs_gen = get_generation(inode->i_sb);
43811@@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs
43812 int fs_gen = 0;
43813 int quota_bytes = 0;
43814
43815+ pax_track_stack();
43816+
43817 BUG_ON(!th->t_trans_id);
43818
43819 if (inode) { /* Do we count quotas for item? */
43820diff -urNp linux-2.6.32.43/fs/reiserfs/super.c linux-2.6.32.43/fs/reiserfs/super.c
43821--- linux-2.6.32.43/fs/reiserfs/super.c 2011-03-27 14:31:47.000000000 -0400
43822+++ linux-2.6.32.43/fs/reiserfs/super.c 2011-05-16 21:46:57.000000000 -0400
43823@@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct
43824 {.option_name = NULL}
43825 };
43826
43827+ pax_track_stack();
43828+
43829 *blocks = 0;
43830 if (!options || !*options)
43831 /* use default configuration: create tails, journaling on, no
43832diff -urNp linux-2.6.32.43/fs/select.c linux-2.6.32.43/fs/select.c
43833--- linux-2.6.32.43/fs/select.c 2011-03-27 14:31:47.000000000 -0400
43834+++ linux-2.6.32.43/fs/select.c 2011-05-16 21:46:57.000000000 -0400
43835@@ -20,6 +20,7 @@
43836 #include <linux/module.h>
43837 #include <linux/slab.h>
43838 #include <linux/poll.h>
43839+#include <linux/security.h>
43840 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
43841 #include <linux/file.h>
43842 #include <linux/fdtable.h>
43843@@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, s
43844 int retval, i, timed_out = 0;
43845 unsigned long slack = 0;
43846
43847+ pax_track_stack();
43848+
43849 rcu_read_lock();
43850 retval = max_select_fd(n, fds);
43851 rcu_read_unlock();
43852@@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user
43853 /* Allocate small arguments on the stack to save memory and be faster */
43854 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
43855
43856+ pax_track_stack();
43857+
43858 ret = -EINVAL;
43859 if (n < 0)
43860 goto out_nofds;
43861@@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *uf
43862 struct poll_list *walk = head;
43863 unsigned long todo = nfds;
43864
43865+ pax_track_stack();
43866+
43867+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
43868 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
43869 return -EINVAL;
43870
43871diff -urNp linux-2.6.32.43/fs/seq_file.c linux-2.6.32.43/fs/seq_file.c
43872--- linux-2.6.32.43/fs/seq_file.c 2011-03-27 14:31:47.000000000 -0400
43873+++ linux-2.6.32.43/fs/seq_file.c 2011-04-17 15:56:46.000000000 -0400
43874@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
43875 return 0;
43876 }
43877 if (!m->buf) {
43878- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43879+ m->size = PAGE_SIZE;
43880+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43881 if (!m->buf)
43882 return -ENOMEM;
43883 }
43884@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
43885 Eoverflow:
43886 m->op->stop(m, p);
43887 kfree(m->buf);
43888- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43889+ m->size <<= 1;
43890+ m->buf = kmalloc(m->size, GFP_KERNEL);
43891 return !m->buf ? -ENOMEM : -EAGAIN;
43892 }
43893
43894@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
43895 m->version = file->f_version;
43896 /* grab buffer if we didn't have one */
43897 if (!m->buf) {
43898- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
43899+ m->size = PAGE_SIZE;
43900+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
43901 if (!m->buf)
43902 goto Enomem;
43903 }
43904@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
43905 goto Fill;
43906 m->op->stop(m, p);
43907 kfree(m->buf);
43908- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
43909+ m->size <<= 1;
43910+ m->buf = kmalloc(m->size, GFP_KERNEL);
43911 if (!m->buf)
43912 goto Enomem;
43913 m->count = 0;
43914diff -urNp linux-2.6.32.43/fs/smbfs/symlink.c linux-2.6.32.43/fs/smbfs/symlink.c
43915--- linux-2.6.32.43/fs/smbfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
43916+++ linux-2.6.32.43/fs/smbfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
43917@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
43918
43919 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
43920 {
43921- char *s = nd_get_link(nd);
43922+ const char *s = nd_get_link(nd);
43923 if (!IS_ERR(s))
43924 __putname(s);
43925 }
43926diff -urNp linux-2.6.32.43/fs/splice.c linux-2.6.32.43/fs/splice.c
43927--- linux-2.6.32.43/fs/splice.c 2011-03-27 14:31:47.000000000 -0400
43928+++ linux-2.6.32.43/fs/splice.c 2011-05-16 21:46:57.000000000 -0400
43929@@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
43930 pipe_lock(pipe);
43931
43932 for (;;) {
43933- if (!pipe->readers) {
43934+ if (!atomic_read(&pipe->readers)) {
43935 send_sig(SIGPIPE, current, 0);
43936 if (!ret)
43937 ret = -EPIPE;
43938@@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
43939 do_wakeup = 0;
43940 }
43941
43942- pipe->waiting_writers++;
43943+ atomic_inc(&pipe->waiting_writers);
43944 pipe_wait(pipe);
43945- pipe->waiting_writers--;
43946+ atomic_dec(&pipe->waiting_writers);
43947 }
43948
43949 pipe_unlock(pipe);
43950@@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *
43951 .spd_release = spd_release_page,
43952 };
43953
43954+ pax_track_stack();
43955+
43956 index = *ppos >> PAGE_CACHE_SHIFT;
43957 loff = *ppos & ~PAGE_CACHE_MASK;
43958 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
43959@@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file
43960 old_fs = get_fs();
43961 set_fs(get_ds());
43962 /* The cast to a user pointer is valid due to the set_fs() */
43963- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
43964+ res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
43965 set_fs(old_fs);
43966
43967 return res;
43968@@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file
43969 old_fs = get_fs();
43970 set_fs(get_ds());
43971 /* The cast to a user pointer is valid due to the set_fs() */
43972- res = vfs_write(file, (const char __user *)buf, count, &pos);
43973+ res = vfs_write(file, (__force const char __user *)buf, count, &pos);
43974 set_fs(old_fs);
43975
43976 return res;
43977@@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct
43978 .spd_release = spd_release_page,
43979 };
43980
43981+ pax_track_stack();
43982+
43983 index = *ppos >> PAGE_CACHE_SHIFT;
43984 offset = *ppos & ~PAGE_CACHE_MASK;
43985 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
43986@@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct
43987 goto err;
43988
43989 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
43990- vec[i].iov_base = (void __user *) page_address(page);
43991+ vec[i].iov_base = (__force void __user *) page_address(page);
43992 vec[i].iov_len = this_len;
43993 pages[i] = page;
43994 spd.nr_pages++;
43995@@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
43996 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
43997 {
43998 while (!pipe->nrbufs) {
43999- if (!pipe->writers)
44000+ if (!atomic_read(&pipe->writers))
44001 return 0;
44002
44003- if (!pipe->waiting_writers && sd->num_spliced)
44004+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
44005 return 0;
44006
44007 if (sd->flags & SPLICE_F_NONBLOCK)
44008@@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct fi
44009 * out of the pipe right after the splice_to_pipe(). So set
44010 * PIPE_READERS appropriately.
44011 */
44012- pipe->readers = 1;
44013+ atomic_set(&pipe->readers, 1);
44014
44015 current->splice_pipe = pipe;
44016 }
44017@@ -1592,6 +1596,8 @@ static long vmsplice_to_pipe(struct file
44018 .spd_release = spd_release_page,
44019 };
44020
44021+ pax_track_stack();
44022+
44023 pipe = pipe_info(file->f_path.dentry->d_inode);
44024 if (!pipe)
44025 return -EBADF;
44026@@ -1700,9 +1706,9 @@ static int ipipe_prep(struct pipe_inode_
44027 ret = -ERESTARTSYS;
44028 break;
44029 }
44030- if (!pipe->writers)
44031+ if (!atomic_read(&pipe->writers))
44032 break;
44033- if (!pipe->waiting_writers) {
44034+ if (!atomic_read(&pipe->waiting_writers)) {
44035 if (flags & SPLICE_F_NONBLOCK) {
44036 ret = -EAGAIN;
44037 break;
44038@@ -1734,7 +1740,7 @@ static int opipe_prep(struct pipe_inode_
44039 pipe_lock(pipe);
44040
44041 while (pipe->nrbufs >= PIPE_BUFFERS) {
44042- if (!pipe->readers) {
44043+ if (!atomic_read(&pipe->readers)) {
44044 send_sig(SIGPIPE, current, 0);
44045 ret = -EPIPE;
44046 break;
44047@@ -1747,9 +1753,9 @@ static int opipe_prep(struct pipe_inode_
44048 ret = -ERESTARTSYS;
44049 break;
44050 }
44051- pipe->waiting_writers++;
44052+ atomic_inc(&pipe->waiting_writers);
44053 pipe_wait(pipe);
44054- pipe->waiting_writers--;
44055+ atomic_dec(&pipe->waiting_writers);
44056 }
44057
44058 pipe_unlock(pipe);
44059@@ -1785,14 +1791,14 @@ retry:
44060 pipe_double_lock(ipipe, opipe);
44061
44062 do {
44063- if (!opipe->readers) {
44064+ if (!atomic_read(&opipe->readers)) {
44065 send_sig(SIGPIPE, current, 0);
44066 if (!ret)
44067 ret = -EPIPE;
44068 break;
44069 }
44070
44071- if (!ipipe->nrbufs && !ipipe->writers)
44072+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
44073 break;
44074
44075 /*
44076@@ -1892,7 +1898,7 @@ static int link_pipe(struct pipe_inode_i
44077 pipe_double_lock(ipipe, opipe);
44078
44079 do {
44080- if (!opipe->readers) {
44081+ if (!atomic_read(&opipe->readers)) {
44082 send_sig(SIGPIPE, current, 0);
44083 if (!ret)
44084 ret = -EPIPE;
44085@@ -1937,7 +1943,7 @@ static int link_pipe(struct pipe_inode_i
44086 * return EAGAIN if we have the potential of some data in the
44087 * future, otherwise just return 0
44088 */
44089- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
44090+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
44091 ret = -EAGAIN;
44092
44093 pipe_unlock(ipipe);
44094diff -urNp linux-2.6.32.43/fs/sysfs/file.c linux-2.6.32.43/fs/sysfs/file.c
44095--- linux-2.6.32.43/fs/sysfs/file.c 2011-03-27 14:31:47.000000000 -0400
44096+++ linux-2.6.32.43/fs/sysfs/file.c 2011-05-04 17:56:20.000000000 -0400
44097@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
44098
44099 struct sysfs_open_dirent {
44100 atomic_t refcnt;
44101- atomic_t event;
44102+ atomic_unchecked_t event;
44103 wait_queue_head_t poll;
44104 struct list_head buffers; /* goes through sysfs_buffer.list */
44105 };
44106@@ -53,7 +53,7 @@ struct sysfs_buffer {
44107 size_t count;
44108 loff_t pos;
44109 char * page;
44110- struct sysfs_ops * ops;
44111+ const struct sysfs_ops * ops;
44112 struct mutex mutex;
44113 int needs_read_fill;
44114 int event;
44115@@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
44116 {
44117 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
44118 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
44119- struct sysfs_ops * ops = buffer->ops;
44120+ const struct sysfs_ops * ops = buffer->ops;
44121 int ret = 0;
44122 ssize_t count;
44123
44124@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
44125 if (!sysfs_get_active_two(attr_sd))
44126 return -ENODEV;
44127
44128- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
44129+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
44130 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
44131
44132 sysfs_put_active_two(attr_sd);
44133@@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
44134 {
44135 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
44136 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
44137- struct sysfs_ops * ops = buffer->ops;
44138+ const struct sysfs_ops * ops = buffer->ops;
44139 int rc;
44140
44141 /* need attr_sd for attr and ops, its parent for kobj */
44142@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
44143 return -ENOMEM;
44144
44145 atomic_set(&new_od->refcnt, 0);
44146- atomic_set(&new_od->event, 1);
44147+ atomic_set_unchecked(&new_od->event, 1);
44148 init_waitqueue_head(&new_od->poll);
44149 INIT_LIST_HEAD(&new_od->buffers);
44150 goto retry;
44151@@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
44152 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
44153 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
44154 struct sysfs_buffer *buffer;
44155- struct sysfs_ops *ops;
44156+ const struct sysfs_ops *ops;
44157 int error = -EACCES;
44158 char *p;
44159
44160@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
44161
44162 sysfs_put_active_two(attr_sd);
44163
44164- if (buffer->event != atomic_read(&od->event))
44165+ if (buffer->event != atomic_read_unchecked(&od->event))
44166 goto trigger;
44167
44168 return DEFAULT_POLLMASK;
44169@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
44170
44171 od = sd->s_attr.open;
44172 if (od) {
44173- atomic_inc(&od->event);
44174+ atomic_inc_unchecked(&od->event);
44175 wake_up_interruptible(&od->poll);
44176 }
44177
44178diff -urNp linux-2.6.32.43/fs/sysfs/mount.c linux-2.6.32.43/fs/sysfs/mount.c
44179--- linux-2.6.32.43/fs/sysfs/mount.c 2011-03-27 14:31:47.000000000 -0400
44180+++ linux-2.6.32.43/fs/sysfs/mount.c 2011-04-17 15:56:46.000000000 -0400
44181@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
44182 .s_name = "",
44183 .s_count = ATOMIC_INIT(1),
44184 .s_flags = SYSFS_DIR,
44185+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
44186+ .s_mode = S_IFDIR | S_IRWXU,
44187+#else
44188 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
44189+#endif
44190 .s_ino = 1,
44191 };
44192
44193diff -urNp linux-2.6.32.43/fs/sysfs/symlink.c linux-2.6.32.43/fs/sysfs/symlink.c
44194--- linux-2.6.32.43/fs/sysfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
44195+++ linux-2.6.32.43/fs/sysfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
44196@@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct de
44197
44198 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
44199 {
44200- char *page = nd_get_link(nd);
44201+ const char *page = nd_get_link(nd);
44202 if (!IS_ERR(page))
44203 free_page((unsigned long)page);
44204 }
44205diff -urNp linux-2.6.32.43/fs/udf/balloc.c linux-2.6.32.43/fs/udf/balloc.c
44206--- linux-2.6.32.43/fs/udf/balloc.c 2011-03-27 14:31:47.000000000 -0400
44207+++ linux-2.6.32.43/fs/udf/balloc.c 2011-04-17 15:56:46.000000000 -0400
44208@@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
44209
44210 mutex_lock(&sbi->s_alloc_mutex);
44211 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
44212- if (bloc->logicalBlockNum < 0 ||
44213- (bloc->logicalBlockNum + count) >
44214- partmap->s_partition_len) {
44215+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
44216 udf_debug("%d < %d || %d + %d > %d\n",
44217 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
44218 count, partmap->s_partition_len);
44219@@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
44220
44221 mutex_lock(&sbi->s_alloc_mutex);
44222 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
44223- if (bloc->logicalBlockNum < 0 ||
44224- (bloc->logicalBlockNum + count) >
44225- partmap->s_partition_len) {
44226+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
44227 udf_debug("%d < %d || %d + %d > %d\n",
44228 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
44229 partmap->s_partition_len);
44230diff -urNp linux-2.6.32.43/fs/udf/inode.c linux-2.6.32.43/fs/udf/inode.c
44231--- linux-2.6.32.43/fs/udf/inode.c 2011-03-27 14:31:47.000000000 -0400
44232+++ linux-2.6.32.43/fs/udf/inode.c 2011-05-16 21:46:57.000000000 -0400
44233@@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(
44234 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
44235 int lastblock = 0;
44236
44237+ pax_track_stack();
44238+
44239 prev_epos.offset = udf_file_entry_alloc_offset(inode);
44240 prev_epos.block = iinfo->i_location;
44241 prev_epos.bh = NULL;
44242diff -urNp linux-2.6.32.43/fs/udf/misc.c linux-2.6.32.43/fs/udf/misc.c
44243--- linux-2.6.32.43/fs/udf/misc.c 2011-03-27 14:31:47.000000000 -0400
44244+++ linux-2.6.32.43/fs/udf/misc.c 2011-04-23 12:56:11.000000000 -0400
44245@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
44246
44247 u8 udf_tag_checksum(const struct tag *t)
44248 {
44249- u8 *data = (u8 *)t;
44250+ const u8 *data = (const u8 *)t;
44251 u8 checksum = 0;
44252 int i;
44253 for (i = 0; i < sizeof(struct tag); ++i)
44254diff -urNp linux-2.6.32.43/fs/utimes.c linux-2.6.32.43/fs/utimes.c
44255--- linux-2.6.32.43/fs/utimes.c 2011-03-27 14:31:47.000000000 -0400
44256+++ linux-2.6.32.43/fs/utimes.c 2011-04-17 15:56:46.000000000 -0400
44257@@ -1,6 +1,7 @@
44258 #include <linux/compiler.h>
44259 #include <linux/file.h>
44260 #include <linux/fs.h>
44261+#include <linux/security.h>
44262 #include <linux/linkage.h>
44263 #include <linux/mount.h>
44264 #include <linux/namei.h>
44265@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
44266 goto mnt_drop_write_and_out;
44267 }
44268 }
44269+
44270+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
44271+ error = -EACCES;
44272+ goto mnt_drop_write_and_out;
44273+ }
44274+
44275 mutex_lock(&inode->i_mutex);
44276 error = notify_change(path->dentry, &newattrs);
44277 mutex_unlock(&inode->i_mutex);
44278diff -urNp linux-2.6.32.43/fs/xattr_acl.c linux-2.6.32.43/fs/xattr_acl.c
44279--- linux-2.6.32.43/fs/xattr_acl.c 2011-03-27 14:31:47.000000000 -0400
44280+++ linux-2.6.32.43/fs/xattr_acl.c 2011-04-17 15:56:46.000000000 -0400
44281@@ -17,8 +17,8 @@
44282 struct posix_acl *
44283 posix_acl_from_xattr(const void *value, size_t size)
44284 {
44285- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
44286- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
44287+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
44288+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
44289 int count;
44290 struct posix_acl *acl;
44291 struct posix_acl_entry *acl_e;
44292diff -urNp linux-2.6.32.43/fs/xattr.c linux-2.6.32.43/fs/xattr.c
44293--- linux-2.6.32.43/fs/xattr.c 2011-03-27 14:31:47.000000000 -0400
44294+++ linux-2.6.32.43/fs/xattr.c 2011-04-17 15:56:46.000000000 -0400
44295@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
44296 * Extended attribute SET operations
44297 */
44298 static long
44299-setxattr(struct dentry *d, const char __user *name, const void __user *value,
44300+setxattr(struct path *path, const char __user *name, const void __user *value,
44301 size_t size, int flags)
44302 {
44303 int error;
44304@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
44305 return PTR_ERR(kvalue);
44306 }
44307
44308- error = vfs_setxattr(d, kname, kvalue, size, flags);
44309+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
44310+ error = -EACCES;
44311+ goto out;
44312+ }
44313+
44314+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
44315+out:
44316 kfree(kvalue);
44317 return error;
44318 }
44319@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
44320 return error;
44321 error = mnt_want_write(path.mnt);
44322 if (!error) {
44323- error = setxattr(path.dentry, name, value, size, flags);
44324+ error = setxattr(&path, name, value, size, flags);
44325 mnt_drop_write(path.mnt);
44326 }
44327 path_put(&path);
44328@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
44329 return error;
44330 error = mnt_want_write(path.mnt);
44331 if (!error) {
44332- error = setxattr(path.dentry, name, value, size, flags);
44333+ error = setxattr(&path, name, value, size, flags);
44334 mnt_drop_write(path.mnt);
44335 }
44336 path_put(&path);
44337@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
44338 const void __user *,value, size_t, size, int, flags)
44339 {
44340 struct file *f;
44341- struct dentry *dentry;
44342 int error = -EBADF;
44343
44344 f = fget(fd);
44345 if (!f)
44346 return error;
44347- dentry = f->f_path.dentry;
44348- audit_inode(NULL, dentry);
44349+ audit_inode(NULL, f->f_path.dentry);
44350 error = mnt_want_write_file(f);
44351 if (!error) {
44352- error = setxattr(dentry, name, value, size, flags);
44353+ error = setxattr(&f->f_path, name, value, size, flags);
44354 mnt_drop_write(f->f_path.mnt);
44355 }
44356 fput(f);
44357diff -urNp linux-2.6.32.43/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.32.43/fs/xfs/linux-2.6/xfs_ioctl32.c
44358--- linux-2.6.32.43/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-03-27 14:31:47.000000000 -0400
44359+++ linux-2.6.32.43/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-04-17 15:56:46.000000000 -0400
44360@@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
44361 xfs_fsop_geom_t fsgeo;
44362 int error;
44363
44364+ memset(&fsgeo, 0, sizeof(fsgeo));
44365 error = xfs_fs_geometry(mp, &fsgeo, 3);
44366 if (error)
44367 return -error;
44368diff -urNp linux-2.6.32.43/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.43/fs/xfs/linux-2.6/xfs_ioctl.c
44369--- linux-2.6.32.43/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 17:00:52.000000000 -0400
44370+++ linux-2.6.32.43/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 20:07:09.000000000 -0400
44371@@ -134,7 +134,7 @@ xfs_find_handle(
44372 }
44373
44374 error = -EFAULT;
44375- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
44376+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
44377 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
44378 goto out_put;
44379
44380@@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
44381 if (IS_ERR(dentry))
44382 return PTR_ERR(dentry);
44383
44384- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
44385+ kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
44386 if (!kbuf)
44387 goto out_dput;
44388
44389@@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
44390 xfs_mount_t *mp,
44391 void __user *arg)
44392 {
44393- xfs_fsop_geom_t fsgeo;
44394+ xfs_fsop_geom_t fsgeo;
44395 int error;
44396
44397 error = xfs_fs_geometry(mp, &fsgeo, 3);
44398diff -urNp linux-2.6.32.43/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.43/fs/xfs/linux-2.6/xfs_iops.c
44399--- linux-2.6.32.43/fs/xfs/linux-2.6/xfs_iops.c 2011-03-27 14:31:47.000000000 -0400
44400+++ linux-2.6.32.43/fs/xfs/linux-2.6/xfs_iops.c 2011-04-17 15:56:46.000000000 -0400
44401@@ -468,7 +468,7 @@ xfs_vn_put_link(
44402 struct nameidata *nd,
44403 void *p)
44404 {
44405- char *s = nd_get_link(nd);
44406+ const char *s = nd_get_link(nd);
44407
44408 if (!IS_ERR(s))
44409 kfree(s);
44410diff -urNp linux-2.6.32.43/fs/xfs/xfs_bmap.c linux-2.6.32.43/fs/xfs/xfs_bmap.c
44411--- linux-2.6.32.43/fs/xfs/xfs_bmap.c 2011-03-27 14:31:47.000000000 -0400
44412+++ linux-2.6.32.43/fs/xfs/xfs_bmap.c 2011-04-17 15:56:46.000000000 -0400
44413@@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
44414 int nmap,
44415 int ret_nmap);
44416 #else
44417-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
44418+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
44419 #endif /* DEBUG */
44420
44421 #if defined(XFS_RW_TRACE)
44422diff -urNp linux-2.6.32.43/fs/xfs/xfs_dir2_sf.c linux-2.6.32.43/fs/xfs/xfs_dir2_sf.c
44423--- linux-2.6.32.43/fs/xfs/xfs_dir2_sf.c 2011-03-27 14:31:47.000000000 -0400
44424+++ linux-2.6.32.43/fs/xfs/xfs_dir2_sf.c 2011-04-18 22:07:30.000000000 -0400
44425@@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
44426 }
44427
44428 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
44429- if (filldir(dirent, sfep->name, sfep->namelen,
44430+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
44431+ char name[sfep->namelen];
44432+ memcpy(name, sfep->name, sfep->namelen);
44433+ if (filldir(dirent, name, sfep->namelen,
44434+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
44435+ *offset = off & 0x7fffffff;
44436+ return 0;
44437+ }
44438+ } else if (filldir(dirent, sfep->name, sfep->namelen,
44439 off & 0x7fffffff, ino, DT_UNKNOWN)) {
44440 *offset = off & 0x7fffffff;
44441 return 0;
44442diff -urNp linux-2.6.32.43/grsecurity/gracl_alloc.c linux-2.6.32.43/grsecurity/gracl_alloc.c
44443--- linux-2.6.32.43/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
44444+++ linux-2.6.32.43/grsecurity/gracl_alloc.c 2011-04-17 15:56:46.000000000 -0400
44445@@ -0,0 +1,105 @@
44446+#include <linux/kernel.h>
44447+#include <linux/mm.h>
44448+#include <linux/slab.h>
44449+#include <linux/vmalloc.h>
44450+#include <linux/gracl.h>
44451+#include <linux/grsecurity.h>
44452+
44453+static unsigned long alloc_stack_next = 1;
44454+static unsigned long alloc_stack_size = 1;
44455+static void **alloc_stack;
44456+
44457+static __inline__ int
44458+alloc_pop(void)
44459+{
44460+ if (alloc_stack_next == 1)
44461+ return 0;
44462+
44463+ kfree(alloc_stack[alloc_stack_next - 2]);
44464+
44465+ alloc_stack_next--;
44466+
44467+ return 1;
44468+}
44469+
44470+static __inline__ int
44471+alloc_push(void *buf)
44472+{
44473+ if (alloc_stack_next >= alloc_stack_size)
44474+ return 1;
44475+
44476+ alloc_stack[alloc_stack_next - 1] = buf;
44477+
44478+ alloc_stack_next++;
44479+
44480+ return 0;
44481+}
44482+
44483+void *
44484+acl_alloc(unsigned long len)
44485+{
44486+ void *ret = NULL;
44487+
44488+ if (!len || len > PAGE_SIZE)
44489+ goto out;
44490+
44491+ ret = kmalloc(len, GFP_KERNEL);
44492+
44493+ if (ret) {
44494+ if (alloc_push(ret)) {
44495+ kfree(ret);
44496+ ret = NULL;
44497+ }
44498+ }
44499+
44500+out:
44501+ return ret;
44502+}
44503+
44504+void *
44505+acl_alloc_num(unsigned long num, unsigned long len)
44506+{
44507+ if (!len || (num > (PAGE_SIZE / len)))
44508+ return NULL;
44509+
44510+ return acl_alloc(num * len);
44511+}
44512+
44513+void
44514+acl_free_all(void)
44515+{
44516+ if (gr_acl_is_enabled() || !alloc_stack)
44517+ return;
44518+
44519+ while (alloc_pop()) ;
44520+
44521+ if (alloc_stack) {
44522+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
44523+ kfree(alloc_stack);
44524+ else
44525+ vfree(alloc_stack);
44526+ }
44527+
44528+ alloc_stack = NULL;
44529+ alloc_stack_size = 1;
44530+ alloc_stack_next = 1;
44531+
44532+ return;
44533+}
44534+
44535+int
44536+acl_alloc_stack_init(unsigned long size)
44537+{
44538+ if ((size * sizeof (void *)) <= PAGE_SIZE)
44539+ alloc_stack =
44540+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
44541+ else
44542+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
44543+
44544+ alloc_stack_size = size;
44545+
44546+ if (!alloc_stack)
44547+ return 0;
44548+ else
44549+ return 1;
44550+}
44551diff -urNp linux-2.6.32.43/grsecurity/gracl.c linux-2.6.32.43/grsecurity/gracl.c
44552--- linux-2.6.32.43/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
44553+++ linux-2.6.32.43/grsecurity/gracl.c 2011-07-14 20:02:48.000000000 -0400
44554@@ -0,0 +1,4082 @@
44555+#include <linux/kernel.h>
44556+#include <linux/module.h>
44557+#include <linux/sched.h>
44558+#include <linux/mm.h>
44559+#include <linux/file.h>
44560+#include <linux/fs.h>
44561+#include <linux/namei.h>
44562+#include <linux/mount.h>
44563+#include <linux/tty.h>
44564+#include <linux/proc_fs.h>
44565+#include <linux/smp_lock.h>
44566+#include <linux/slab.h>
44567+#include <linux/vmalloc.h>
44568+#include <linux/types.h>
44569+#include <linux/sysctl.h>
44570+#include <linux/netdevice.h>
44571+#include <linux/ptrace.h>
44572+#include <linux/gracl.h>
44573+#include <linux/gralloc.h>
44574+#include <linux/grsecurity.h>
44575+#include <linux/grinternal.h>
44576+#include <linux/pid_namespace.h>
44577+#include <linux/fdtable.h>
44578+#include <linux/percpu.h>
44579+
44580+#include <asm/uaccess.h>
44581+#include <asm/errno.h>
44582+#include <asm/mman.h>
44583+
44584+static struct acl_role_db acl_role_set;
44585+static struct name_db name_set;
44586+static struct inodev_db inodev_set;
44587+
44588+/* for keeping track of userspace pointers used for subjects, so we
44589+ can share references in the kernel as well
44590+*/
44591+
44592+static struct dentry *real_root;
44593+static struct vfsmount *real_root_mnt;
44594+
44595+static struct acl_subj_map_db subj_map_set;
44596+
44597+static struct acl_role_label *default_role;
44598+
44599+static struct acl_role_label *role_list;
44600+
44601+static u16 acl_sp_role_value;
44602+
44603+extern char *gr_shared_page[4];
44604+static DEFINE_MUTEX(gr_dev_mutex);
44605+DEFINE_RWLOCK(gr_inode_lock);
44606+
44607+struct gr_arg *gr_usermode;
44608+
44609+static unsigned int gr_status __read_only = GR_STATUS_INIT;
44610+
44611+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
44612+extern void gr_clear_learn_entries(void);
44613+
44614+#ifdef CONFIG_GRKERNSEC_RESLOG
44615+extern void gr_log_resource(const struct task_struct *task,
44616+ const int res, const unsigned long wanted, const int gt);
44617+#endif
44618+
44619+unsigned char *gr_system_salt;
44620+unsigned char *gr_system_sum;
44621+
44622+static struct sprole_pw **acl_special_roles = NULL;
44623+static __u16 num_sprole_pws = 0;
44624+
44625+static struct acl_role_label *kernel_role = NULL;
44626+
44627+static unsigned int gr_auth_attempts = 0;
44628+static unsigned long gr_auth_expires = 0UL;
44629+
44630+#ifdef CONFIG_NET
44631+extern struct vfsmount *sock_mnt;
44632+#endif
44633+extern struct vfsmount *pipe_mnt;
44634+extern struct vfsmount *shm_mnt;
44635+#ifdef CONFIG_HUGETLBFS
44636+extern struct vfsmount *hugetlbfs_vfsmount;
44637+#endif
44638+
44639+static struct acl_object_label *fakefs_obj_rw;
44640+static struct acl_object_label *fakefs_obj_rwx;
44641+
44642+extern int gr_init_uidset(void);
44643+extern void gr_free_uidset(void);
44644+extern void gr_remove_uid(uid_t uid);
44645+extern int gr_find_uid(uid_t uid);
44646+
44647+__inline__ int
44648+gr_acl_is_enabled(void)
44649+{
44650+ return (gr_status & GR_READY);
44651+}
44652+
44653+#ifdef CONFIG_BTRFS_FS
44654+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
44655+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
44656+#endif
44657+
44658+static inline dev_t __get_dev(const struct dentry *dentry)
44659+{
44660+#ifdef CONFIG_BTRFS_FS
44661+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
44662+ return get_btrfs_dev_from_inode(dentry->d_inode);
44663+ else
44664+#endif
44665+ return dentry->d_inode->i_sb->s_dev;
44666+}
44667+
44668+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
44669+{
44670+ return __get_dev(dentry);
44671+}
44672+
44673+static char gr_task_roletype_to_char(struct task_struct *task)
44674+{
44675+ switch (task->role->roletype &
44676+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
44677+ GR_ROLE_SPECIAL)) {
44678+ case GR_ROLE_DEFAULT:
44679+ return 'D';
44680+ case GR_ROLE_USER:
44681+ return 'U';
44682+ case GR_ROLE_GROUP:
44683+ return 'G';
44684+ case GR_ROLE_SPECIAL:
44685+ return 'S';
44686+ }
44687+
44688+ return 'X';
44689+}
44690+
44691+char gr_roletype_to_char(void)
44692+{
44693+ return gr_task_roletype_to_char(current);
44694+}
44695+
44696+__inline__ int
44697+gr_acl_tpe_check(void)
44698+{
44699+ if (unlikely(!(gr_status & GR_READY)))
44700+ return 0;
44701+ if (current->role->roletype & GR_ROLE_TPE)
44702+ return 1;
44703+ else
44704+ return 0;
44705+}
44706+
44707+int
44708+gr_handle_rawio(const struct inode *inode)
44709+{
44710+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
44711+ if (inode && S_ISBLK(inode->i_mode) &&
44712+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
44713+ !capable(CAP_SYS_RAWIO))
44714+ return 1;
44715+#endif
44716+ return 0;
44717+}
44718+
44719+static int
44720+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
44721+{
44722+ if (likely(lena != lenb))
44723+ return 0;
44724+
44725+ return !memcmp(a, b, lena);
44726+}
44727+
44728+/* this must be called with vfsmount_lock and dcache_lock held */
44729+
44730+static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
44731+ struct dentry *root, struct vfsmount *rootmnt,
44732+ char *buffer, int buflen)
44733+{
44734+ char * end = buffer+buflen;
44735+ char * retval;
44736+ int namelen;
44737+
44738+ *--end = '\0';
44739+ buflen--;
44740+
44741+ if (buflen < 1)
44742+ goto Elong;
44743+ /* Get '/' right */
44744+ retval = end-1;
44745+ *retval = '/';
44746+
44747+ for (;;) {
44748+ struct dentry * parent;
44749+
44750+ if (dentry == root && vfsmnt == rootmnt)
44751+ break;
44752+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
44753+ /* Global root? */
44754+ if (vfsmnt->mnt_parent == vfsmnt)
44755+ goto global_root;
44756+ dentry = vfsmnt->mnt_mountpoint;
44757+ vfsmnt = vfsmnt->mnt_parent;
44758+ continue;
44759+ }
44760+ parent = dentry->d_parent;
44761+ prefetch(parent);
44762+ namelen = dentry->d_name.len;
44763+ buflen -= namelen + 1;
44764+ if (buflen < 0)
44765+ goto Elong;
44766+ end -= namelen;
44767+ memcpy(end, dentry->d_name.name, namelen);
44768+ *--end = '/';
44769+ retval = end;
44770+ dentry = parent;
44771+ }
44772+
44773+out:
44774+ return retval;
44775+
44776+global_root:
44777+ namelen = dentry->d_name.len;
44778+ buflen -= namelen;
44779+ if (buflen < 0)
44780+ goto Elong;
44781+ retval -= namelen-1; /* hit the slash */
44782+ memcpy(retval, dentry->d_name.name, namelen);
44783+ goto out;
44784+Elong:
44785+ retval = ERR_PTR(-ENAMETOOLONG);
44786+ goto out;
44787+}
44788+
44789+static char *
44790+gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
44791+ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
44792+{
44793+ char *retval;
44794+
44795+ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
44796+ if (unlikely(IS_ERR(retval)))
44797+ retval = strcpy(buf, "<path too long>");
44798+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
44799+ retval[1] = '\0';
44800+
44801+ return retval;
44802+}
44803+
44804+static char *
44805+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44806+ char *buf, int buflen)
44807+{
44808+ char *res;
44809+
44810+ /* we can use real_root, real_root_mnt, because this is only called
44811+ by the RBAC system */
44812+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
44813+
44814+ return res;
44815+}
44816+
44817+static char *
44818+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
44819+ char *buf, int buflen)
44820+{
44821+ char *res;
44822+ struct dentry *root;
44823+ struct vfsmount *rootmnt;
44824+ struct task_struct *reaper = &init_task;
44825+
44826+ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
44827+ read_lock(&reaper->fs->lock);
44828+ root = dget(reaper->fs->root.dentry);
44829+ rootmnt = mntget(reaper->fs->root.mnt);
44830+ read_unlock(&reaper->fs->lock);
44831+
44832+ spin_lock(&dcache_lock);
44833+ spin_lock(&vfsmount_lock);
44834+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
44835+ spin_unlock(&vfsmount_lock);
44836+ spin_unlock(&dcache_lock);
44837+
44838+ dput(root);
44839+ mntput(rootmnt);
44840+ return res;
44841+}
44842+
44843+static char *
44844+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
44845+{
44846+ char *ret;
44847+ spin_lock(&dcache_lock);
44848+ spin_lock(&vfsmount_lock);
44849+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44850+ PAGE_SIZE);
44851+ spin_unlock(&vfsmount_lock);
44852+ spin_unlock(&dcache_lock);
44853+ return ret;
44854+}
44855+
44856+char *
44857+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
44858+{
44859+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
44860+ PAGE_SIZE);
44861+}
44862+
44863+char *
44864+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
44865+{
44866+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
44867+ PAGE_SIZE);
44868+}
44869+
44870+char *
44871+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
44872+{
44873+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
44874+ PAGE_SIZE);
44875+}
44876+
44877+char *
44878+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
44879+{
44880+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
44881+ PAGE_SIZE);
44882+}
44883+
44884+char *
44885+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
44886+{
44887+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
44888+ PAGE_SIZE);
44889+}
44890+
44891+__inline__ __u32
44892+to_gr_audit(const __u32 reqmode)
44893+{
44894+ /* masks off auditable permission flags, then shifts them to create
44895+ auditing flags, and adds the special case of append auditing if
44896+ we're requesting write */
44897+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
44898+}
44899+
44900+struct acl_subject_label *
44901+lookup_subject_map(const struct acl_subject_label *userp)
44902+{
44903+ unsigned int index = shash(userp, subj_map_set.s_size);
44904+ struct subject_map *match;
44905+
44906+ match = subj_map_set.s_hash[index];
44907+
44908+ while (match && match->user != userp)
44909+ match = match->next;
44910+
44911+ if (match != NULL)
44912+ return match->kernel;
44913+ else
44914+ return NULL;
44915+}
44916+
44917+static void
44918+insert_subj_map_entry(struct subject_map *subjmap)
44919+{
44920+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
44921+ struct subject_map **curr;
44922+
44923+ subjmap->prev = NULL;
44924+
44925+ curr = &subj_map_set.s_hash[index];
44926+ if (*curr != NULL)
44927+ (*curr)->prev = subjmap;
44928+
44929+ subjmap->next = *curr;
44930+ *curr = subjmap;
44931+
44932+ return;
44933+}
44934+
44935+static struct acl_role_label *
44936+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
44937+ const gid_t gid)
44938+{
44939+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
44940+ struct acl_role_label *match;
44941+ struct role_allowed_ip *ipp;
44942+ unsigned int x;
44943+ u32 curr_ip = task->signal->curr_ip;
44944+
44945+ task->signal->saved_ip = curr_ip;
44946+
44947+ match = acl_role_set.r_hash[index];
44948+
44949+ while (match) {
44950+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
44951+ for (x = 0; x < match->domain_child_num; x++) {
44952+ if (match->domain_children[x] == uid)
44953+ goto found;
44954+ }
44955+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
44956+ break;
44957+ match = match->next;
44958+ }
44959+found:
44960+ if (match == NULL) {
44961+ try_group:
44962+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
44963+ match = acl_role_set.r_hash[index];
44964+
44965+ while (match) {
44966+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
44967+ for (x = 0; x < match->domain_child_num; x++) {
44968+ if (match->domain_children[x] == gid)
44969+ goto found2;
44970+ }
44971+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
44972+ break;
44973+ match = match->next;
44974+ }
44975+found2:
44976+ if (match == NULL)
44977+ match = default_role;
44978+ if (match->allowed_ips == NULL)
44979+ return match;
44980+ else {
44981+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44982+ if (likely
44983+ ((ntohl(curr_ip) & ipp->netmask) ==
44984+ (ntohl(ipp->addr) & ipp->netmask)))
44985+ return match;
44986+ }
44987+ match = default_role;
44988+ }
44989+ } else if (match->allowed_ips == NULL) {
44990+ return match;
44991+ } else {
44992+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
44993+ if (likely
44994+ ((ntohl(curr_ip) & ipp->netmask) ==
44995+ (ntohl(ipp->addr) & ipp->netmask)))
44996+ return match;
44997+ }
44998+ goto try_group;
44999+ }
45000+
45001+ return match;
45002+}
45003+
45004+struct acl_subject_label *
45005+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
45006+ const struct acl_role_label *role)
45007+{
45008+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
45009+ struct acl_subject_label *match;
45010+
45011+ match = role->subj_hash[index];
45012+
45013+ while (match && (match->inode != ino || match->device != dev ||
45014+ (match->mode & GR_DELETED))) {
45015+ match = match->next;
45016+ }
45017+
45018+ if (match && !(match->mode & GR_DELETED))
45019+ return match;
45020+ else
45021+ return NULL;
45022+}
45023+
45024+struct acl_subject_label *
45025+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
45026+ const struct acl_role_label *role)
45027+{
45028+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
45029+ struct acl_subject_label *match;
45030+
45031+ match = role->subj_hash[index];
45032+
45033+ while (match && (match->inode != ino || match->device != dev ||
45034+ !(match->mode & GR_DELETED))) {
45035+ match = match->next;
45036+ }
45037+
45038+ if (match && (match->mode & GR_DELETED))
45039+ return match;
45040+ else
45041+ return NULL;
45042+}
45043+
45044+static struct acl_object_label *
45045+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
45046+ const struct acl_subject_label *subj)
45047+{
45048+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
45049+ struct acl_object_label *match;
45050+
45051+ match = subj->obj_hash[index];
45052+
45053+ while (match && (match->inode != ino || match->device != dev ||
45054+ (match->mode & GR_DELETED))) {
45055+ match = match->next;
45056+ }
45057+
45058+ if (match && !(match->mode & GR_DELETED))
45059+ return match;
45060+ else
45061+ return NULL;
45062+}
45063+
45064+static struct acl_object_label *
45065+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
45066+ const struct acl_subject_label *subj)
45067+{
45068+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
45069+ struct acl_object_label *match;
45070+
45071+ match = subj->obj_hash[index];
45072+
45073+ while (match && (match->inode != ino || match->device != dev ||
45074+ !(match->mode & GR_DELETED))) {
45075+ match = match->next;
45076+ }
45077+
45078+ if (match && (match->mode & GR_DELETED))
45079+ return match;
45080+
45081+ match = subj->obj_hash[index];
45082+
45083+ while (match && (match->inode != ino || match->device != dev ||
45084+ (match->mode & GR_DELETED))) {
45085+ match = match->next;
45086+ }
45087+
45088+ if (match && !(match->mode & GR_DELETED))
45089+ return match;
45090+ else
45091+ return NULL;
45092+}
45093+
45094+static struct name_entry *
45095+lookup_name_entry(const char *name)
45096+{
45097+ unsigned int len = strlen(name);
45098+ unsigned int key = full_name_hash(name, len);
45099+ unsigned int index = key % name_set.n_size;
45100+ struct name_entry *match;
45101+
45102+ match = name_set.n_hash[index];
45103+
45104+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
45105+ match = match->next;
45106+
45107+ return match;
45108+}
45109+
45110+static struct name_entry *
45111+lookup_name_entry_create(const char *name)
45112+{
45113+ unsigned int len = strlen(name);
45114+ unsigned int key = full_name_hash(name, len);
45115+ unsigned int index = key % name_set.n_size;
45116+ struct name_entry *match;
45117+
45118+ match = name_set.n_hash[index];
45119+
45120+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
45121+ !match->deleted))
45122+ match = match->next;
45123+
45124+ if (match && match->deleted)
45125+ return match;
45126+
45127+ match = name_set.n_hash[index];
45128+
45129+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
45130+ match->deleted))
45131+ match = match->next;
45132+
45133+ if (match && !match->deleted)
45134+ return match;
45135+ else
45136+ return NULL;
45137+}
45138+
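+/* look up an entry by inode and device number in the inode/device hash table */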
45139+static struct inodev_entry *
45140+lookup_inodev_entry(const ino_t ino, const dev_t dev)
45141+{
45142+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
45143+ struct inodev_entry *match;
45144+
45145+ match = inodev_set.i_hash[index];
45146+
45147+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
45148+ match = match->next;
45149+
45150+ return match;
45151+}
45152+
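+/* link the entry in at the head of its inode/device hash bucket */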
45153+static void
45154+insert_inodev_entry(struct inodev_entry *entry)
45155+{
45156+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
45157+ inodev_set.i_size);
45158+ struct inodev_entry **curr;
45159+
45160+ entry->prev = NULL;
45161+
45162+ curr = &inodev_set.i_hash[index];
45163+ if (*curr != NULL)
45164+ (*curr)->prev = entry;
45165+
45166+ entry->next = *curr;
45167+ *curr = entry;
45168+
45169+ return;
45170+}
45171+
45172+static void
45173+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
45174+{
45175+ unsigned int index =
45176+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
45177+ struct acl_role_label **curr;
45178+ struct acl_role_label *tmp;
45179+
45180+ curr = &acl_role_set.r_hash[index];
45181+
45182+	/* if the role was already inserted due to domains and already
45183+	   has a role attached in the same bucket, then we need to
45184+	   combine these two bucket chains
45185+	*/
45186+ if (role->next) {
45187+ tmp = role->next;
45188+ while (tmp->next)
45189+ tmp = tmp->next;
45190+ tmp->next = *curr;
45191+ } else
45192+ role->next = *curr;
45193+ *curr = role;
45194+
45195+ return;
45196+}
45197+
45198+static void
45199+insert_acl_role_label(struct acl_role_label *role)
45200+{
45201+ int i;
45202+
45203+ if (role_list == NULL) {
45204+ role_list = role;
45205+ role->prev = NULL;
45206+ } else {
45207+ role->prev = role_list;
45208+ role_list = role;
45209+ }
45210+
45211+ /* used for hash chains */
45212+ role->next = NULL;
45213+
45214+ if (role->roletype & GR_ROLE_DOMAIN) {
45215+ for (i = 0; i < role->domain_child_num; i++)
45216+ __insert_acl_role_label(role, role->domain_children[i]);
45217+ } else
45218+ __insert_acl_role_label(role, role->uidgid);
45219+}
45220+
45221+static int
45222+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
45223+{
45224+ struct name_entry **curr, *nentry;
45225+ struct inodev_entry *ientry;
45226+ unsigned int len = strlen(name);
45227+ unsigned int key = full_name_hash(name, len);
45228+ unsigned int index = key % name_set.n_size;
45229+
45230+ curr = &name_set.n_hash[index];
45231+
45232+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
45233+ curr = &((*curr)->next);
45234+
45235+ if (*curr != NULL)
45236+ return 1;
45237+
45238+ nentry = acl_alloc(sizeof (struct name_entry));
45239+ if (nentry == NULL)
45240+ return 0;
45241+ ientry = acl_alloc(sizeof (struct inodev_entry));
45242+ if (ientry == NULL)
45243+ return 0;
45244+ ientry->nentry = nentry;
45245+
45246+ nentry->key = key;
45247+ nentry->name = name;
45248+ nentry->inode = inode;
45249+ nentry->device = device;
45250+ nentry->len = len;
45251+ nentry->deleted = deleted;
45252+
45253+ nentry->prev = NULL;
45254+ curr = &name_set.n_hash[index];
45255+ if (*curr != NULL)
45256+ (*curr)->prev = nentry;
45257+ nentry->next = *curr;
45258+ *curr = nentry;
45259+
45260+ /* insert us into the table searchable by inode/dev */
45261+ insert_inodev_entry(ientry);
45262+
45263+ return 1;
45264+}
45265+
45266+static void
45267+insert_acl_obj_label(struct acl_object_label *obj,
45268+ struct acl_subject_label *subj)
45269+{
45270+ unsigned int index =
45271+ fhash(obj->inode, obj->device, subj->obj_hash_size);
45272+ struct acl_object_label **curr;
45273+
45274+
45275+ obj->prev = NULL;
45276+
45277+ curr = &subj->obj_hash[index];
45278+ if (*curr != NULL)
45279+ (*curr)->prev = obj;
45280+
45281+ obj->next = *curr;
45282+ *curr = obj;
45283+
45284+ return;
45285+}
45286+
45287+static void
45288+insert_acl_subj_label(struct acl_subject_label *obj,
45289+ struct acl_role_label *role)
45290+{
45291+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
45292+ struct acl_subject_label **curr;
45293+
45294+ obj->prev = NULL;
45295+
45296+ curr = &role->subj_hash[index];
45297+ if (*curr != NULL)
45298+ (*curr)->prev = obj;
45299+
45300+ obj->next = *curr;
45301+ *curr = obj;
45302+
45303+ return;
45304+}
45305+
45306+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
45307+
45308+static void *
45309+create_table(__u32 * len, int elementsize)
45310+{
45311+ unsigned int table_sizes[] = {
45312+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
45313+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
45314+ 4194301, 8388593, 16777213, 33554393, 67108859
45315+ };
45316+ void *newtable = NULL;
45317+ unsigned int pwr = 0;
45318+
45319+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
45320+ table_sizes[pwr] <= *len)
45321+ pwr++;
45322+
45323+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
45324+ return newtable;
45325+
45326+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
45327+ newtable =
45328+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
45329+ else
45330+ newtable = vmalloc(table_sizes[pwr] * elementsize);
45331+
45332+ *len = table_sizes[pwr];
45333+
45334+ return newtable;
45335+}
45336+
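+/* allocate the policy hash tables based on the counts supplied by userspace
+   and take a reference on the real root dentry and vfsmount */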
45337+static int
45338+init_variables(const struct gr_arg *arg)
45339+{
45340+ struct task_struct *reaper = &init_task;
45341+ unsigned int stacksize;
45342+
45343+ subj_map_set.s_size = arg->role_db.num_subjects;
45344+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
45345+ name_set.n_size = arg->role_db.num_objects;
45346+ inodev_set.i_size = arg->role_db.num_objects;
45347+
45348+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
45349+ !name_set.n_size || !inodev_set.i_size)
45350+ return 1;
45351+
45352+ if (!gr_init_uidset())
45353+ return 1;
45354+
45355+ /* set up the stack that holds allocation info */
45356+
45357+ stacksize = arg->role_db.num_pointers + 5;
45358+
45359+ if (!acl_alloc_stack_init(stacksize))
45360+ return 1;
45361+
45362+ /* grab reference for the real root dentry and vfsmount */
45363+ read_lock(&reaper->fs->lock);
45364+ real_root = dget(reaper->fs->root.dentry);
45365+ real_root_mnt = mntget(reaper->fs->root.mnt);
45366+ read_unlock(&reaper->fs->lock);
45367+
45368+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
45369+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
45370+#endif
45371+
45372+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
45373+ if (fakefs_obj_rw == NULL)
45374+ return 1;
45375+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
45376+
45377+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
45378+ if (fakefs_obj_rwx == NULL)
45379+ return 1;
45380+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
45381+
45382+ subj_map_set.s_hash =
45383+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
45384+ acl_role_set.r_hash =
45385+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
45386+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
45387+ inodev_set.i_hash =
45388+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
45389+
45390+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
45391+ !name_set.n_hash || !inodev_set.i_hash)
45392+ return 1;
45393+
45394+ memset(subj_map_set.s_hash, 0,
45395+ sizeof(struct subject_map *) * subj_map_set.s_size);
45396+ memset(acl_role_set.r_hash, 0,
45397+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
45398+ memset(name_set.n_hash, 0,
45399+ sizeof (struct name_entry *) * name_set.n_size);
45400+ memset(inodev_set.i_hash, 0,
45401+ sizeof (struct inodev_entry *) * inodev_set.i_size);
45402+
45403+ return 0;
45404+}
45405+
45406+/* free information that is not needed after startup;
45407+   currently this is only the user->kernel pointer mappings for subjects
45408+*/
45409+
45410+static void
45411+free_init_variables(void)
45412+{
45413+ __u32 i;
45414+
45415+ if (subj_map_set.s_hash) {
45416+ for (i = 0; i < subj_map_set.s_size; i++) {
45417+ if (subj_map_set.s_hash[i]) {
45418+ kfree(subj_map_set.s_hash[i]);
45419+ subj_map_set.s_hash[i] = NULL;
45420+ }
45421+ }
45422+
45423+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
45424+ PAGE_SIZE)
45425+ kfree(subj_map_set.s_hash);
45426+ else
45427+ vfree(subj_map_set.s_hash);
45428+ }
45429+
45430+ return;
45431+}
45432+
45433+static void
45434+free_variables(void)
45435+{
45436+ struct acl_subject_label *s;
45437+ struct acl_role_label *r;
45438+ struct task_struct *task, *task2;
45439+ unsigned int x;
45440+
45441+ gr_clear_learn_entries();
45442+
45443+ read_lock(&tasklist_lock);
45444+ do_each_thread(task2, task) {
45445+ task->acl_sp_role = 0;
45446+ task->acl_role_id = 0;
45447+ task->acl = NULL;
45448+ task->role = NULL;
45449+ } while_each_thread(task2, task);
45450+ read_unlock(&tasklist_lock);
45451+
45452+ /* release the reference to the real root dentry and vfsmount */
45453+ if (real_root)
45454+ dput(real_root);
45455+ real_root = NULL;
45456+ if (real_root_mnt)
45457+ mntput(real_root_mnt);
45458+ real_root_mnt = NULL;
45459+
45460+ /* free all object hash tables */
45461+
45462+ FOR_EACH_ROLE_START(r)
45463+ if (r->subj_hash == NULL)
45464+ goto next_role;
45465+ FOR_EACH_SUBJECT_START(r, s, x)
45466+ if (s->obj_hash == NULL)
45467+ break;
45468+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
45469+ kfree(s->obj_hash);
45470+ else
45471+ vfree(s->obj_hash);
45472+ FOR_EACH_SUBJECT_END(s, x)
45473+ FOR_EACH_NESTED_SUBJECT_START(r, s)
45474+ if (s->obj_hash == NULL)
45475+ break;
45476+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
45477+ kfree(s->obj_hash);
45478+ else
45479+ vfree(s->obj_hash);
45480+ FOR_EACH_NESTED_SUBJECT_END(s)
45481+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
45482+ kfree(r->subj_hash);
45483+ else
45484+ vfree(r->subj_hash);
45485+ r->subj_hash = NULL;
45486+next_role:
45487+ FOR_EACH_ROLE_END(r)
45488+
45489+ acl_free_all();
45490+
45491+ if (acl_role_set.r_hash) {
45492+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
45493+ PAGE_SIZE)
45494+ kfree(acl_role_set.r_hash);
45495+ else
45496+ vfree(acl_role_set.r_hash);
45497+ }
45498+ if (name_set.n_hash) {
45499+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
45500+ PAGE_SIZE)
45501+ kfree(name_set.n_hash);
45502+ else
45503+ vfree(name_set.n_hash);
45504+ }
45505+
45506+ if (inodev_set.i_hash) {
45507+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
45508+ PAGE_SIZE)
45509+ kfree(inodev_set.i_hash);
45510+ else
45511+ vfree(inodev_set.i_hash);
45512+ }
45513+
45514+ gr_free_uidset();
45515+
45516+ memset(&name_set, 0, sizeof (struct name_db));
45517+ memset(&inodev_set, 0, sizeof (struct inodev_db));
45518+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
45519+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
45520+
45521+ default_role = NULL;
45522+ role_list = NULL;
45523+
45524+ return;
45525+}
45526+
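+/* walk a userspace object list via its ->prev pointers and count its entries */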
45527+static __u32
45528+count_user_objs(struct acl_object_label *userp)
45529+{
45530+ struct acl_object_label o_tmp;
45531+ __u32 num = 0;
45532+
45533+ while (userp) {
45534+ if (copy_from_user(&o_tmp, userp,
45535+ sizeof (struct acl_object_label)))
45536+ break;
45537+
45538+ userp = o_tmp.prev;
45539+ num++;
45540+ }
45541+
45542+ return num;
45543+}
45544+
45545+static struct acl_subject_label *
45546+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
45547+
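+/* copy an object's chain of globbed entries, including their filenames,
+   from userspace */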
45548+static int
45549+copy_user_glob(struct acl_object_label *obj)
45550+{
45551+ struct acl_object_label *g_tmp, **guser;
45552+ unsigned int len;
45553+ char *tmp;
45554+
45555+ if (obj->globbed == NULL)
45556+ return 0;
45557+
45558+ guser = &obj->globbed;
45559+ while (*guser) {
45560+ g_tmp = (struct acl_object_label *)
45561+ acl_alloc(sizeof (struct acl_object_label));
45562+ if (g_tmp == NULL)
45563+ return -ENOMEM;
45564+
45565+ if (copy_from_user(g_tmp, *guser,
45566+ sizeof (struct acl_object_label)))
45567+ return -EFAULT;
45568+
45569+ len = strnlen_user(g_tmp->filename, PATH_MAX);
45570+
45571+ if (!len || len >= PATH_MAX)
45572+ return -EINVAL;
45573+
45574+ if ((tmp = (char *) acl_alloc(len)) == NULL)
45575+ return -ENOMEM;
45576+
45577+ if (copy_from_user(tmp, g_tmp->filename, len))
45578+ return -EFAULT;
45579+ tmp[len-1] = '\0';
45580+ g_tmp->filename = tmp;
45581+
45582+ *guser = g_tmp;
45583+ guser = &(g_tmp->next);
45584+ }
45585+
45586+ return 0;
45587+}
45588+
45589+static int
45590+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
45591+ struct acl_role_label *role)
45592+{
45593+ struct acl_object_label *o_tmp;
45594+ unsigned int len;
45595+ int ret;
45596+ char *tmp;
45597+
45598+ while (userp) {
45599+ if ((o_tmp = (struct acl_object_label *)
45600+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
45601+ return -ENOMEM;
45602+
45603+ if (copy_from_user(o_tmp, userp,
45604+ sizeof (struct acl_object_label)))
45605+ return -EFAULT;
45606+
45607+ userp = o_tmp->prev;
45608+
45609+ len = strnlen_user(o_tmp->filename, PATH_MAX);
45610+
45611+ if (!len || len >= PATH_MAX)
45612+ return -EINVAL;
45613+
45614+ if ((tmp = (char *) acl_alloc(len)) == NULL)
45615+ return -ENOMEM;
45616+
45617+ if (copy_from_user(tmp, o_tmp->filename, len))
45618+ return -EFAULT;
45619+ tmp[len-1] = '\0';
45620+ o_tmp->filename = tmp;
45621+
45622+ insert_acl_obj_label(o_tmp, subj);
45623+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
45624+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
45625+ return -ENOMEM;
45626+
45627+ ret = copy_user_glob(o_tmp);
45628+ if (ret)
45629+ return ret;
45630+
45631+ if (o_tmp->nested) {
45632+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
45633+ if (IS_ERR(o_tmp->nested))
45634+ return PTR_ERR(o_tmp->nested);
45635+
45636+ /* insert into nested subject list */
45637+ o_tmp->nested->next = role->hash->first;
45638+ role->hash->first = o_tmp->nested;
45639+ }
45640+ }
45641+
45642+ return 0;
45643+}
45644+
45645+static __u32
45646+count_user_subjs(struct acl_subject_label *userp)
45647+{
45648+ struct acl_subject_label s_tmp;
45649+ __u32 num = 0;
45650+
45651+ while (userp) {
45652+ if (copy_from_user(&s_tmp, userp,
45653+ sizeof (struct acl_subject_label)))
45654+ break;
45655+
45656+ userp = s_tmp.prev;
45657+		/* do not include nested subjects in this count, since
45658+		   they are not inserted into the hash table but are
45659+		   instead attached to objects.  They have already been
45660+		   counted in userspace when sizing the allocation
45661+		   stack
45662+		*/
45663+ if (!(s_tmp.mode & GR_NESTED))
45664+ num++;
45665+ }
45666+
45667+ return num;
45668+}
45669+
45670+static int
45671+copy_user_allowedips(struct acl_role_label *rolep)
45672+{
45673+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
45674+
45675+ ruserip = rolep->allowed_ips;
45676+
45677+ while (ruserip) {
45678+ rlast = rtmp;
45679+
45680+ if ((rtmp = (struct role_allowed_ip *)
45681+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
45682+ return -ENOMEM;
45683+
45684+ if (copy_from_user(rtmp, ruserip,
45685+ sizeof (struct role_allowed_ip)))
45686+ return -EFAULT;
45687+
45688+ ruserip = rtmp->prev;
45689+
45690+ if (!rlast) {
45691+ rtmp->prev = NULL;
45692+ rolep->allowed_ips = rtmp;
45693+ } else {
45694+ rlast->next = rtmp;
45695+ rtmp->prev = rlast;
45696+ }
45697+
45698+ if (!ruserip)
45699+ rtmp->next = NULL;
45700+ }
45701+
45702+ return 0;
45703+}
45704+
45705+static int
45706+copy_user_transitions(struct acl_role_label *rolep)
45707+{
45708+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
45709+
45710+ unsigned int len;
45711+ char *tmp;
45712+
45713+ rusertp = rolep->transitions;
45714+
45715+ while (rusertp) {
45716+ rlast = rtmp;
45717+
45718+ if ((rtmp = (struct role_transition *)
45719+ acl_alloc(sizeof (struct role_transition))) == NULL)
45720+ return -ENOMEM;
45721+
45722+ if (copy_from_user(rtmp, rusertp,
45723+ sizeof (struct role_transition)))
45724+ return -EFAULT;
45725+
45726+ rusertp = rtmp->prev;
45727+
45728+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
45729+
45730+ if (!len || len >= GR_SPROLE_LEN)
45731+ return -EINVAL;
45732+
45733+ if ((tmp = (char *) acl_alloc(len)) == NULL)
45734+ return -ENOMEM;
45735+
45736+ if (copy_from_user(tmp, rtmp->rolename, len))
45737+ return -EFAULT;
45738+ tmp[len-1] = '\0';
45739+ rtmp->rolename = tmp;
45740+
45741+ if (!rlast) {
45742+ rtmp->prev = NULL;
45743+ rolep->transitions = rtmp;
45744+ } else {
45745+ rlast->next = rtmp;
45746+ rtmp->prev = rlast;
45747+ }
45748+
45749+ if (!rusertp)
45750+ rtmp->next = NULL;
45751+ }
45752+
45753+ return 0;
45754+}
45755+
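+/* copy a single subject along with its objects, transition tables and ip
+   acls from userspace; if it was already copied, return the existing
+   kernel copy */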
45756+static struct acl_subject_label *
45757+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
45758+{
45759+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
45760+ unsigned int len;
45761+ char *tmp;
45762+ __u32 num_objs;
45763+ struct acl_ip_label **i_tmp, *i_utmp2;
45764+ struct gr_hash_struct ghash;
45765+ struct subject_map *subjmap;
45766+ unsigned int i_num;
45767+ int err;
45768+
45769+ s_tmp = lookup_subject_map(userp);
45770+
45771+ /* we've already copied this subject into the kernel, just return
45772+ the reference to it, and don't copy it over again
45773+ */
45774+ if (s_tmp)
45775+ return(s_tmp);
45776+
45777+ if ((s_tmp = (struct acl_subject_label *)
45778+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
45779+ return ERR_PTR(-ENOMEM);
45780+
45781+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
45782+ if (subjmap == NULL)
45783+ return ERR_PTR(-ENOMEM);
45784+
45785+ subjmap->user = userp;
45786+ subjmap->kernel = s_tmp;
45787+ insert_subj_map_entry(subjmap);
45788+
45789+ if (copy_from_user(s_tmp, userp,
45790+ sizeof (struct acl_subject_label)))
45791+ return ERR_PTR(-EFAULT);
45792+
45793+ len = strnlen_user(s_tmp->filename, PATH_MAX);
45794+
45795+ if (!len || len >= PATH_MAX)
45796+ return ERR_PTR(-EINVAL);
45797+
45798+ if ((tmp = (char *) acl_alloc(len)) == NULL)
45799+ return ERR_PTR(-ENOMEM);
45800+
45801+ if (copy_from_user(tmp, s_tmp->filename, len))
45802+ return ERR_PTR(-EFAULT);
45803+ tmp[len-1] = '\0';
45804+ s_tmp->filename = tmp;
45805+
45806+ if (!strcmp(s_tmp->filename, "/"))
45807+ role->root_label = s_tmp;
45808+
45809+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
45810+ return ERR_PTR(-EFAULT);
45811+
45812+ /* copy user and group transition tables */
45813+
45814+ if (s_tmp->user_trans_num) {
45815+ uid_t *uidlist;
45816+
45817+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
45818+ if (uidlist == NULL)
45819+ return ERR_PTR(-ENOMEM);
45820+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
45821+ return ERR_PTR(-EFAULT);
45822+
45823+ s_tmp->user_transitions = uidlist;
45824+ }
45825+
45826+ if (s_tmp->group_trans_num) {
45827+ gid_t *gidlist;
45828+
45829+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
45830+ if (gidlist == NULL)
45831+ return ERR_PTR(-ENOMEM);
45832+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
45833+ return ERR_PTR(-EFAULT);
45834+
45835+ s_tmp->group_transitions = gidlist;
45836+ }
45837+
45838+ /* set up object hash table */
45839+ num_objs = count_user_objs(ghash.first);
45840+
45841+ s_tmp->obj_hash_size = num_objs;
45842+ s_tmp->obj_hash =
45843+ (struct acl_object_label **)
45844+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
45845+
45846+ if (!s_tmp->obj_hash)
45847+ return ERR_PTR(-ENOMEM);
45848+
45849+ memset(s_tmp->obj_hash, 0,
45850+ s_tmp->obj_hash_size *
45851+ sizeof (struct acl_object_label *));
45852+
45853+ /* add in objects */
45854+ err = copy_user_objs(ghash.first, s_tmp, role);
45855+
45856+ if (err)
45857+ return ERR_PTR(err);
45858+
45859+ /* set pointer for parent subject */
45860+ if (s_tmp->parent_subject) {
45861+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
45862+
45863+ if (IS_ERR(s_tmp2))
45864+ return s_tmp2;
45865+
45866+ s_tmp->parent_subject = s_tmp2;
45867+ }
45868+
45869+ /* add in ip acls */
45870+
45871+ if (!s_tmp->ip_num) {
45872+ s_tmp->ips = NULL;
45873+ goto insert;
45874+ }
45875+
45876+ i_tmp =
45877+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
45878+ sizeof (struct acl_ip_label *));
45879+
45880+ if (!i_tmp)
45881+ return ERR_PTR(-ENOMEM);
45882+
45883+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
45884+ *(i_tmp + i_num) =
45885+ (struct acl_ip_label *)
45886+ acl_alloc(sizeof (struct acl_ip_label));
45887+ if (!*(i_tmp + i_num))
45888+ return ERR_PTR(-ENOMEM);
45889+
45890+ if (copy_from_user
45891+ (&i_utmp2, s_tmp->ips + i_num,
45892+ sizeof (struct acl_ip_label *)))
45893+ return ERR_PTR(-EFAULT);
45894+
45895+ if (copy_from_user
45896+ (*(i_tmp + i_num), i_utmp2,
45897+ sizeof (struct acl_ip_label)))
45898+ return ERR_PTR(-EFAULT);
45899+
45900+ if ((*(i_tmp + i_num))->iface == NULL)
45901+ continue;
45902+
45903+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
45904+ if (!len || len >= IFNAMSIZ)
45905+ return ERR_PTR(-EINVAL);
45906+ tmp = acl_alloc(len);
45907+ if (tmp == NULL)
45908+ return ERR_PTR(-ENOMEM);
45909+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
45910+ return ERR_PTR(-EFAULT);
45911+ (*(i_tmp + i_num))->iface = tmp;
45912+ }
45913+
45914+ s_tmp->ips = i_tmp;
45915+
45916+insert:
45917+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
45918+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
45919+ return ERR_PTR(-ENOMEM);
45920+
45921+ return s_tmp;
45922+}
45923+
45924+static int
45925+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
45926+{
45927+ struct acl_subject_label s_pre;
45928+ struct acl_subject_label * ret;
45929+ int err;
45930+
45931+ while (userp) {
45932+ if (copy_from_user(&s_pre, userp,
45933+ sizeof (struct acl_subject_label)))
45934+ return -EFAULT;
45935+
45936+		/* do not add nested subjects here; they are added
45937+		   while parsing objects
45938+		*/
45939+
45940+ if (s_pre.mode & GR_NESTED) {
45941+ userp = s_pre.prev;
45942+ continue;
45943+ }
45944+
45945+ ret = do_copy_user_subj(userp, role);
45946+
45947+ err = PTR_ERR(ret);
45948+ if (IS_ERR(ret))
45949+ return err;
45950+
45951+ insert_acl_subj_label(ret, role);
45952+
45953+ userp = s_pre.prev;
45954+ }
45955+
45956+ return 0;
45957+}
45958+
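+/* copy the entire policy (special role passwords, roles, subjects and
+   objects) from userspace into the kernel */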
45959+static int
45960+copy_user_acl(struct gr_arg *arg)
45961+{
45962+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
45963+ struct sprole_pw *sptmp;
45964+ struct gr_hash_struct *ghash;
45965+ uid_t *domainlist;
45966+ unsigned int r_num;
45967+ unsigned int len;
45968+ char *tmp;
45969+ int err = 0;
45970+ __u16 i;
45971+ __u32 num_subjs;
45972+
45973+ /* we need a default and kernel role */
45974+ if (arg->role_db.num_roles < 2)
45975+ return -EINVAL;
45976+
45977+ /* copy special role authentication info from userspace */
45978+
45979+ num_sprole_pws = arg->num_sprole_pws;
45980+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
45981+
45982+ if (!acl_special_roles) {
45983+ err = -ENOMEM;
45984+ goto cleanup;
45985+ }
45986+
45987+ for (i = 0; i < num_sprole_pws; i++) {
45988+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
45989+ if (!sptmp) {
45990+ err = -ENOMEM;
45991+ goto cleanup;
45992+ }
45993+ if (copy_from_user(sptmp, arg->sprole_pws + i,
45994+ sizeof (struct sprole_pw))) {
45995+ err = -EFAULT;
45996+ goto cleanup;
45997+ }
45998+
45999+ len =
46000+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
46001+
46002+ if (!len || len >= GR_SPROLE_LEN) {
46003+ err = -EINVAL;
46004+ goto cleanup;
46005+ }
46006+
46007+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
46008+ err = -ENOMEM;
46009+ goto cleanup;
46010+ }
46011+
46012+ if (copy_from_user(tmp, sptmp->rolename, len)) {
46013+ err = -EFAULT;
46014+ goto cleanup;
46015+ }
46016+ tmp[len-1] = '\0';
46017+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
46018+ printk(KERN_ALERT "Copying special role %s\n", tmp);
46019+#endif
46020+ sptmp->rolename = tmp;
46021+ acl_special_roles[i] = sptmp;
46022+ }
46023+
46024+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
46025+
46026+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
46027+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
46028+
46029+ if (!r_tmp) {
46030+ err = -ENOMEM;
46031+ goto cleanup;
46032+ }
46033+
46034+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
46035+ sizeof (struct acl_role_label *))) {
46036+ err = -EFAULT;
46037+ goto cleanup;
46038+ }
46039+
46040+ if (copy_from_user(r_tmp, r_utmp2,
46041+ sizeof (struct acl_role_label))) {
46042+ err = -EFAULT;
46043+ goto cleanup;
46044+ }
46045+
46046+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
46047+
46048+		if (!len || len >= GR_SPROLE_LEN) {
46049+ err = -EINVAL;
46050+ goto cleanup;
46051+ }
46052+
46053+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
46054+ err = -ENOMEM;
46055+ goto cleanup;
46056+ }
46057+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
46058+ err = -EFAULT;
46059+ goto cleanup;
46060+ }
46061+ tmp[len-1] = '\0';
46062+ r_tmp->rolename = tmp;
46063+
46064+ if (!strcmp(r_tmp->rolename, "default")
46065+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
46066+ default_role = r_tmp;
46067+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
46068+ kernel_role = r_tmp;
46069+ }
46070+
46071+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
46072+ err = -ENOMEM;
46073+ goto cleanup;
46074+ }
46075+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
46076+ err = -EFAULT;
46077+ goto cleanup;
46078+ }
46079+
46080+ r_tmp->hash = ghash;
46081+
46082+ num_subjs = count_user_subjs(r_tmp->hash->first);
46083+
46084+ r_tmp->subj_hash_size = num_subjs;
46085+ r_tmp->subj_hash =
46086+ (struct acl_subject_label **)
46087+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
46088+
46089+ if (!r_tmp->subj_hash) {
46090+ err = -ENOMEM;
46091+ goto cleanup;
46092+ }
46093+
46094+ err = copy_user_allowedips(r_tmp);
46095+ if (err)
46096+ goto cleanup;
46097+
46098+ /* copy domain info */
46099+ if (r_tmp->domain_children != NULL) {
46100+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
46101+ if (domainlist == NULL) {
46102+ err = -ENOMEM;
46103+ goto cleanup;
46104+ }
46105+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
46106+ err = -EFAULT;
46107+ goto cleanup;
46108+ }
46109+ r_tmp->domain_children = domainlist;
46110+ }
46111+
46112+ err = copy_user_transitions(r_tmp);
46113+ if (err)
46114+ goto cleanup;
46115+
46116+ memset(r_tmp->subj_hash, 0,
46117+ r_tmp->subj_hash_size *
46118+ sizeof (struct acl_subject_label *));
46119+
46120+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
46121+
46122+ if (err)
46123+ goto cleanup;
46124+
46125+ /* set nested subject list to null */
46126+ r_tmp->hash->first = NULL;
46127+
46128+ insert_acl_role_label(r_tmp);
46129+ }
46130+
46131+ goto return_err;
46132+ cleanup:
46133+ free_variables();
46134+ return_err:
46135+ return err;
46136+
46137+}
46138+
46139+static int
46140+gracl_init(struct gr_arg *args)
46141+{
46142+ int error = 0;
46143+
46144+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
46145+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
46146+
46147+ if (init_variables(args)) {
46148+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
46149+ error = -ENOMEM;
46150+ free_variables();
46151+ goto out;
46152+ }
46153+
46154+ error = copy_user_acl(args);
46155+ free_init_variables();
46156+ if (error) {
46157+ free_variables();
46158+ goto out;
46159+ }
46160+
46161+ if ((error = gr_set_acls(0))) {
46162+ free_variables();
46163+ goto out;
46164+ }
46165+
46166+ pax_open_kernel();
46167+ gr_status |= GR_READY;
46168+ pax_close_kernel();
46169+
46170+ out:
46171+ return error;
46172+}
46173+
46174+/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
46175+
46176+static int
46177+glob_match(const char *p, const char *n)
46178+{
46179+ char c;
46180+
46181+ while ((c = *p++) != '\0') {
46182+ switch (c) {
46183+ case '?':
46184+ if (*n == '\0')
46185+ return 1;
46186+ else if (*n == '/')
46187+ return 1;
46188+ break;
46189+ case '\\':
46190+ if (*n != c)
46191+ return 1;
46192+ break;
46193+ case '*':
46194+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
46195+ if (*n == '/')
46196+ return 1;
46197+ else if (c == '?') {
46198+ if (*n == '\0')
46199+ return 1;
46200+ else
46201+ ++n;
46202+ }
46203+ }
46204+ if (c == '\0') {
46205+ return 0;
46206+ } else {
46207+ const char *endp;
46208+
46209+ if ((endp = strchr(n, '/')) == NULL)
46210+ endp = n + strlen(n);
46211+
46212+ if (c == '[') {
46213+ for (--p; n < endp; ++n)
46214+ if (!glob_match(p, n))
46215+ return 0;
46216+ } else if (c == '/') {
46217+ while (*n != '\0' && *n != '/')
46218+ ++n;
46219+ if (*n == '/' && !glob_match(p, n + 1))
46220+ return 0;
46221+ } else {
46222+ for (--p; n < endp; ++n)
46223+ if (*n == c && !glob_match(p, n))
46224+ return 0;
46225+ }
46226+
46227+ return 1;
46228+ }
46229+ case '[':
46230+ {
46231+ int not;
46232+ char cold;
46233+
46234+ if (*n == '\0' || *n == '/')
46235+ return 1;
46236+
46237+ not = (*p == '!' || *p == '^');
46238+ if (not)
46239+ ++p;
46240+
46241+ c = *p++;
46242+ for (;;) {
46243+ unsigned char fn = (unsigned char)*n;
46244+
46245+ if (c == '\0')
46246+ return 1;
46247+ else {
46248+ if (c == fn)
46249+ goto matched;
46250+ cold = c;
46251+ c = *p++;
46252+
46253+ if (c == '-' && *p != ']') {
46254+ unsigned char cend = *p++;
46255+
46256+ if (cend == '\0')
46257+ return 1;
46258+
46259+ if (cold <= fn && fn <= cend)
46260+ goto matched;
46261+
46262+ c = *p++;
46263+ }
46264+ }
46265+
46266+ if (c == ']')
46267+ break;
46268+ }
46269+ if (!not)
46270+ return 1;
46271+ break;
46272+ matched:
46273+ while (c != ']') {
46274+ if (c == '\0')
46275+ return 1;
46276+
46277+ c = *p++;
46278+ }
46279+ if (not)
46280+ return 1;
46281+ }
46282+ break;
46283+ default:
46284+ if (c != *n)
46285+ return 1;
46286+ }
46287+
46288+ ++n;
46289+ }
46290+
46291+ if (*n == '\0')
46292+ return 0;
46293+
46294+ if (*n == '/')
46295+ return 0;
46296+
46297+ return 1;
46298+}
46299+
46300+static struct acl_object_label *
46301+chk_glob_label(struct acl_object_label *globbed,
46302+ struct dentry *dentry, struct vfsmount *mnt, char **path)
46303+{
46304+ struct acl_object_label *tmp;
46305+
46306+ if (*path == NULL)
46307+ *path = gr_to_filename_nolock(dentry, mnt);
46308+
46309+ tmp = globbed;
46310+
46311+ while (tmp) {
46312+ if (!glob_match(tmp->filename, *path))
46313+ return tmp;
46314+ tmp = tmp->next;
46315+ }
46316+
46317+ return NULL;
46318+}
46319+
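+/* search the object hash of the subject and of its parent subjects for the
+   given inode/device, applying glob rules when requested */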
46320+static struct acl_object_label *
46321+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
46322+ const ino_t curr_ino, const dev_t curr_dev,
46323+ const struct acl_subject_label *subj, char **path, const int checkglob)
46324+{
46325+ struct acl_subject_label *tmpsubj;
46326+ struct acl_object_label *retval;
46327+ struct acl_object_label *retval2;
46328+
46329+ tmpsubj = (struct acl_subject_label *) subj;
46330+ read_lock(&gr_inode_lock);
46331+ do {
46332+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
46333+ if (retval) {
46334+ if (checkglob && retval->globbed) {
46335+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
46336+ (struct vfsmount *)orig_mnt, path);
46337+ if (retval2)
46338+ retval = retval2;
46339+ }
46340+ break;
46341+ }
46342+ } while ((tmpsubj = tmpsubj->parent_subject));
46343+ read_unlock(&gr_inode_lock);
46344+
46345+ return retval;
46346+}
46347+
46348+static __inline__ struct acl_object_label *
46349+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
46350+ const struct dentry *curr_dentry,
46351+ const struct acl_subject_label *subj, char **path, const int checkglob)
46352+{
46353+ int newglob = checkglob;
46354+
46355+	/* if we aren't checking a subdirectory of the original path yet, skip glob checking,
46356+	   since we don't want a "/ *" rule to match instead of the "/" object.
46357+	   Don't do this for create lookups that call this function, though: they look up
46358+	   on the parent and therefore need glob checks on all paths
46359+	*/
46360+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
46361+ newglob = GR_NO_GLOB;
46362+
46363+ return __full_lookup(orig_dentry, orig_mnt,
46364+ curr_dentry->d_inode->i_ino,
46365+ __get_dev(curr_dentry), subj, path, newglob);
46366+}
46367+
46368+static struct acl_object_label *
46369+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46370+ const struct acl_subject_label *subj, char *path, const int checkglob)
46371+{
46372+ struct dentry *dentry = (struct dentry *) l_dentry;
46373+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
46374+ struct acl_object_label *retval;
46375+
46376+ spin_lock(&dcache_lock);
46377+ spin_lock(&vfsmount_lock);
46378+
46379+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
46380+#ifdef CONFIG_NET
46381+ mnt == sock_mnt ||
46382+#endif
46383+#ifdef CONFIG_HUGETLBFS
46384+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
46385+#endif
46386+ /* ignore Eric Biederman */
46387+ IS_PRIVATE(l_dentry->d_inode))) {
46388+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
46389+ goto out;
46390+ }
46391+
46392+ for (;;) {
46393+ if (dentry == real_root && mnt == real_root_mnt)
46394+ break;
46395+
46396+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
46397+ if (mnt->mnt_parent == mnt)
46398+ break;
46399+
46400+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
46401+ if (retval != NULL)
46402+ goto out;
46403+
46404+ dentry = mnt->mnt_mountpoint;
46405+ mnt = mnt->mnt_parent;
46406+ continue;
46407+ }
46408+
46409+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
46410+ if (retval != NULL)
46411+ goto out;
46412+
46413+ dentry = dentry->d_parent;
46414+ }
46415+
46416+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
46417+
46418+ if (retval == NULL)
46419+ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
46420+out:
46421+ spin_unlock(&vfsmount_lock);
46422+ spin_unlock(&dcache_lock);
46423+
46424+ BUG_ON(retval == NULL);
46425+
46426+ return retval;
46427+}
46428+
46429+static __inline__ struct acl_object_label *
46430+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46431+ const struct acl_subject_label *subj)
46432+{
46433+ char *path = NULL;
46434+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
46435+}
46436+
46437+static __inline__ struct acl_object_label *
46438+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46439+ const struct acl_subject_label *subj)
46440+{
46441+ char *path = NULL;
46442+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
46443+}
46444+
46445+static __inline__ struct acl_object_label *
46446+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46447+ const struct acl_subject_label *subj, char *path)
46448+{
46449+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
46450+}
46451+
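+/* walk from the dentry up toward the real root and return the closest
+   matching subject label for the given role */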
46452+static struct acl_subject_label *
46453+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
46454+ const struct acl_role_label *role)
46455+{
46456+ struct dentry *dentry = (struct dentry *) l_dentry;
46457+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
46458+ struct acl_subject_label *retval;
46459+
46460+ spin_lock(&dcache_lock);
46461+ spin_lock(&vfsmount_lock);
46462+
46463+ for (;;) {
46464+ if (dentry == real_root && mnt == real_root_mnt)
46465+ break;
46466+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
46467+ if (mnt->mnt_parent == mnt)
46468+ break;
46469+
46470+ read_lock(&gr_inode_lock);
46471+ retval =
46472+ lookup_acl_subj_label(dentry->d_inode->i_ino,
46473+ __get_dev(dentry), role);
46474+ read_unlock(&gr_inode_lock);
46475+ if (retval != NULL)
46476+ goto out;
46477+
46478+ dentry = mnt->mnt_mountpoint;
46479+ mnt = mnt->mnt_parent;
46480+ continue;
46481+ }
46482+
46483+ read_lock(&gr_inode_lock);
46484+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
46485+ __get_dev(dentry), role);
46486+ read_unlock(&gr_inode_lock);
46487+ if (retval != NULL)
46488+ goto out;
46489+
46490+ dentry = dentry->d_parent;
46491+ }
46492+
46493+ read_lock(&gr_inode_lock);
46494+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
46495+ __get_dev(dentry), role);
46496+ read_unlock(&gr_inode_lock);
46497+
46498+ if (unlikely(retval == NULL)) {
46499+ read_lock(&gr_inode_lock);
46500+ retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
46501+ __get_dev(real_root), role);
46502+ read_unlock(&gr_inode_lock);
46503+ }
46504+out:
46505+ spin_unlock(&vfsmount_lock);
46506+ spin_unlock(&dcache_lock);
46507+
46508+ BUG_ON(retval == NULL);
46509+
46510+ return retval;
46511+}
46512+
46513+static void
46514+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
46515+{
46516+ struct task_struct *task = current;
46517+ const struct cred *cred = current_cred();
46518+
46519+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
46520+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46521+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46522+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
46523+
46524+ return;
46525+}
46526+
46527+static void
46528+gr_log_learn_sysctl(const char *path, const __u32 mode)
46529+{
46530+ struct task_struct *task = current;
46531+ const struct cred *cred = current_cred();
46532+
46533+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
46534+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46535+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46536+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
46537+
46538+ return;
46539+}
46540+
46541+static void
46542+gr_log_learn_id_change(const char type, const unsigned int real,
46543+ const unsigned int effective, const unsigned int fs)
46544+{
46545+ struct task_struct *task = current;
46546+ const struct cred *cred = current_cred();
46547+
46548+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
46549+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
46550+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
46551+ type, real, effective, fs, &task->signal->saved_ip);
46552+
46553+ return;
46554+}
46555+
46556+__u32
46557+gr_check_link(const struct dentry * new_dentry,
46558+ const struct dentry * parent_dentry,
46559+ const struct vfsmount * parent_mnt,
46560+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
46561+{
46562+ struct acl_object_label *obj;
46563+ __u32 oldmode, newmode;
46564+ __u32 needmode;
46565+
46566+ if (unlikely(!(gr_status & GR_READY)))
46567+ return (GR_CREATE | GR_LINK);
46568+
46569+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
46570+ oldmode = obj->mode;
46571+
46572+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46573+ oldmode |= (GR_CREATE | GR_LINK);
46574+
46575+ needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
46576+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
46577+ needmode |= GR_SETID | GR_AUDIT_SETID;
46578+
46579+ newmode =
46580+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
46581+ oldmode | needmode);
46582+
46583+ needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
46584+ GR_SETID | GR_READ | GR_FIND | GR_DELETE |
46585+ GR_INHERIT | GR_AUDIT_INHERIT);
46586+
46587+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
46588+ goto bad;
46589+
46590+ if ((oldmode & needmode) != needmode)
46591+ goto bad;
46592+
46593+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
46594+ if ((newmode & needmode) != needmode)
46595+ goto bad;
46596+
46597+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
46598+ return newmode;
46599+bad:
46600+ needmode = oldmode;
46601+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
46602+ needmode |= GR_SETID;
46603+
46604+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
46605+ gr_log_learn(old_dentry, old_mnt, needmode);
46606+ return (GR_CREATE | GR_LINK);
46607+ } else if (newmode & GR_SUPPRESS)
46608+ return GR_SUPPRESS;
46609+ else
46610+ return 0;
46611+}
46612+
46613+__u32
46614+gr_search_file(const struct dentry * dentry, const __u32 mode,
46615+ const struct vfsmount * mnt)
46616+{
46617+ __u32 retval = mode;
46618+ struct acl_subject_label *curracl;
46619+ struct acl_object_label *currobj;
46620+
46621+ if (unlikely(!(gr_status & GR_READY)))
46622+ return (mode & ~GR_AUDITS);
46623+
46624+ curracl = current->acl;
46625+
46626+ currobj = chk_obj_label(dentry, mnt, curracl);
46627+ retval = currobj->mode & mode;
46628+
46629+ /* if we're opening a specified transfer file for writing
46630+ (e.g. /dev/initctl), then transfer our role to init
46631+ */
46632+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
46633+ current->role->roletype & GR_ROLE_PERSIST)) {
46634+ struct task_struct *task = init_pid_ns.child_reaper;
46635+
46636+ if (task->role != current->role) {
46637+ task->acl_sp_role = 0;
46638+ task->acl_role_id = current->acl_role_id;
46639+ task->role = current->role;
46640+ rcu_read_lock();
46641+ read_lock(&grsec_exec_file_lock);
46642+ gr_apply_subject_to_task(task);
46643+ read_unlock(&grsec_exec_file_lock);
46644+ rcu_read_unlock();
46645+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
46646+ }
46647+ }
46648+
46649+ if (unlikely
46650+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
46651+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
46652+ __u32 new_mode = mode;
46653+
46654+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46655+
46656+ retval = new_mode;
46657+
46658+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
46659+ new_mode |= GR_INHERIT;
46660+
46661+ if (!(mode & GR_NOLEARN))
46662+ gr_log_learn(dentry, mnt, new_mode);
46663+ }
46664+
46665+ return retval;
46666+}
46667+
46668+__u32
46669+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
46670+ const struct vfsmount * mnt, const __u32 mode)
46671+{
46672+ struct name_entry *match;
46673+ struct acl_object_label *matchpo;
46674+ struct acl_subject_label *curracl;
46675+ char *path;
46676+ __u32 retval;
46677+
46678+ if (unlikely(!(gr_status & GR_READY)))
46679+ return (mode & ~GR_AUDITS);
46680+
46681+ preempt_disable();
46682+ path = gr_to_filename_rbac(new_dentry, mnt);
46683+ match = lookup_name_entry_create(path);
46684+
46685+ if (!match)
46686+ goto check_parent;
46687+
46688+ curracl = current->acl;
46689+
46690+ read_lock(&gr_inode_lock);
46691+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
46692+ read_unlock(&gr_inode_lock);
46693+
46694+ if (matchpo) {
46695+ if ((matchpo->mode & mode) !=
46696+ (mode & ~(GR_AUDITS | GR_SUPPRESS))
46697+ && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
46698+ __u32 new_mode = mode;
46699+
46700+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46701+
46702+ gr_log_learn(new_dentry, mnt, new_mode);
46703+
46704+ preempt_enable();
46705+ return new_mode;
46706+ }
46707+ preempt_enable();
46708+ return (matchpo->mode & mode);
46709+ }
46710+
46711+ check_parent:
46712+ curracl = current->acl;
46713+
46714+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
46715+ retval = matchpo->mode & mode;
46716+
46717+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
46718+ && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
46719+ __u32 new_mode = mode;
46720+
46721+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
46722+
46723+ gr_log_learn(new_dentry, mnt, new_mode);
46724+ preempt_enable();
46725+ return new_mode;
46726+ }
46727+
46728+ preempt_enable();
46729+ return retval;
46730+}
46731+
46732+int
46733+gr_check_hidden_task(const struct task_struct *task)
46734+{
46735+ if (unlikely(!(gr_status & GR_READY)))
46736+ return 0;
46737+
46738+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
46739+ return 1;
46740+
46741+ return 0;
46742+}
46743+
46744+int
46745+gr_check_protected_task(const struct task_struct *task)
46746+{
46747+ if (unlikely(!(gr_status & GR_READY) || !task))
46748+ return 0;
46749+
46750+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46751+ task->acl != current->acl)
46752+ return 1;
46753+
46754+ return 0;
46755+}
46756+
46757+int
46758+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
46759+{
46760+ struct task_struct *p;
46761+ int ret = 0;
46762+
46763+ if (unlikely(!(gr_status & GR_READY) || !pid))
46764+ return ret;
46765+
46766+ read_lock(&tasklist_lock);
46767+ do_each_pid_task(pid, type, p) {
46768+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
46769+ p->acl != current->acl) {
46770+ ret = 1;
46771+ goto out;
46772+ }
46773+ } while_each_pid_task(pid, type, p);
46774+out:
46775+ read_unlock(&tasklist_lock);
46776+
46777+ return ret;
46778+}
46779+
46780+void
46781+gr_copy_label(struct task_struct *tsk)
46782+{
46783+ tsk->signal->used_accept = 0;
46784+ tsk->acl_sp_role = 0;
46785+ tsk->acl_role_id = current->acl_role_id;
46786+ tsk->acl = current->acl;
46787+ tsk->role = current->role;
46788+ tsk->signal->curr_ip = current->signal->curr_ip;
46789+ tsk->signal->saved_ip = current->signal->saved_ip;
46790+ if (current->exec_file)
46791+ get_file(current->exec_file);
46792+ tsk->exec_file = current->exec_file;
46793+ tsk->is_writable = current->is_writable;
46794+ if (unlikely(current->signal->used_accept)) {
46795+ current->signal->curr_ip = 0;
46796+ current->signal->saved_ip = 0;
46797+ }
46798+
46799+ return;
46800+}
46801+
46802+static void
46803+gr_set_proc_res(struct task_struct *task)
46804+{
46805+ struct acl_subject_label *proc;
46806+ unsigned short i;
46807+
46808+ proc = task->acl;
46809+
46810+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
46811+ return;
46812+
46813+ for (i = 0; i < RLIM_NLIMITS; i++) {
46814+ if (!(proc->resmask & (1 << i)))
46815+ continue;
46816+
46817+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
46818+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
46819+ }
46820+
46821+ return;
46822+}
46823+
46824+extern int __gr_process_user_ban(struct user_struct *user);
46825+
46826+int
46827+gr_check_user_change(int real, int effective, int fs)
46828+{
46829+ unsigned int i;
46830+ __u16 num;
46831+ uid_t *uidlist;
46832+ int curuid;
46833+ int realok = 0;
46834+ int effectiveok = 0;
46835+ int fsok = 0;
46836+
46837+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
46838+ struct user_struct *user;
46839+
46840+ if (real == -1)
46841+ goto skipit;
46842+
46843+ user = find_user(real);
46844+ if (user == NULL)
46845+ goto skipit;
46846+
46847+ if (__gr_process_user_ban(user)) {
46848+ /* for find_user */
46849+ free_uid(user);
46850+ return 1;
46851+ }
46852+
46853+ /* for find_user */
46854+ free_uid(user);
46855+
46856+skipit:
46857+#endif
46858+
46859+ if (unlikely(!(gr_status & GR_READY)))
46860+ return 0;
46861+
46862+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46863+ gr_log_learn_id_change('u', real, effective, fs);
46864+
46865+ num = current->acl->user_trans_num;
46866+ uidlist = current->acl->user_transitions;
46867+
46868+ if (uidlist == NULL)
46869+ return 0;
46870+
46871+ if (real == -1)
46872+ realok = 1;
46873+ if (effective == -1)
46874+ effectiveok = 1;
46875+ if (fs == -1)
46876+ fsok = 1;
46877+
46878+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
46879+ for (i = 0; i < num; i++) {
46880+ curuid = (int)uidlist[i];
46881+ if (real == curuid)
46882+ realok = 1;
46883+ if (effective == curuid)
46884+ effectiveok = 1;
46885+ if (fs == curuid)
46886+ fsok = 1;
46887+ }
46888+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
46889+ for (i = 0; i < num; i++) {
46890+ curuid = (int)uidlist[i];
46891+ if (real == curuid)
46892+ break;
46893+ if (effective == curuid)
46894+ break;
46895+ if (fs == curuid)
46896+ break;
46897+ }
46898+ /* not in deny list */
46899+ if (i == num) {
46900+ realok = 1;
46901+ effectiveok = 1;
46902+ fsok = 1;
46903+ }
46904+ }
46905+
46906+ if (realok && effectiveok && fsok)
46907+ return 0;
46908+ else {
46909+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46910+ return 1;
46911+ }
46912+}
46913+
46914+int
46915+gr_check_group_change(int real, int effective, int fs)
46916+{
46917+ unsigned int i;
46918+ __u16 num;
46919+ gid_t *gidlist;
46920+ int curgid;
46921+ int realok = 0;
46922+ int effectiveok = 0;
46923+ int fsok = 0;
46924+
46925+ if (unlikely(!(gr_status & GR_READY)))
46926+ return 0;
46927+
46928+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
46929+ gr_log_learn_id_change('g', real, effective, fs);
46930+
46931+ num = current->acl->group_trans_num;
46932+ gidlist = current->acl->group_transitions;
46933+
46934+ if (gidlist == NULL)
46935+ return 0;
46936+
46937+ if (real == -1)
46938+ realok = 1;
46939+ if (effective == -1)
46940+ effectiveok = 1;
46941+ if (fs == -1)
46942+ fsok = 1;
46943+
46944+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
46945+ for (i = 0; i < num; i++) {
46946+ curgid = (int)gidlist[i];
46947+ if (real == curgid)
46948+ realok = 1;
46949+ if (effective == curgid)
46950+ effectiveok = 1;
46951+ if (fs == curgid)
46952+ fsok = 1;
46953+ }
46954+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
46955+ for (i = 0; i < num; i++) {
46956+ curgid = (int)gidlist[i];
46957+ if (real == curgid)
46958+ break;
46959+ if (effective == curgid)
46960+ break;
46961+ if (fs == curgid)
46962+ break;
46963+ }
46964+ /* not in deny list */
46965+ if (i == num) {
46966+ realok = 1;
46967+ effectiveok = 1;
46968+ fsok = 1;
46969+ }
46970+ }
46971+
46972+ if (realok && effectiveok && fsok)
46973+ return 0;
46974+ else {
46975+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
46976+ return 1;
46977+ }
46978+}
46979+
46980+void
46981+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
46982+{
46983+ struct acl_role_label *role = task->role;
46984+ struct acl_subject_label *subj = NULL;
46985+ struct acl_object_label *obj;
46986+ struct file *filp;
46987+
46988+ if (unlikely(!(gr_status & GR_READY)))
46989+ return;
46990+
46991+ filp = task->exec_file;
46992+
46993+ /* kernel process, we'll give them the kernel role */
46994+ if (unlikely(!filp)) {
46995+ task->role = kernel_role;
46996+ task->acl = kernel_role->root_label;
46997+ return;
46998+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
46999+ role = lookup_acl_role_label(task, uid, gid);
47000+
47001+ /* perform subject lookup in possibly new role
47002+ we can use this result below in the case where role == task->role
47003+ */
47004+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
47005+
47006+	/* if we changed uid/gid but ended up in the same role
47007+	   and are using inheritance, don't lose the inherited subject:
47008+	   if the current subject differs from what the normal lookup
47009+	   would return, we arrived here via inheritance, so don't
47010+	   lose that subject
47011+	*/
47012+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
47013+ (subj == task->acl)))
47014+ task->acl = subj;
47015+
47016+ task->role = role;
47017+
47018+ task->is_writable = 0;
47019+
47020+ /* ignore additional mmap checks for processes that are writable
47021+ by the default ACL */
47022+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47023+ if (unlikely(obj->mode & GR_WRITE))
47024+ task->is_writable = 1;
47025+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
47026+ if (unlikely(obj->mode & GR_WRITE))
47027+ task->is_writable = 1;
47028+
47029+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47030+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
47031+#endif
47032+
47033+ gr_set_proc_res(task);
47034+
47035+ return;
47036+}
47037+
47038+int
47039+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
47040+ const int unsafe_share)
47041+{
47042+ struct task_struct *task = current;
47043+ struct acl_subject_label *newacl;
47044+ struct acl_object_label *obj;
47045+ __u32 retmode;
47046+
47047+ if (unlikely(!(gr_status & GR_READY)))
47048+ return 0;
47049+
47050+ newacl = chk_subj_label(dentry, mnt, task->role);
47051+
47052+ task_lock(task);
47053+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
47054+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
47055+ !(task->role->roletype & GR_ROLE_GOD) &&
47056+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
47057+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
47058+ task_unlock(task);
47059+ if (unsafe_share)
47060+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
47061+ else
47062+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
47063+ return -EACCES;
47064+ }
47065+ task_unlock(task);
47066+
47067+ obj = chk_obj_label(dentry, mnt, task->acl);
47068+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
47069+
47070+ if (!(task->acl->mode & GR_INHERITLEARN) &&
47071+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
47072+ if (obj->nested)
47073+ task->acl = obj->nested;
47074+ else
47075+ task->acl = newacl;
47076+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
47077+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
47078+
47079+ task->is_writable = 0;
47080+
47081+ /* ignore additional mmap checks for processes that are writable
47082+ by the default ACL */
47083+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
47084+ if (unlikely(obj->mode & GR_WRITE))
47085+ task->is_writable = 1;
47086+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
47087+ if (unlikely(obj->mode & GR_WRITE))
47088+ task->is_writable = 1;
47089+
47090+ gr_set_proc_res(task);
47091+
47092+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47093+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
47094+#endif
47095+ return 0;
47096+}
47097+
47098+/* always called with valid inodev ptr */
47099+static void
47100+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
47101+{
47102+ struct acl_object_label *matchpo;
47103+ struct acl_subject_label *matchps;
47104+ struct acl_subject_label *subj;
47105+ struct acl_role_label *role;
47106+ unsigned int x;
47107+
47108+ FOR_EACH_ROLE_START(role)
47109+ FOR_EACH_SUBJECT_START(role, subj, x)
47110+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
47111+ matchpo->mode |= GR_DELETED;
47112+ FOR_EACH_SUBJECT_END(subj,x)
47113+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
47114+ if (subj->inode == ino && subj->device == dev)
47115+ subj->mode |= GR_DELETED;
47116+ FOR_EACH_NESTED_SUBJECT_END(subj)
47117+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
47118+ matchps->mode |= GR_DELETED;
47119+ FOR_EACH_ROLE_END(role)
47120+
47121+ inodev->nentry->deleted = 1;
47122+
47123+ return;
47124+}
47125+
47126+void
47127+gr_handle_delete(const ino_t ino, const dev_t dev)
47128+{
47129+ struct inodev_entry *inodev;
47130+
47131+ if (unlikely(!(gr_status & GR_READY)))
47132+ return;
47133+
47134+ write_lock(&gr_inode_lock);
47135+ inodev = lookup_inodev_entry(ino, dev);
47136+ if (inodev != NULL)
47137+ do_handle_delete(inodev, ino, dev);
47138+ write_unlock(&gr_inode_lock);
47139+
47140+ return;
47141+}
47142+
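+/* find the deleted object matching the old inode/device, rehash it under
+   the new inode/device and clear its deleted flag */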
47143+static void
47144+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
47145+ const ino_t newinode, const dev_t newdevice,
47146+ struct acl_subject_label *subj)
47147+{
47148+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
47149+ struct acl_object_label *match;
47150+
47151+ match = subj->obj_hash[index];
47152+
47153+ while (match && (match->inode != oldinode ||
47154+ match->device != olddevice ||
47155+ !(match->mode & GR_DELETED)))
47156+ match = match->next;
47157+
47158+ if (match && (match->inode == oldinode)
47159+ && (match->device == olddevice)
47160+ && (match->mode & GR_DELETED)) {
47161+ if (match->prev == NULL) {
47162+ subj->obj_hash[index] = match->next;
47163+ if (match->next != NULL)
47164+ match->next->prev = NULL;
47165+ } else {
47166+ match->prev->next = match->next;
47167+ if (match->next != NULL)
47168+ match->next->prev = match->prev;
47169+ }
47170+ match->prev = NULL;
47171+ match->next = NULL;
47172+ match->inode = newinode;
47173+ match->device = newdevice;
47174+ match->mode &= ~GR_DELETED;
47175+
47176+ insert_acl_obj_label(match, subj);
47177+ }
47178+
47179+ return;
47180+}
47181+
47182+static void
47183+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
47184+ const ino_t newinode, const dev_t newdevice,
47185+ struct acl_role_label *role)
47186+{
47187+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
47188+ struct acl_subject_label *match;
47189+
47190+ match = role->subj_hash[index];
47191+
47192+ while (match && (match->inode != oldinode ||
47193+ match->device != olddevice ||
47194+ !(match->mode & GR_DELETED)))
47195+ match = match->next;
47196+
47197+ if (match && (match->inode == oldinode)
47198+ && (match->device == olddevice)
47199+ && (match->mode & GR_DELETED)) {
47200+ if (match->prev == NULL) {
47201+ role->subj_hash[index] = match->next;
47202+ if (match->next != NULL)
47203+ match->next->prev = NULL;
47204+ } else {
47205+ match->prev->next = match->next;
47206+ if (match->next != NULL)
47207+ match->next->prev = match->prev;
47208+ }
47209+ match->prev = NULL;
47210+ match->next = NULL;
47211+ match->inode = newinode;
47212+ match->device = newdevice;
47213+ match->mode &= ~GR_DELETED;
47214+
47215+ insert_acl_subj_label(match, role);
47216+ }
47217+
47218+ return;
47219+}
47220+
47221+static void
47222+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
47223+ const ino_t newinode, const dev_t newdevice)
47224+{
47225+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
47226+ struct inodev_entry *match;
47227+
47228+ match = inodev_set.i_hash[index];
47229+
47230+ while (match && (match->nentry->inode != oldinode ||
47231+ match->nentry->device != olddevice || !match->nentry->deleted))
47232+ match = match->next;
47233+
47234+ if (match && (match->nentry->inode == oldinode)
47235+ && (match->nentry->device == olddevice) &&
47236+ match->nentry->deleted) {
47237+ if (match->prev == NULL) {
47238+ inodev_set.i_hash[index] = match->next;
47239+ if (match->next != NULL)
47240+ match->next->prev = NULL;
47241+ } else {
47242+ match->prev->next = match->next;
47243+ if (match->next != NULL)
47244+ match->next->prev = match->prev;
47245+ }
47246+ match->prev = NULL;
47247+ match->next = NULL;
47248+ match->nentry->inode = newinode;
47249+ match->nentry->device = newdevice;
47250+ match->nentry->deleted = 0;
47251+
47252+ insert_inodev_entry(match);
47253+ }
47254+
47255+ return;
47256+}
47257+
47258+static void
47259+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
47260+ const struct vfsmount *mnt)
47261+{
47262+ struct acl_subject_label *subj;
47263+ struct acl_role_label *role;
47264+ unsigned int x;
47265+ ino_t inode = dentry->d_inode->i_ino;
47266+ dev_t dev = __get_dev(dentry);
47267+
47268+ FOR_EACH_ROLE_START(role)
47269+ update_acl_subj_label(matchn->inode, matchn->device,
47270+ inode, dev, role);
47271+
47272+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
47273+			if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
47274+ subj->inode = inode;
47275+ subj->device = dev;
47276+ }
47277+ FOR_EACH_NESTED_SUBJECT_END(subj)
47278+ FOR_EACH_SUBJECT_START(role, subj, x)
47279+ update_acl_obj_label(matchn->inode, matchn->device,
47280+ inode, dev, subj);
47281+ FOR_EACH_SUBJECT_END(subj,x)
47282+ FOR_EACH_ROLE_END(role)
47283+
47284+ update_inodev_entry(matchn->inode, matchn->device, inode, dev);
47285+
47286+ return;
47287+}
47288+
47289+void
47290+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
47291+{
47292+ struct name_entry *matchn;
47293+
47294+ if (unlikely(!(gr_status & GR_READY)))
47295+ return;
47296+
47297+ preempt_disable();
47298+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
47299+
47300+ if (unlikely((unsigned long)matchn)) {
47301+ write_lock(&gr_inode_lock);
47302+ do_handle_create(matchn, dentry, mnt);
47303+ write_unlock(&gr_inode_lock);
47304+ }
47305+ preempt_enable();
47306+
47307+ return;
47308+}
47309+
47310+void
47311+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
47312+ struct dentry *old_dentry,
47313+ struct dentry *new_dentry,
47314+ struct vfsmount *mnt, const __u8 replace)
47315+{
47316+ struct name_entry *matchn;
47317+ struct inodev_entry *inodev;
47318+ ino_t oldinode = old_dentry->d_inode->i_ino;
47319+ dev_t olddev = __get_dev(old_dentry);
47320+
47321+	/* vfs_rename swaps the name and parent link for old_dentry and
47322+	   new_dentry.
47323+	   At this point, old_dentry has the new name, parent link, and inode
47324+	   for the renamed file.
47325+	   If a file is being replaced by the rename, new_dentry has the inode
47326+	   and name of the replaced file.
47327+	*/
47328+
47329+ if (unlikely(!(gr_status & GR_READY)))
47330+ return;
47331+
47332+ preempt_disable();
47333+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
47334+
47335+ /* we wouldn't have to check d_inode if it weren't for
47336+ NFS silly-renaming
47337+ */
47338+
47339+ write_lock(&gr_inode_lock);
47340+ if (unlikely(replace && new_dentry->d_inode)) {
47341+ ino_t newinode = new_dentry->d_inode->i_ino;
47342+ dev_t newdev = __get_dev(new_dentry);
47343+ inodev = lookup_inodev_entry(newinode, newdev);
47344+ if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
47345+ do_handle_delete(inodev, newinode, newdev);
47346+ }
47347+
47348+ inodev = lookup_inodev_entry(oldinode, olddev);
47349+ if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
47350+ do_handle_delete(inodev, oldinode, olddev);
47351+
47352+ if (unlikely((unsigned long)matchn))
47353+ do_handle_create(matchn, old_dentry, mnt);
47354+
47355+ write_unlock(&gr_inode_lock);
47356+ preempt_enable();
47357+
47358+ return;
47359+}
47360+
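+/* check whether the current role may transition to the named special role
+   and whether the request comes from an allowed IP; returns 1 with
+   *salt/*sum pointing at the stored password salt and checksum, or with
+   both set to NULL for no-password (NOPW) and PAM-authenticated roles,
+   and 0 if the transition is not permitted
+*/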
47361+static int
47362+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
47363+ unsigned char **sum)
47364+{
47365+ struct acl_role_label *r;
47366+ struct role_allowed_ip *ipp;
47367+ struct role_transition *trans;
47368+ unsigned int i;
47369+ int found = 0;
47370+ u32 curr_ip = current->signal->curr_ip;
47371+
47372+ current->signal->saved_ip = curr_ip;
47373+
47374+ /* check transition table */
47375+
47376+ for (trans = current->role->transitions; trans; trans = trans->next) {
47377+ if (!strcmp(rolename, trans->rolename)) {
47378+ found = 1;
47379+ break;
47380+ }
47381+ }
47382+
47383+ if (!found)
47384+ return 0;
47385+
47386+ /* handle special roles that do not require authentication
47387+ and check ip */
47388+
47389+ FOR_EACH_ROLE_START(r)
47390+ if (!strcmp(rolename, r->rolename) &&
47391+ (r->roletype & GR_ROLE_SPECIAL)) {
47392+ found = 0;
47393+ if (r->allowed_ips != NULL) {
47394+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
47395+ if ((ntohl(curr_ip) & ipp->netmask) ==
47396+ (ntohl(ipp->addr) & ipp->netmask))
47397+ found = 1;
47398+ }
47399+ } else
47400+ found = 2;
47401+ if (!found)
47402+ return 0;
47403+
47404+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
47405+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
47406+ *salt = NULL;
47407+ *sum = NULL;
47408+ return 1;
47409+ }
47410+ }
47411+ FOR_EACH_ROLE_END(r)
47412+
47413+ for (i = 0; i < num_sprole_pws; i++) {
47414+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
47415+ *salt = acl_special_roles[i]->salt;
47416+ *sum = acl_special_roles[i]->sum;
47417+ return 1;
47418+ }
47419+ }
47420+
47421+ return 0;
47422+}
47423+
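+/* apply the named special role to the caller's parent task, tagging it
+   with a fresh acl_role_id so that a later gr_set_acls(1) can revert
+   exactly the tasks that inherited this role
+*/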
47424+static void
47425+assign_special_role(char *rolename)
47426+{
47427+ struct acl_object_label *obj;
47428+ struct acl_role_label *r;
47429+ struct acl_role_label *assigned = NULL;
47430+ struct task_struct *tsk;
47431+ struct file *filp;
47432+
47433+ FOR_EACH_ROLE_START(r)
47434+ if (!strcmp(rolename, r->rolename) &&
47435+ (r->roletype & GR_ROLE_SPECIAL)) {
47436+ assigned = r;
47437+ break;
47438+ }
47439+ FOR_EACH_ROLE_END(r)
47440+
47441+ if (!assigned)
47442+ return;
47443+
47444+ read_lock(&tasklist_lock);
47445+ read_lock(&grsec_exec_file_lock);
47446+
47447+ tsk = current->real_parent;
47448+ if (tsk == NULL)
47449+ goto out_unlock;
47450+
47451+ filp = tsk->exec_file;
47452+ if (filp == NULL)
47453+ goto out_unlock;
47454+
47455+ tsk->is_writable = 0;
47456+
47457+ tsk->acl_sp_role = 1;
47458+ tsk->acl_role_id = ++acl_sp_role_value;
47459+ tsk->role = assigned;
47460+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
47461+
47462+ /* ignore additional mmap checks for processes that are writable
47463+ by the default ACL */
47464+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47465+ if (unlikely(obj->mode & GR_WRITE))
47466+ tsk->is_writable = 1;
47467+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
47468+ if (unlikely(obj->mode & GR_WRITE))
47469+ tsk->is_writable = 1;
47470+
47471+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47472+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
47473+#endif
47474+
47475+out_unlock:
47476+ read_unlock(&grsec_exec_file_lock);
47477+ read_unlock(&tasklist_lock);
47478+ return;
47479+}
47480+
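+/* returns 1 if the task's terminal is considered secure, 0 if some other
+   process outside the task's ancestry and outside its tty session holds an
+   open descriptor to the same tty device; the offending process is logged
+   and alert-killed
+*/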
47481+int gr_check_secure_terminal(struct task_struct *task)
47482+{
47483+ struct task_struct *p, *p2, *p3;
47484+ struct files_struct *files;
47485+ struct fdtable *fdt;
47486+ struct file *our_file = NULL, *file;
47487+ int i;
47488+
47489+ if (task->signal->tty == NULL)
47490+ return 1;
47491+
47492+ files = get_files_struct(task);
47493+ if (files != NULL) {
47494+ rcu_read_lock();
47495+ fdt = files_fdtable(files);
47496+ for (i=0; i < fdt->max_fds; i++) {
47497+ file = fcheck_files(files, i);
47498+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
47499+ get_file(file);
47500+ our_file = file;
47501+ }
47502+ }
47503+ rcu_read_unlock();
47504+ put_files_struct(files);
47505+ }
47506+
47507+ if (our_file == NULL)
47508+ return 1;
47509+
47510+ read_lock(&tasklist_lock);
47511+ do_each_thread(p2, p) {
47512+ files = get_files_struct(p);
47513+ if (files == NULL ||
47514+ (p->signal && p->signal->tty == task->signal->tty)) {
47515+ if (files != NULL)
47516+ put_files_struct(files);
47517+ continue;
47518+ }
47519+ rcu_read_lock();
47520+ fdt = files_fdtable(files);
47521+ for (i=0; i < fdt->max_fds; i++) {
47522+ file = fcheck_files(files, i);
47523+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
47524+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
47525+ p3 = task;
47526+ while (p3->pid > 0) {
47527+ if (p3 == p)
47528+ break;
47529+ p3 = p3->real_parent;
47530+ }
47531+ if (p3 == p)
47532+ break;
47533+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
47534+ gr_handle_alertkill(p);
47535+ rcu_read_unlock();
47536+ put_files_struct(files);
47537+ read_unlock(&tasklist_lock);
47538+ fput(our_file);
47539+ return 0;
47540+ }
47541+ }
47542+ rcu_read_unlock();
47543+ put_files_struct(files);
47544+ } while_each_thread(p2, p);
47545+ read_unlock(&tasklist_lock);
47546+
47547+ fput(our_file);
47548+ return 1;
47549+}
47550+
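+/* write handler for the grsecurity RBAC control interface: userland
+   submits a struct gr_arg_wrapper and we dispatch on gr_usermode->mode
+   (GR_STATUS, GR_SHUTDOWN, GR_ENABLE, GR_RELOAD, GR_SEGVMOD, GR_SPROLE,
+   GR_SPROLEPAM, GR_UNSPROLE); repeated authentication failures are
+   rate-limited via gr_auth_attempts/gr_auth_expires using
+   CONFIG_GRKERNSEC_ACL_MAXTRIES and CONFIG_GRKERNSEC_ACL_TIMEOUT
+*/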
47551+ssize_t
47552+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
47553+{
47554+ struct gr_arg_wrapper uwrap;
47555+ unsigned char *sprole_salt = NULL;
47556+ unsigned char *sprole_sum = NULL;
47557+ int error = sizeof (struct gr_arg_wrapper);
47558+ int error2 = 0;
47559+
47560+ mutex_lock(&gr_dev_mutex);
47561+
47562+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
47563+ error = -EPERM;
47564+ goto out;
47565+ }
47566+
47567+ if (count != sizeof (struct gr_arg_wrapper)) {
47568+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
47569+ error = -EINVAL;
47570+ goto out;
47571+ }
47572+
47573+
47574+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
47575+ gr_auth_expires = 0;
47576+ gr_auth_attempts = 0;
47577+ }
47578+
47579+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
47580+ error = -EFAULT;
47581+ goto out;
47582+ }
47583+
47584+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
47585+ error = -EINVAL;
47586+ goto out;
47587+ }
47588+
47589+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
47590+ error = -EFAULT;
47591+ goto out;
47592+ }
47593+
47594+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
47595+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
47596+ time_after(gr_auth_expires, get_seconds())) {
47597+ error = -EBUSY;
47598+ goto out;
47599+ }
47600+
47601+	/* if a non-root user is trying to do anything other than use a
47602+	   special role, do not attempt authentication and do not count
47603+	   the attempt toward authentication lockout
47604+	*/
47605+
47606+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
47607+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
47608+ current_uid()) {
47609+ error = -EPERM;
47610+ goto out;
47611+ }
47612+
47613+ /* ensure pw and special role name are null terminated */
47614+
47615+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
47616+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
47617+
47618+	/* Okay.
47619+	 * We have enough of the argument structure (we have yet
47620+	 * to copy_from_user the tables themselves).  Copy the tables
47621+	 * only if we need them, i.e. for loading operations. */
47622+
47623+ switch (gr_usermode->mode) {
47624+ case GR_STATUS:
47625+ if (gr_status & GR_READY) {
47626+ error = 1;
47627+ if (!gr_check_secure_terminal(current))
47628+ error = 3;
47629+ } else
47630+ error = 2;
47631+ goto out;
47632+ case GR_SHUTDOWN:
47633+ if ((gr_status & GR_READY)
47634+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47635+ pax_open_kernel();
47636+ gr_status &= ~GR_READY;
47637+ pax_close_kernel();
47638+
47639+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
47640+ free_variables();
47641+ memset(gr_usermode, 0, sizeof (struct gr_arg));
47642+ memset(gr_system_salt, 0, GR_SALT_LEN);
47643+ memset(gr_system_sum, 0, GR_SHA_LEN);
47644+ } else if (gr_status & GR_READY) {
47645+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
47646+ error = -EPERM;
47647+ } else {
47648+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
47649+ error = -EAGAIN;
47650+ }
47651+ break;
47652+ case GR_ENABLE:
47653+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
47654+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
47655+ else {
47656+ if (gr_status & GR_READY)
47657+ error = -EAGAIN;
47658+ else
47659+ error = error2;
47660+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
47661+ }
47662+ break;
47663+ case GR_RELOAD:
47664+ if (!(gr_status & GR_READY)) {
47665+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
47666+ error = -EAGAIN;
47667+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47668+ lock_kernel();
47669+
47670+ pax_open_kernel();
47671+ gr_status &= ~GR_READY;
47672+ pax_close_kernel();
47673+
47674+ free_variables();
47675+ if (!(error2 = gracl_init(gr_usermode))) {
47676+ unlock_kernel();
47677+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
47678+ } else {
47679+ unlock_kernel();
47680+ error = error2;
47681+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
47682+ }
47683+ } else {
47684+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
47685+ error = -EPERM;
47686+ }
47687+ break;
47688+ case GR_SEGVMOD:
47689+ if (unlikely(!(gr_status & GR_READY))) {
47690+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
47691+ error = -EAGAIN;
47692+ break;
47693+ }
47694+
47695+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
47696+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
47697+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
47698+ struct acl_subject_label *segvacl;
47699+ segvacl =
47700+ lookup_acl_subj_label(gr_usermode->segv_inode,
47701+ gr_usermode->segv_device,
47702+ current->role);
47703+ if (segvacl) {
47704+ segvacl->crashes = 0;
47705+ segvacl->expires = 0;
47706+ }
47707+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
47708+ gr_remove_uid(gr_usermode->segv_uid);
47709+ }
47710+ } else {
47711+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
47712+ error = -EPERM;
47713+ }
47714+ break;
47715+ case GR_SPROLE:
47716+ case GR_SPROLEPAM:
47717+ if (unlikely(!(gr_status & GR_READY))) {
47718+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
47719+ error = -EAGAIN;
47720+ break;
47721+ }
47722+
47723+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
47724+ current->role->expires = 0;
47725+ current->role->auth_attempts = 0;
47726+ }
47727+
47728+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
47729+ time_after(current->role->expires, get_seconds())) {
47730+ error = -EBUSY;
47731+ goto out;
47732+ }
47733+
47734+ if (lookup_special_role_auth
47735+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
47736+ && ((!sprole_salt && !sprole_sum)
47737+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
47738+ char *p = "";
47739+ assign_special_role(gr_usermode->sp_role);
47740+ read_lock(&tasklist_lock);
47741+ if (current->real_parent)
47742+ p = current->real_parent->role->rolename;
47743+ read_unlock(&tasklist_lock);
47744+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
47745+ p, acl_sp_role_value);
47746+ } else {
47747+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
47748+ error = -EPERM;
47749+ if(!(current->role->auth_attempts++))
47750+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47751+
47752+ goto out;
47753+ }
47754+ break;
47755+ case GR_UNSPROLE:
47756+ if (unlikely(!(gr_status & GR_READY))) {
47757+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
47758+ error = -EAGAIN;
47759+ break;
47760+ }
47761+
47762+ if (current->role->roletype & GR_ROLE_SPECIAL) {
47763+ char *p = "";
47764+ int i = 0;
47765+
47766+ read_lock(&tasklist_lock);
47767+ if (current->real_parent) {
47768+ p = current->real_parent->role->rolename;
47769+ i = current->real_parent->acl_role_id;
47770+ }
47771+ read_unlock(&tasklist_lock);
47772+
47773+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
47774+ gr_set_acls(1);
47775+ } else {
47776+ error = -EPERM;
47777+ goto out;
47778+ }
47779+ break;
47780+ default:
47781+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
47782+ error = -EINVAL;
47783+ break;
47784+ }
47785+
47786+ if (error != -EPERM)
47787+ goto out;
47788+
47789+ if(!(gr_auth_attempts++))
47790+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
47791+
47792+ out:
47793+ mutex_unlock(&gr_dev_mutex);
47794+ return error;
47795+}
47796+
47797+/* must be called with
47798+ rcu_read_lock();
47799+ read_lock(&tasklist_lock);
47800+ read_lock(&grsec_exec_file_lock);
47801+*/
47802+int gr_apply_subject_to_task(struct task_struct *task)
47803+{
47804+ struct acl_object_label *obj;
47805+ char *tmpname;
47806+ struct acl_subject_label *tmpsubj;
47807+ struct file *filp;
47808+ struct name_entry *nmatch;
47809+
47810+ filp = task->exec_file;
47811+ if (filp == NULL)
47812+ return 0;
47813+
47814+	/* the following is to apply the correct subject
47815+	   to binaries already running when the RBAC system
47816+	   is enabled, if those binaries have been
47817+	   replaced or deleted since their execution
47818+	   -----
47819+	   when the RBAC system starts, the inode/dev
47820+	   from exec_file will be one that the RBAC system
47821+	   is unaware of.  It only knows the inode/dev
47822+	   of the file currently present on disk, or the
47823+	   absence of it.
47824+	*/
47825+ preempt_disable();
47826+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
47827+
47828+ nmatch = lookup_name_entry(tmpname);
47829+ preempt_enable();
47830+ tmpsubj = NULL;
47831+ if (nmatch) {
47832+ if (nmatch->deleted)
47833+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
47834+ else
47835+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
47836+ if (tmpsubj != NULL)
47837+ task->acl = tmpsubj;
47838+ }
47839+ if (tmpsubj == NULL)
47840+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
47841+ task->role);
47842+ if (task->acl) {
47843+ task->is_writable = 0;
47844+ /* ignore additional mmap checks for processes that are writable
47845+ by the default ACL */
47846+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
47847+ if (unlikely(obj->mode & GR_WRITE))
47848+ task->is_writable = 1;
47849+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
47850+ if (unlikely(obj->mode & GR_WRITE))
47851+ task->is_writable = 1;
47852+
47853+ gr_set_proc_res(task);
47854+
47855+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
47856+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
47857+#endif
47858+ } else {
47859+ return 1;
47860+ }
47861+
47862+ return 0;
47863+}
47864+
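+/* re-evaluate the role and subject of every task; when type is non-zero
+   (the special-role exit path) only tasks still carrying the caller's role
+   and acl_role_id are touched.  Returns non-zero if a task could not be
+   assigned a subject label
+*/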
47865+int
47866+gr_set_acls(const int type)
47867+{
47868+ struct task_struct *task, *task2;
47869+ struct acl_role_label *role = current->role;
47870+ __u16 acl_role_id = current->acl_role_id;
47871+ const struct cred *cred;
47872+ int ret;
47873+
47874+ rcu_read_lock();
47875+ read_lock(&tasklist_lock);
47876+ read_lock(&grsec_exec_file_lock);
47877+ do_each_thread(task2, task) {
47878+		/* check to see if we're called from the exit handler;
47879+		   if so, only replace ACLs that have inherited the admin
47880+		   ACL */
47881+
47882+ if (type && (task->role != role ||
47883+ task->acl_role_id != acl_role_id))
47884+ continue;
47885+
47886+ task->acl_role_id = 0;
47887+ task->acl_sp_role = 0;
47888+
47889+ if (task->exec_file) {
47890+ cred = __task_cred(task);
47891+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
47892+
47893+ ret = gr_apply_subject_to_task(task);
47894+ if (ret) {
47895+ read_unlock(&grsec_exec_file_lock);
47896+ read_unlock(&tasklist_lock);
47897+ rcu_read_unlock();
47898+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
47899+ return ret;
47900+ }
47901+ } else {
47902+ // it's a kernel process
47903+ task->role = kernel_role;
47904+ task->acl = kernel_role->root_label;
47905+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
47906+ task->acl->mode &= ~GR_PROCFIND;
47907+#endif
47908+ }
47909+ } while_each_thread(task2, task);
47910+ read_unlock(&grsec_exec_file_lock);
47911+ read_unlock(&tasklist_lock);
47912+ rcu_read_unlock();
47913+
47914+ return 0;
47915+}
47916+
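+/* for subjects in learning mode, when a request exceeds the learned soft
+   limit for a resource, raise that limit (and the hard limit if needed)
+   by a per-resource bump and emit a learning log entry; tasks not in
+   learning mode only get the optional CONFIG_GRKERNSEC_RESLOG logging
+*/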
47917+void
47918+gr_learn_resource(const struct task_struct *task,
47919+ const int res, const unsigned long wanted, const int gt)
47920+{
47921+ struct acl_subject_label *acl;
47922+ const struct cred *cred;
47923+
47924+ if (unlikely((gr_status & GR_READY) &&
47925+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
47926+ goto skip_reslog;
47927+
47928+#ifdef CONFIG_GRKERNSEC_RESLOG
47929+ gr_log_resource(task, res, wanted, gt);
47930+#endif
47931+ skip_reslog:
47932+
47933+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
47934+ return;
47935+
47936+ acl = task->acl;
47937+
47938+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
47939+ !(acl->resmask & (1 << (unsigned short) res))))
47940+ return;
47941+
47942+ if (wanted >= acl->res[res].rlim_cur) {
47943+ unsigned long res_add;
47944+
47945+ res_add = wanted;
47946+ switch (res) {
47947+ case RLIMIT_CPU:
47948+ res_add += GR_RLIM_CPU_BUMP;
47949+ break;
47950+ case RLIMIT_FSIZE:
47951+ res_add += GR_RLIM_FSIZE_BUMP;
47952+ break;
47953+ case RLIMIT_DATA:
47954+ res_add += GR_RLIM_DATA_BUMP;
47955+ break;
47956+ case RLIMIT_STACK:
47957+ res_add += GR_RLIM_STACK_BUMP;
47958+ break;
47959+ case RLIMIT_CORE:
47960+ res_add += GR_RLIM_CORE_BUMP;
47961+ break;
47962+ case RLIMIT_RSS:
47963+ res_add += GR_RLIM_RSS_BUMP;
47964+ break;
47965+ case RLIMIT_NPROC:
47966+ res_add += GR_RLIM_NPROC_BUMP;
47967+ break;
47968+ case RLIMIT_NOFILE:
47969+ res_add += GR_RLIM_NOFILE_BUMP;
47970+ break;
47971+ case RLIMIT_MEMLOCK:
47972+ res_add += GR_RLIM_MEMLOCK_BUMP;
47973+ break;
47974+ case RLIMIT_AS:
47975+ res_add += GR_RLIM_AS_BUMP;
47976+ break;
47977+ case RLIMIT_LOCKS:
47978+ res_add += GR_RLIM_LOCKS_BUMP;
47979+ break;
47980+ case RLIMIT_SIGPENDING:
47981+ res_add += GR_RLIM_SIGPENDING_BUMP;
47982+ break;
47983+ case RLIMIT_MSGQUEUE:
47984+ res_add += GR_RLIM_MSGQUEUE_BUMP;
47985+ break;
47986+ case RLIMIT_NICE:
47987+ res_add += GR_RLIM_NICE_BUMP;
47988+ break;
47989+ case RLIMIT_RTPRIO:
47990+ res_add += GR_RLIM_RTPRIO_BUMP;
47991+ break;
47992+ case RLIMIT_RTTIME:
47993+ res_add += GR_RLIM_RTTIME_BUMP;
47994+ break;
47995+ }
47996+
47997+ acl->res[res].rlim_cur = res_add;
47998+
47999+ if (wanted > acl->res[res].rlim_max)
48000+ acl->res[res].rlim_max = res_add;
48001+
48002+ /* only log the subject filename, since resource logging is supported for
48003+ single-subject learning only */
48004+ rcu_read_lock();
48005+ cred = __task_cred(task);
48006+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
48007+ task->role->roletype, cred->uid, cred->gid, acl->filename,
48008+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
48009+ "", (unsigned long) res, &task->signal->saved_ip);
48010+ rcu_read_unlock();
48011+ }
48012+
48013+ return;
48014+}
48015+
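+/* with CONFIG_PAX_HAVE_ACL_FLAGS, translate the subject's
+   GR_PAX_{DISABLE,ENABLE}_* policy flags into the task's MF_PAX_* flags
+   at exec time, applying explicit disables before explicit enables
+*/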
48016+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
48017+void
48018+pax_set_initial_flags(struct linux_binprm *bprm)
48019+{
48020+ struct task_struct *task = current;
48021+ struct acl_subject_label *proc;
48022+ unsigned long flags;
48023+
48024+ if (unlikely(!(gr_status & GR_READY)))
48025+ return;
48026+
48027+ flags = pax_get_flags(task);
48028+
48029+ proc = task->acl;
48030+
48031+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
48032+ flags &= ~MF_PAX_PAGEEXEC;
48033+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
48034+ flags &= ~MF_PAX_SEGMEXEC;
48035+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
48036+ flags &= ~MF_PAX_RANDMMAP;
48037+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
48038+ flags &= ~MF_PAX_EMUTRAMP;
48039+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
48040+ flags &= ~MF_PAX_MPROTECT;
48041+
48042+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
48043+ flags |= MF_PAX_PAGEEXEC;
48044+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
48045+ flags |= MF_PAX_SEGMEXEC;
48046+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
48047+ flags |= MF_PAX_RANDMMAP;
48048+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
48049+ flags |= MF_PAX_EMUTRAMP;
48050+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
48051+ flags |= MF_PAX_MPROTECT;
48052+
48053+ pax_set_flags(task, flags);
48054+
48055+ return;
48056+}
48057+#endif
48058+
48059+#ifdef CONFIG_SYSCTL
48060+/* Eric Biederman likes breaking userland ABI and every inode-based security
48061+ system to save 35kb of memory */
48062+
48063+/* we modify the passed in filename, but adjust it back before returning */
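+/* resolve a sysctl path to the most specific object label visible to the
+   current subject: try the full path first, then strip one trailing
+   component at a time, consulting the subject and its parent subjects
+   (including any globbed objects) at each step
+*/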
48064+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
48065+{
48066+ struct name_entry *nmatch;
48067+ char *p, *lastp = NULL;
48068+ struct acl_object_label *obj = NULL, *tmp;
48069+ struct acl_subject_label *tmpsubj;
48070+ char c = '\0';
48071+
48072+ read_lock(&gr_inode_lock);
48073+
48074+ p = name + len - 1;
48075+ do {
48076+ nmatch = lookup_name_entry(name);
48077+ if (lastp != NULL)
48078+ *lastp = c;
48079+
48080+ if (nmatch == NULL)
48081+ goto next_component;
48082+ tmpsubj = current->acl;
48083+ do {
48084+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
48085+ if (obj != NULL) {
48086+ tmp = obj->globbed;
48087+ while (tmp) {
48088+ if (!glob_match(tmp->filename, name)) {
48089+ obj = tmp;
48090+ goto found_obj;
48091+ }
48092+ tmp = tmp->next;
48093+ }
48094+ goto found_obj;
48095+ }
48096+ } while ((tmpsubj = tmpsubj->parent_subject));
48097+next_component:
48098+ /* end case */
48099+ if (p == name)
48100+ break;
48101+
48102+ while (*p != '/')
48103+ p--;
48104+ if (p == name)
48105+ lastp = p + 1;
48106+ else {
48107+ lastp = p;
48108+ p--;
48109+ }
48110+ c = *lastp;
48111+ *lastp = '\0';
48112+ } while (1);
48113+found_obj:
48114+ read_unlock(&gr_inode_lock);
48115+ /* obj returned will always be non-null */
48116+ return obj;
48117+}
48118+
48119+/* returns 0 when allowing, non-zero on error.
48120+   An op of 0 is used for readdir, so we don't log the names of hidden files.
48121+*/
48122+__u32
48123+gr_handle_sysctl(const struct ctl_table *table, const int op)
48124+{
48125+ ctl_table *tmp;
48126+ const char *proc_sys = "/proc/sys";
48127+ char *path;
48128+ struct acl_object_label *obj;
48129+ unsigned short len = 0, pos = 0, depth = 0, i;
48130+ __u32 err = 0;
48131+ __u32 mode = 0;
48132+
48133+ if (unlikely(!(gr_status & GR_READY)))
48134+ return 0;
48135+
48136+	/* for now, ignore operations on non-leaf sysctl entries if it's not
48137+	   a readdir */
48138+ if (table->child != NULL && op != 0)
48139+ return 0;
48140+
48141+ mode |= GR_FIND;
48142+ /* it's only a read if it's an entry, read on dirs is for readdir */
48143+ if (op & MAY_READ)
48144+ mode |= GR_READ;
48145+ if (op & MAY_WRITE)
48146+ mode |= GR_WRITE;
48147+
48148+ preempt_disable();
48149+
48150+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
48151+
48152+	/* it's only a read/write if it's an actual entry, not a dir
48153+	   (dirs are only opened for readdir)
48154+	*/
48155+
48156+ /* convert the requested sysctl entry into a pathname */
48157+
48158+ for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
48159+ len += strlen(tmp->procname);
48160+ len++;
48161+ depth++;
48162+ }
48163+
48164+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
48165+ /* deny */
48166+ goto out;
48167+ }
48168+
48169+ memset(path, 0, PAGE_SIZE);
48170+
48171+ memcpy(path, proc_sys, strlen(proc_sys));
48172+
48173+ pos += strlen(proc_sys);
48174+
48175+ for (; depth > 0; depth--) {
48176+ path[pos] = '/';
48177+ pos++;
48178+ for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
48179+ if (depth == i) {
48180+ memcpy(path + pos, tmp->procname,
48181+ strlen(tmp->procname));
48182+ pos += strlen(tmp->procname);
48183+ }
48184+ i++;
48185+ }
48186+ }
48187+
48188+ obj = gr_lookup_by_name(path, pos);
48189+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
48190+
48191+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
48192+ ((err & mode) != mode))) {
48193+ __u32 new_mode = mode;
48194+
48195+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
48196+
48197+ err = 0;
48198+ gr_log_learn_sysctl(path, new_mode);
48199+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
48200+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
48201+ err = -ENOENT;
48202+ } else if (!(err & GR_FIND)) {
48203+ err = -ENOENT;
48204+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
48205+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
48206+ path, (mode & GR_READ) ? " reading" : "",
48207+ (mode & GR_WRITE) ? " writing" : "");
48208+ err = -EACCES;
48209+ } else if ((err & mode) != mode) {
48210+ err = -EACCES;
48211+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
48212+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
48213+ path, (mode & GR_READ) ? " reading" : "",
48214+ (mode & GR_WRITE) ? " writing" : "");
48215+ err = 0;
48216+ } else
48217+ err = 0;
48218+
48219+ out:
48220+ preempt_enable();
48221+
48222+ return err;
48223+}
48224+#endif
48225+
48226+int
48227+gr_handle_proc_ptrace(struct task_struct *task)
48228+{
48229+ struct file *filp;
48230+ struct task_struct *tmp = task;
48231+ struct task_struct *curtemp = current;
48232+ __u32 retmode;
48233+
48234+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
48235+ if (unlikely(!(gr_status & GR_READY)))
48236+ return 0;
48237+#endif
48238+
48239+ read_lock(&tasklist_lock);
48240+ read_lock(&grsec_exec_file_lock);
48241+ filp = task->exec_file;
48242+
48243+ while (tmp->pid > 0) {
48244+ if (tmp == curtemp)
48245+ break;
48246+ tmp = tmp->real_parent;
48247+ }
48248+
48249+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
48250+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
48251+ read_unlock(&grsec_exec_file_lock);
48252+ read_unlock(&tasklist_lock);
48253+ return 1;
48254+ }
48255+
48256+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
48257+ if (!(gr_status & GR_READY)) {
48258+ read_unlock(&grsec_exec_file_lock);
48259+ read_unlock(&tasklist_lock);
48260+ return 0;
48261+ }
48262+#endif
48263+
48264+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
48265+ read_unlock(&grsec_exec_file_lock);
48266+ read_unlock(&tasklist_lock);
48267+
48268+ if (retmode & GR_NOPTRACE)
48269+ return 1;
48270+
48271+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
48272+ && (current->acl != task->acl || (current->acl != current->role->root_label
48273+ && current->pid != task->pid)))
48274+ return 1;
48275+
48276+ return 0;
48277+}
48278+
48279+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
48280+{
48281+ if (unlikely(!(gr_status & GR_READY)))
48282+ return;
48283+
48284+ if (!(current->role->roletype & GR_ROLE_GOD))
48285+ return;
48286+
48287+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
48288+ p->role->rolename, gr_task_roletype_to_char(p),
48289+ p->acl->filename);
48290+}
48291+
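+/* RBAC/HARDEN_PTRACE policy for ptrace requests: a target that is not a
+   descendant of the tracer is denied for unprivileged users under
+   HARDEN_PTRACE (RBAC inactive) or for subjects lacking GR_RELAXPTRACE
+   (RBAC active); with RBAC active, GR_NOPTRACE on the target's binary
+   denies outright, GR_PTRACERD permits only read-style requests, and
+   otherwise the tracer must share the target's subject, carry
+   GR_POVERRIDE, or hold an admin role.  Returns non-zero to deny
+*/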
48292+int
48293+gr_handle_ptrace(struct task_struct *task, const long request)
48294+{
48295+ struct task_struct *tmp = task;
48296+ struct task_struct *curtemp = current;
48297+ __u32 retmode;
48298+
48299+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
48300+ if (unlikely(!(gr_status & GR_READY)))
48301+ return 0;
48302+#endif
48303+
48304+ read_lock(&tasklist_lock);
48305+ while (tmp->pid > 0) {
48306+ if (tmp == curtemp)
48307+ break;
48308+ tmp = tmp->real_parent;
48309+ }
48310+
48311+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
48312+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
48313+ read_unlock(&tasklist_lock);
48314+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
48315+ return 1;
48316+ }
48317+ read_unlock(&tasklist_lock);
48318+
48319+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
48320+ if (!(gr_status & GR_READY))
48321+ return 0;
48322+#endif
48323+
48324+ read_lock(&grsec_exec_file_lock);
48325+ if (unlikely(!task->exec_file)) {
48326+ read_unlock(&grsec_exec_file_lock);
48327+ return 0;
48328+ }
48329+
48330+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
48331+ read_unlock(&grsec_exec_file_lock);
48332+
48333+ if (retmode & GR_NOPTRACE) {
48334+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
48335+ return 1;
48336+ }
48337+
48338+ if (retmode & GR_PTRACERD) {
48339+ switch (request) {
48340+ case PTRACE_POKETEXT:
48341+ case PTRACE_POKEDATA:
48342+ case PTRACE_POKEUSR:
48343+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
48344+ case PTRACE_SETREGS:
48345+ case PTRACE_SETFPREGS:
48346+#endif
48347+#ifdef CONFIG_X86
48348+ case PTRACE_SETFPXREGS:
48349+#endif
48350+#ifdef CONFIG_ALTIVEC
48351+ case PTRACE_SETVRREGS:
48352+#endif
48353+ return 1;
48354+ default:
48355+ return 0;
48356+ }
48357+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
48358+ !(current->role->roletype & GR_ROLE_GOD) &&
48359+ (current->acl != task->acl)) {
48360+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
48361+ return 1;
48362+ }
48363+
48364+ return 0;
48365+}
48366+
48367+static int is_writable_mmap(const struct file *filp)
48368+{
48369+ struct task_struct *task = current;
48370+ struct acl_object_label *obj, *obj2;
48371+
48372+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
48373+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
48374+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
48375+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
48376+ task->role->root_label);
48377+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
48378+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
48379+ return 1;
48380+ }
48381+ }
48382+ return 0;
48383+}
48384+
48385+int
48386+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
48387+{
48388+ __u32 mode;
48389+
48390+ if (unlikely(!file || !(prot & PROT_EXEC)))
48391+ return 1;
48392+
48393+ if (is_writable_mmap(file))
48394+ return 0;
48395+
48396+ mode =
48397+ gr_search_file(file->f_path.dentry,
48398+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
48399+ file->f_path.mnt);
48400+
48401+ if (!gr_tpe_allow(file))
48402+ return 0;
48403+
48404+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
48405+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48406+ return 0;
48407+ } else if (unlikely(!(mode & GR_EXEC))) {
48408+ return 0;
48409+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
48410+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48411+ return 1;
48412+ }
48413+
48414+ return 1;
48415+}
48416+
48417+int
48418+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
48419+{
48420+ __u32 mode;
48421+
48422+ if (unlikely(!file || !(prot & PROT_EXEC)))
48423+ return 1;
48424+
48425+ if (is_writable_mmap(file))
48426+ return 0;
48427+
48428+ mode =
48429+ gr_search_file(file->f_path.dentry,
48430+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
48431+ file->f_path.mnt);
48432+
48433+ if (!gr_tpe_allow(file))
48434+ return 0;
48435+
48436+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
48437+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48438+ return 0;
48439+ } else if (unlikely(!(mode & GR_EXEC))) {
48440+ return 0;
48441+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
48442+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
48443+ return 1;
48444+ }
48445+
48446+ return 1;
48447+}
48448+
48449+void
48450+gr_acl_handle_psacct(struct task_struct *task, const long code)
48451+{
48452+ unsigned long runtime;
48453+ unsigned long cputime;
48454+ unsigned int wday, cday;
48455+ __u8 whr, chr;
48456+ __u8 wmin, cmin;
48457+ __u8 wsec, csec;
48458+ struct timespec timeval;
48459+
48460+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
48461+ !(task->acl->mode & GR_PROCACCT)))
48462+ return;
48463+
48464+ do_posix_clock_monotonic_gettime(&timeval);
48465+ runtime = timeval.tv_sec - task->start_time.tv_sec;
48466+ wday = runtime / (3600 * 24);
48467+ runtime -= wday * (3600 * 24);
48468+ whr = runtime / 3600;
48469+ runtime -= whr * 3600;
48470+ wmin = runtime / 60;
48471+ runtime -= wmin * 60;
48472+ wsec = runtime;
48473+
48474+ cputime = (task->utime + task->stime) / HZ;
48475+ cday = cputime / (3600 * 24);
48476+ cputime -= cday * (3600 * 24);
48477+ chr = cputime / 3600;
48478+ cputime -= chr * 3600;
48479+ cmin = cputime / 60;
48480+ cputime -= cmin * 60;
48481+ csec = cputime;
48482+
48483+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
48484+
48485+ return;
48486+}
48487+
48488+void gr_set_kernel_label(struct task_struct *task)
48489+{
48490+ if (gr_status & GR_READY) {
48491+ task->role = kernel_role;
48492+ task->acl = kernel_role->root_label;
48493+ }
48494+ return;
48495+}
48496+
48497+#ifdef CONFIG_TASKSTATS
48498+int gr_is_taskstats_denied(int pid)
48499+{
48500+ struct task_struct *task;
48501+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48502+ const struct cred *cred;
48503+#endif
48504+ int ret = 0;
48505+
48506+ /* restrict taskstats viewing to un-chrooted root users
48507+ who have the 'view' subject flag if the RBAC system is enabled
48508+ */
48509+
48510+ rcu_read_lock();
48511+ read_lock(&tasklist_lock);
48512+ task = find_task_by_vpid(pid);
48513+ if (task) {
48514+#ifdef CONFIG_GRKERNSEC_CHROOT
48515+ if (proc_is_chrooted(task))
48516+ ret = -EACCES;
48517+#endif
48518+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48519+ cred = __task_cred(task);
48520+#ifdef CONFIG_GRKERNSEC_PROC_USER
48521+ if (cred->uid != 0)
48522+ ret = -EACCES;
48523+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
48524+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
48525+ ret = -EACCES;
48526+#endif
48527+#endif
48528+ if (gr_status & GR_READY) {
48529+ if (!(task->acl->mode & GR_VIEW))
48530+ ret = -EACCES;
48531+ }
48532+ } else
48533+ ret = -ENOENT;
48534+
48535+ read_unlock(&tasklist_lock);
48536+ rcu_read_unlock();
48537+
48538+ return ret;
48539+}
48540+#endif
48541+
48542+/* AUXV entries are filled via a descendant of search_binary_handler
48543+ after we've already applied the subject for the target
48544+*/
48545+int gr_acl_enable_at_secure(void)
48546+{
48547+ if (unlikely(!(gr_status & GR_READY)))
48548+ return 0;
48549+
48550+ if (current->acl->mode & GR_ATSECURE)
48551+ return 1;
48552+
48553+ return 0;
48554+}
48555+
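+/* called while filling a readdir buffer to decide whether an entry should
+   be visible: walk the subject chain for an object matching the entry's
+   inode/device, and if the directory's object has globbed entries attached,
+   build the full pathname and test it against each glob.  Returns 1 when
+   the entry may be shown
+*/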
48556+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
48557+{
48558+ struct task_struct *task = current;
48559+ struct dentry *dentry = file->f_path.dentry;
48560+ struct vfsmount *mnt = file->f_path.mnt;
48561+ struct acl_object_label *obj, *tmp;
48562+ struct acl_subject_label *subj;
48563+ unsigned int bufsize;
48564+ int is_not_root;
48565+ char *path;
48566+ dev_t dev = __get_dev(dentry);
48567+
48568+ if (unlikely(!(gr_status & GR_READY)))
48569+ return 1;
48570+
48571+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
48572+ return 1;
48573+
48574+ /* ignore Eric Biederman */
48575+ if (IS_PRIVATE(dentry->d_inode))
48576+ return 1;
48577+
48578+ subj = task->acl;
48579+ do {
48580+ obj = lookup_acl_obj_label(ino, dev, subj);
48581+ if (obj != NULL)
48582+ return (obj->mode & GR_FIND) ? 1 : 0;
48583+ } while ((subj = subj->parent_subject));
48584+
48585+	/* this is purely an optimization, since we're looking up an object
48586+	   for the directory we're doing a readdir on.
48587+	   If it's possible for any globbed object to match the entry we're
48588+	   filling into the directory, then the object we find here will be
48589+	   an anchor point with attached globbed objects.
48590+	*/
48591+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
48592+ if (obj->globbed == NULL)
48593+ return (obj->mode & GR_FIND) ? 1 : 0;
48594+
48595+ is_not_root = ((obj->filename[0] == '/') &&
48596+ (obj->filename[1] == '\0')) ? 0 : 1;
48597+ bufsize = PAGE_SIZE - namelen - is_not_root;
48598+
48599+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
48600+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
48601+ return 1;
48602+
48603+ preempt_disable();
48604+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
48605+ bufsize);
48606+
48607+ bufsize = strlen(path);
48608+
48609+ /* if base is "/", don't append an additional slash */
48610+ if (is_not_root)
48611+ *(path + bufsize) = '/';
48612+ memcpy(path + bufsize + is_not_root, name, namelen);
48613+ *(path + bufsize + namelen + is_not_root) = '\0';
48614+
48615+ tmp = obj->globbed;
48616+ while (tmp) {
48617+ if (!glob_match(tmp->filename, path)) {
48618+ preempt_enable();
48619+ return (tmp->mode & GR_FIND) ? 1 : 0;
48620+ }
48621+ tmp = tmp->next;
48622+ }
48623+ preempt_enable();
48624+ return (obj->mode & GR_FIND) ? 1 : 0;
48625+}
48626+
48627+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
48628+EXPORT_SYMBOL(gr_acl_is_enabled);
48629+#endif
48630+EXPORT_SYMBOL(gr_learn_resource);
48631+EXPORT_SYMBOL(gr_set_kernel_label);
48632+#ifdef CONFIG_SECURITY
48633+EXPORT_SYMBOL(gr_check_user_change);
48634+EXPORT_SYMBOL(gr_check_group_change);
48635+#endif
48636+
48637diff -urNp linux-2.6.32.43/grsecurity/gracl_cap.c linux-2.6.32.43/grsecurity/gracl_cap.c
48638--- linux-2.6.32.43/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
48639+++ linux-2.6.32.43/grsecurity/gracl_cap.c 2011-04-17 15:56:46.000000000 -0400
48640@@ -0,0 +1,138 @@
48641+#include <linux/kernel.h>
48642+#include <linux/module.h>
48643+#include <linux/sched.h>
48644+#include <linux/gracl.h>
48645+#include <linux/grsecurity.h>
48646+#include <linux/grinternal.h>
48647+
48648+static const char *captab_log[] = {
48649+ "CAP_CHOWN",
48650+ "CAP_DAC_OVERRIDE",
48651+ "CAP_DAC_READ_SEARCH",
48652+ "CAP_FOWNER",
48653+ "CAP_FSETID",
48654+ "CAP_KILL",
48655+ "CAP_SETGID",
48656+ "CAP_SETUID",
48657+ "CAP_SETPCAP",
48658+ "CAP_LINUX_IMMUTABLE",
48659+ "CAP_NET_BIND_SERVICE",
48660+ "CAP_NET_BROADCAST",
48661+ "CAP_NET_ADMIN",
48662+ "CAP_NET_RAW",
48663+ "CAP_IPC_LOCK",
48664+ "CAP_IPC_OWNER",
48665+ "CAP_SYS_MODULE",
48666+ "CAP_SYS_RAWIO",
48667+ "CAP_SYS_CHROOT",
48668+ "CAP_SYS_PTRACE",
48669+ "CAP_SYS_PACCT",
48670+ "CAP_SYS_ADMIN",
48671+ "CAP_SYS_BOOT",
48672+ "CAP_SYS_NICE",
48673+ "CAP_SYS_RESOURCE",
48674+ "CAP_SYS_TIME",
48675+ "CAP_SYS_TTY_CONFIG",
48676+ "CAP_MKNOD",
48677+ "CAP_LEASE",
48678+ "CAP_AUDIT_WRITE",
48679+ "CAP_AUDIT_CONTROL",
48680+ "CAP_SETFCAP",
48681+ "CAP_MAC_OVERRIDE",
48682+ "CAP_MAC_ADMIN"
48683+};
48684+
48685+EXPORT_SYMBOL(gr_is_capable);
48686+EXPORT_SYMBOL(gr_is_capable_nolog);
48687+
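+/* capability check against the subject's capability sets: walk the parent
+   subject chain to compute the effective dropped and audited capability
+   masks; subjects in learning mode are instead granted the capability and
+   a learning entry is recorded, and denials of capabilities the task
+   actually holds are logged unless auditing is inverted for that cap
+*/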
48688+int
48689+gr_is_capable(const int cap)
48690+{
48691+ struct task_struct *task = current;
48692+ const struct cred *cred = current_cred();
48693+ struct acl_subject_label *curracl;
48694+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
48695+ kernel_cap_t cap_audit = __cap_empty_set;
48696+
48697+ if (!gr_acl_is_enabled())
48698+ return 1;
48699+
48700+ curracl = task->acl;
48701+
48702+ cap_drop = curracl->cap_lower;
48703+ cap_mask = curracl->cap_mask;
48704+ cap_audit = curracl->cap_invert_audit;
48705+
48706+ while ((curracl = curracl->parent_subject)) {
48707+		/* if the cap isn't specified in the current computed mask but is
48708+		   specified in the current level subject, record it in the computed
48709+		   mask; if the current level subject lowers it, add it to the set of
48710+		   dropped capabilities, and likewise track its inverted-audit setting
48711+		*/
48712+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
48713+ cap_raise(cap_mask, cap);
48714+ if (cap_raised(curracl->cap_lower, cap))
48715+ cap_raise(cap_drop, cap);
48716+ if (cap_raised(curracl->cap_invert_audit, cap))
48717+ cap_raise(cap_audit, cap);
48718+ }
48719+ }
48720+
48721+ if (!cap_raised(cap_drop, cap)) {
48722+ if (cap_raised(cap_audit, cap))
48723+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
48724+ return 1;
48725+ }
48726+
48727+ curracl = task->acl;
48728+
48729+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
48730+ && cap_raised(cred->cap_effective, cap)) {
48731+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
48732+ task->role->roletype, cred->uid,
48733+ cred->gid, task->exec_file ?
48734+ gr_to_filename(task->exec_file->f_path.dentry,
48735+ task->exec_file->f_path.mnt) : curracl->filename,
48736+ curracl->filename, 0UL,
48737+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
48738+ return 1;
48739+ }
48740+
48741+ if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
48742+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
48743+ return 0;
48744+}
48745+
48746+int
48747+gr_is_capable_nolog(const int cap)
48748+{
48749+ struct acl_subject_label *curracl;
48750+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
48751+
48752+ if (!gr_acl_is_enabled())
48753+ return 1;
48754+
48755+ curracl = current->acl;
48756+
48757+ cap_drop = curracl->cap_lower;
48758+ cap_mask = curracl->cap_mask;
48759+
48760+ while ((curracl = curracl->parent_subject)) {
48761+		/* if the cap isn't specified in the current computed mask but is
48762+		   specified in the current level subject, record it in the computed
48763+		   mask; if the current level subject lowers it, add it to the set of
48764+		   dropped capabilities
48765+		*/
48766+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
48767+ cap_raise(cap_mask, cap);
48768+ if (cap_raised(curracl->cap_lower, cap))
48769+ cap_raise(cap_drop, cap);
48770+ }
48771+ }
48772+
48773+ if (!cap_raised(cap_drop, cap))
48774+ return 1;
48775+
48776+ return 0;
48777+}
48778+
48779diff -urNp linux-2.6.32.43/grsecurity/gracl_fs.c linux-2.6.32.43/grsecurity/gracl_fs.c
48780--- linux-2.6.32.43/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
48781+++ linux-2.6.32.43/grsecurity/gracl_fs.c 2011-04-17 15:56:46.000000000 -0400
48782@@ -0,0 +1,431 @@
48783+#include <linux/kernel.h>
48784+#include <linux/sched.h>
48785+#include <linux/types.h>
48786+#include <linux/fs.h>
48787+#include <linux/file.h>
48788+#include <linux/stat.h>
48789+#include <linux/grsecurity.h>
48790+#include <linux/grinternal.h>
48791+#include <linux/gracl.h>
48792+
48793+__u32
48794+gr_acl_handle_hidden_file(const struct dentry * dentry,
48795+ const struct vfsmount * mnt)
48796+{
48797+ __u32 mode;
48798+
48799+ if (unlikely(!dentry->d_inode))
48800+ return GR_FIND;
48801+
48802+ mode =
48803+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
48804+
48805+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
48806+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48807+ return mode;
48808+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
48809+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
48810+ return 0;
48811+ } else if (unlikely(!(mode & GR_FIND)))
48812+ return 0;
48813+
48814+ return GR_FIND;
48815+}
48816+
48817+__u32
48818+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
48819+ const int fmode)
48820+{
48821+ __u32 reqmode = GR_FIND;
48822+ __u32 mode;
48823+
48824+ if (unlikely(!dentry->d_inode))
48825+ return reqmode;
48826+
48827+ if (unlikely(fmode & O_APPEND))
48828+ reqmode |= GR_APPEND;
48829+ else if (unlikely(fmode & FMODE_WRITE))
48830+ reqmode |= GR_WRITE;
48831+ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48832+ reqmode |= GR_READ;
48833+ if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
48834+ reqmode &= ~GR_READ;
48835+ mode =
48836+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48837+ mnt);
48838+
48839+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48840+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48841+ reqmode & GR_READ ? " reading" : "",
48842+ reqmode & GR_WRITE ? " writing" : reqmode &
48843+ GR_APPEND ? " appending" : "");
48844+ return reqmode;
48845+ } else
48846+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48847+ {
48848+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
48849+ reqmode & GR_READ ? " reading" : "",
48850+ reqmode & GR_WRITE ? " writing" : reqmode &
48851+ GR_APPEND ? " appending" : "");
48852+ return 0;
48853+ } else if (unlikely((mode & reqmode) != reqmode))
48854+ return 0;
48855+
48856+ return reqmode;
48857+}
48858+
48859+__u32
48860+gr_acl_handle_creat(const struct dentry * dentry,
48861+ const struct dentry * p_dentry,
48862+ const struct vfsmount * p_mnt, const int fmode,
48863+ const int imode)
48864+{
48865+ __u32 reqmode = GR_WRITE | GR_CREATE;
48866+ __u32 mode;
48867+
48868+ if (unlikely(fmode & O_APPEND))
48869+ reqmode |= GR_APPEND;
48870+ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
48871+ reqmode |= GR_READ;
48872+ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
48873+ reqmode |= GR_SETID;
48874+
48875+ mode =
48876+ gr_check_create(dentry, p_dentry, p_mnt,
48877+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
48878+
48879+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48880+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48881+ reqmode & GR_READ ? " reading" : "",
48882+ reqmode & GR_WRITE ? " writing" : reqmode &
48883+ GR_APPEND ? " appending" : "");
48884+ return reqmode;
48885+ } else
48886+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48887+ {
48888+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
48889+ reqmode & GR_READ ? " reading" : "",
48890+ reqmode & GR_WRITE ? " writing" : reqmode &
48891+ GR_APPEND ? " appending" : "");
48892+ return 0;
48893+ } else if (unlikely((mode & reqmode) != reqmode))
48894+ return 0;
48895+
48896+ return reqmode;
48897+}
48898+
48899+__u32
48900+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
48901+ const int fmode)
48902+{
48903+ __u32 mode, reqmode = GR_FIND;
48904+
48905+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
48906+ reqmode |= GR_EXEC;
48907+ if (fmode & S_IWOTH)
48908+ reqmode |= GR_WRITE;
48909+ if (fmode & S_IROTH)
48910+ reqmode |= GR_READ;
48911+
48912+ mode =
48913+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
48914+ mnt);
48915+
48916+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
48917+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48918+ reqmode & GR_READ ? " reading" : "",
48919+ reqmode & GR_WRITE ? " writing" : "",
48920+ reqmode & GR_EXEC ? " executing" : "");
48921+ return reqmode;
48922+ } else
48923+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
48924+ {
48925+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
48926+ reqmode & GR_READ ? " reading" : "",
48927+ reqmode & GR_WRITE ? " writing" : "",
48928+ reqmode & GR_EXEC ? " executing" : "");
48929+ return 0;
48930+ } else if (unlikely((mode & reqmode) != reqmode))
48931+ return 0;
48932+
48933+ return reqmode;
48934+}
48935+
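+/* common helper for the simple filesystem hooks below: look up the object
+   mode for the dentry/mnt pair, emit an audit or denial log as configured,
+   and return reqmode when every requested bit is granted, 0 otherwise
+*/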
48936+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
48937+{
48938+ __u32 mode;
48939+
48940+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
48941+
48942+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
48943+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
48944+ return mode;
48945+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
48946+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
48947+ return 0;
48948+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
48949+ return 0;
48950+
48951+ return (reqmode);
48952+}
48953+
48954+__u32
48955+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
48956+{
48957+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
48958+}
48959+
48960+__u32
48961+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
48962+{
48963+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
48964+}
48965+
48966+__u32
48967+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
48968+{
48969+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
48970+}
48971+
48972+__u32
48973+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
48974+{
48975+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
48976+}
48977+
48978+__u32
48979+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
48980+ mode_t mode)
48981+{
48982+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
48983+ return 1;
48984+
48985+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48986+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48987+ GR_FCHMOD_ACL_MSG);
48988+ } else {
48989+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
48990+ }
48991+}
48992+
48993+__u32
48994+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
48995+ mode_t mode)
48996+{
48997+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
48998+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
48999+ GR_CHMOD_ACL_MSG);
49000+ } else {
49001+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
49002+ }
49003+}
49004+
49005+__u32
49006+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
49007+{
49008+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
49009+}
49010+
49011+__u32
49012+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
49013+{
49014+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
49015+}
49016+
49017+__u32
49018+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
49019+{
49020+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
49021+}
49022+
49023+__u32
49024+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
49025+{
49026+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
49027+ GR_UNIXCONNECT_ACL_MSG);
49028+}
49029+
49030+/* hardlinks require at minimum create permission;
49031+ any additional privilege required is based on the
49032+ privilege of the file being linked to
49033+*/
49034+__u32
49035+gr_acl_handle_link(const struct dentry * new_dentry,
49036+ const struct dentry * parent_dentry,
49037+ const struct vfsmount * parent_mnt,
49038+ const struct dentry * old_dentry,
49039+ const struct vfsmount * old_mnt, const char *to)
49040+{
49041+ __u32 mode;
49042+ __u32 needmode = GR_CREATE | GR_LINK;
49043+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
49044+
49045+ mode =
49046+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
49047+ old_mnt);
49048+
49049+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
49050+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
49051+ return mode;
49052+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
49053+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
49054+ return 0;
49055+ } else if (unlikely((mode & needmode) != needmode))
49056+ return 0;
49057+
49058+ return 1;
49059+}
49060+
49061+__u32
49062+gr_acl_handle_symlink(const struct dentry * new_dentry,
49063+ const struct dentry * parent_dentry,
49064+ const struct vfsmount * parent_mnt, const char *from)
49065+{
49066+ __u32 needmode = GR_WRITE | GR_CREATE;
49067+ __u32 mode;
49068+
49069+ mode =
49070+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
49071+ GR_CREATE | GR_AUDIT_CREATE |
49072+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
49073+
49074+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
49075+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
49076+ return mode;
49077+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
49078+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
49079+ return 0;
49080+ } else if (unlikely((mode & needmode) != needmode))
49081+ return 0;
49082+
49083+ return (GR_WRITE | GR_CREATE);
49084+}
49085+
49086+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
49087+{
49088+ __u32 mode;
49089+
49090+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
49091+
49092+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
49093+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
49094+ return mode;
49095+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
49096+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
49097+ return 0;
49098+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
49099+ return 0;
49100+
49101+ return (reqmode);
49102+}
49103+
49104+__u32
49105+gr_acl_handle_mknod(const struct dentry * new_dentry,
49106+ const struct dentry * parent_dentry,
49107+ const struct vfsmount * parent_mnt,
49108+ const int mode)
49109+{
49110+ __u32 reqmode = GR_WRITE | GR_CREATE;
49111+ if (unlikely(mode & (S_ISUID | S_ISGID)))
49112+ reqmode |= GR_SETID;
49113+
49114+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
49115+ reqmode, GR_MKNOD_ACL_MSG);
49116+}
49117+
49118+__u32
49119+gr_acl_handle_mkdir(const struct dentry *new_dentry,
49120+ const struct dentry *parent_dentry,
49121+ const struct vfsmount *parent_mnt)
49122+{
49123+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
49124+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
49125+}
49126+
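+/* a rename is considered successful only when both the old and the new
+   path checks returned read and write access */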
49127+#define RENAME_CHECK_SUCCESS(old, new) \
49128+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
49129+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
49130+
49131+int
49132+gr_acl_handle_rename(struct dentry *new_dentry,
49133+ struct dentry *parent_dentry,
49134+ const struct vfsmount *parent_mnt,
49135+ struct dentry *old_dentry,
49136+ struct inode *old_parent_inode,
49137+ struct vfsmount *old_mnt, const char *newname)
49138+{
49139+ __u32 comp1, comp2;
49140+ int error = 0;
49141+
49142+ if (unlikely(!gr_acl_is_enabled()))
49143+ return 0;
49144+
49145+ if (!new_dentry->d_inode) {
49146+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
49147+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
49148+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
49149+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
49150+ GR_DELETE | GR_AUDIT_DELETE |
49151+ GR_AUDIT_READ | GR_AUDIT_WRITE |
49152+ GR_SUPPRESS, old_mnt);
49153+ } else {
49154+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
49155+ GR_CREATE | GR_DELETE |
49156+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
49157+ GR_AUDIT_READ | GR_AUDIT_WRITE |
49158+ GR_SUPPRESS, parent_mnt);
49159+ comp2 =
49160+ gr_search_file(old_dentry,
49161+ GR_READ | GR_WRITE | GR_AUDIT_READ |
49162+ GR_DELETE | GR_AUDIT_DELETE |
49163+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
49164+ }
49165+
49166+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
49167+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
49168+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
49169+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
49170+ && !(comp2 & GR_SUPPRESS)) {
49171+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
49172+ error = -EACCES;
49173+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
49174+ error = -EACCES;
49175+
49176+ return error;
49177+}
49178+
49179+void
49180+gr_acl_handle_exit(void)
49181+{
49182+ u16 id;
49183+ char *rolename;
49184+ struct file *exec_file;
49185+
49186+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
49187+ !(current->role->roletype & GR_ROLE_PERSIST))) {
49188+ id = current->acl_role_id;
49189+ rolename = current->role->rolename;
49190+ gr_set_acls(1);
49191+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
49192+ }
49193+
49194+ write_lock(&grsec_exec_file_lock);
49195+ exec_file = current->exec_file;
49196+ current->exec_file = NULL;
49197+ write_unlock(&grsec_exec_file_lock);
49198+
49199+ if (exec_file)
49200+ fput(exec_file);
49201+}
49202+
49203+int
49204+gr_acl_handle_procpidmem(const struct task_struct *task)
49205+{
49206+ if (unlikely(!gr_acl_is_enabled()))
49207+ return 0;
49208+
49209+ if (task != current && task->acl->mode & GR_PROTPROCFD)
49210+ return -EACCES;
49211+
49212+ return 0;
49213+}
49214diff -urNp linux-2.6.32.43/grsecurity/gracl_ip.c linux-2.6.32.43/grsecurity/gracl_ip.c
49215--- linux-2.6.32.43/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
49216+++ linux-2.6.32.43/grsecurity/gracl_ip.c 2011-04-17 15:56:46.000000000 -0400
49217@@ -0,0 +1,382 @@
49218+#include <linux/kernel.h>
49219+#include <asm/uaccess.h>
49220+#include <asm/errno.h>
49221+#include <net/sock.h>
49222+#include <linux/file.h>
49223+#include <linux/fs.h>
49224+#include <linux/net.h>
49225+#include <linux/in.h>
49226+#include <linux/skbuff.h>
49227+#include <linux/ip.h>
49228+#include <linux/udp.h>
49229+#include <linux/smp_lock.h>
49230+#include <linux/types.h>
49231+#include <linux/sched.h>
49232+#include <linux/netdevice.h>
49233+#include <linux/inetdevice.h>
49234+#include <linux/gracl.h>
49235+#include <linux/grsecurity.h>
49236+#include <linux/grinternal.h>
49237+
49238+#define GR_BIND 0x01
49239+#define GR_CONNECT 0x02
49240+#define GR_INVERT 0x04
49241+#define GR_BINDOVERRIDE 0x08
49242+#define GR_CONNECTOVERRIDE 0x10
49243+#define GR_SOCK_FAMILY 0x20
49244+
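+/* name tables used when formatting socket policy log messages */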
49245+static const char * gr_protocols[IPPROTO_MAX] = {
49246+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
49247+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
49248+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
49249+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
49250+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
49251+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
49252+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
49253+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
49254+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
49255+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
49256+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
49257+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
49258+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
49259+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
49260+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
49261+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
49262+	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
49263+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
49264+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
49265+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
49266+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
49267+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
49268+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
49269+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
49270+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
49271+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
49272+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
49273+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
49274+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
49275+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
49276+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
49277+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
49278+ };
49279+
49280+static const char * gr_socktypes[SOCK_MAX] = {
49281+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
49282+ "unknown:7", "unknown:8", "unknown:9", "packet"
49283+ };
49284+
49285+static const char * gr_sockfamilies[AF_MAX+1] = {
49286+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
49287+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
49288+	"econet", "atmsvc", "rds", "sna", "irda", "pppox", "wanpipe", "llc", "fam_27", "fam_28",
49289+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
49290+ };
49291+
49292+const char *
49293+gr_proto_to_name(unsigned char proto)
49294+{
49295+ return gr_protocols[proto];
49296+}
49297+
49298+const char *
49299+gr_socktype_to_name(unsigned char type)
49300+{
49301+ return gr_socktypes[type];
49302+}
49303+
49304+const char *
49305+gr_sockfamily_to_name(unsigned char family)
49306+{
49307+ return gr_sockfamilies[family];
49308+}
49309+
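+/* socket-creation hook: verifies that the requested address family is
+   allowed for the current subject and, for PF_INET, that the socket type
+   and protocol are allowed as well; in learning mode the request is
+   recorded instead of being denied */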
49310+int
49311+gr_search_socket(const int domain, const int type, const int protocol)
49312+{
49313+ struct acl_subject_label *curr;
49314+ const struct cred *cred = current_cred();
49315+
49316+ if (unlikely(!gr_acl_is_enabled()))
49317+ goto exit;
49318+
49319+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
49320+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
49321+ goto exit; // let the kernel handle it
49322+
49323+ curr = current->acl;
49324+
49325+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
49326+	/* the family is allowed; if this is PF_INET, allow it only if
49327+ the extra sock type/protocol checks pass */
49328+ if (domain == PF_INET)
49329+ goto inet_check;
49330+ goto exit;
49331+ } else {
49332+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
49333+ __u32 fakeip = 0;
49334+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49335+ current->role->roletype, cred->uid,
49336+ cred->gid, current->exec_file ?
49337+ gr_to_filename(current->exec_file->f_path.dentry,
49338+ current->exec_file->f_path.mnt) :
49339+ curr->filename, curr->filename,
49340+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
49341+ &current->signal->saved_ip);
49342+ goto exit;
49343+ }
49344+ goto exit_fail;
49345+ }
49346+
49347+inet_check:
49348+ /* the rest of this checking is for IPv4 only */
49349+ if (!curr->ips)
49350+ goto exit;
49351+
49352+ if ((curr->ip_type & (1 << type)) &&
49353+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
49354+ goto exit;
49355+
49356+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
49357+		/* we don't place ACLs on raw sockets, and sometimes
49358+ dgram/ip sockets are opened for ioctl and not
49359+ bind/connect, so we'll fake a bind learn log */
49360+ if (type == SOCK_RAW || type == SOCK_PACKET) {
49361+ __u32 fakeip = 0;
49362+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49363+ current->role->roletype, cred->uid,
49364+ cred->gid, current->exec_file ?
49365+ gr_to_filename(current->exec_file->f_path.dentry,
49366+ current->exec_file->f_path.mnt) :
49367+ curr->filename, curr->filename,
49368+ &fakeip, 0, type,
49369+ protocol, GR_CONNECT, &current->signal->saved_ip);
49370+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
49371+ __u32 fakeip = 0;
49372+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49373+ current->role->roletype, cred->uid,
49374+ cred->gid, current->exec_file ?
49375+ gr_to_filename(current->exec_file->f_path.dentry,
49376+ current->exec_file->f_path.mnt) :
49377+ curr->filename, curr->filename,
49378+ &fakeip, 0, type,
49379+ protocol, GR_BIND, &current->signal->saved_ip);
49380+ }
49381+ /* we'll log when they use connect or bind */
49382+ goto exit;
49383+ }
49384+
49385+exit_fail:
49386+ if (domain == PF_INET)
49387+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
49388+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
49389+ else
49390+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
49391+ gr_socktype_to_name(type), protocol);
49392+
49393+ return 0;
49394+exit:
49395+ return 1;
49396+}
49397+
49398+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
49399+{
49400+ if ((ip->mode & mode) &&
49401+ (ip_port >= ip->low) &&
49402+ (ip_port <= ip->high) &&
49403+ ((ntohl(ip_addr) & our_netmask) ==
49404+ (ntohl(our_addr) & our_netmask))
49405+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
49406+ && (ip->type & (1 << type))) {
49407+ if (ip->mode & GR_INVERT)
49408+ return 2; // specifically denied
49409+ else
49410+ return 1; // allowed
49411+ }
49412+
49413+ return 0; // not specifically allowed, may continue parsing
49414+}
49415+
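+/* core bind/connect policy check for IPv4 sockets: applies the INADDR_ANY
+   override, emits a learning entry in learn mode, then walks the subject's
+   ip ACL entries (per-interface or per-subnet) via check_ip_policy;
+   returns 0 when the operation is allowed and -EACCES (after logging)
+   when it is denied */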
49416+static int
49417+gr_search_connectbind(const int full_mode, struct sock *sk,
49418+ struct sockaddr_in *addr, const int type)
49419+{
49420+ char iface[IFNAMSIZ] = {0};
49421+ struct acl_subject_label *curr;
49422+ struct acl_ip_label *ip;
49423+ struct inet_sock *isk;
49424+ struct net_device *dev;
49425+ struct in_device *idev;
49426+ unsigned long i;
49427+ int ret;
49428+ int mode = full_mode & (GR_BIND | GR_CONNECT);
49429+ __u32 ip_addr = 0;
49430+ __u32 our_addr;
49431+ __u32 our_netmask;
49432+ char *p;
49433+ __u16 ip_port = 0;
49434+ const struct cred *cred = current_cred();
49435+
49436+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
49437+ return 0;
49438+
49439+ curr = current->acl;
49440+ isk = inet_sk(sk);
49441+
49442+	/* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
49443+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
49444+ addr->sin_addr.s_addr = curr->inaddr_any_override;
49445+ if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
49446+ struct sockaddr_in saddr;
49447+ int err;
49448+
49449+ saddr.sin_family = AF_INET;
49450+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
49451+ saddr.sin_port = isk->sport;
49452+
49453+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
49454+ if (err)
49455+ return err;
49456+
49457+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
49458+ if (err)
49459+ return err;
49460+ }
49461+
49462+ if (!curr->ips)
49463+ return 0;
49464+
49465+ ip_addr = addr->sin_addr.s_addr;
49466+ ip_port = ntohs(addr->sin_port);
49467+
49468+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
49469+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
49470+ current->role->roletype, cred->uid,
49471+ cred->gid, current->exec_file ?
49472+ gr_to_filename(current->exec_file->f_path.dentry,
49473+ current->exec_file->f_path.mnt) :
49474+ curr->filename, curr->filename,
49475+ &ip_addr, ip_port, type,
49476+ sk->sk_protocol, mode, &current->signal->saved_ip);
49477+ return 0;
49478+ }
49479+
49480+ for (i = 0; i < curr->ip_num; i++) {
49481+ ip = *(curr->ips + i);
49482+ if (ip->iface != NULL) {
49483+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
49484+ p = strchr(iface, ':');
49485+ if (p != NULL)
49486+ *p = '\0';
49487+ dev = dev_get_by_name(sock_net(sk), iface);
49488+ if (dev == NULL)
49489+ continue;
49490+ idev = in_dev_get(dev);
49491+ if (idev == NULL) {
49492+ dev_put(dev);
49493+ continue;
49494+ }
49495+ rcu_read_lock();
49496+ for_ifa(idev) {
49497+ if (!strcmp(ip->iface, ifa->ifa_label)) {
49498+ our_addr = ifa->ifa_address;
49499+ our_netmask = 0xffffffff;
49500+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
49501+ if (ret == 1) {
49502+ rcu_read_unlock();
49503+ in_dev_put(idev);
49504+ dev_put(dev);
49505+ return 0;
49506+ } else if (ret == 2) {
49507+ rcu_read_unlock();
49508+ in_dev_put(idev);
49509+ dev_put(dev);
49510+ goto denied;
49511+ }
49512+ }
49513+ } endfor_ifa(idev);
49514+ rcu_read_unlock();
49515+ in_dev_put(idev);
49516+ dev_put(dev);
49517+ } else {
49518+ our_addr = ip->addr;
49519+ our_netmask = ip->netmask;
49520+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
49521+ if (ret == 1)
49522+ return 0;
49523+ else if (ret == 2)
49524+ goto denied;
49525+ }
49526+ }
49527+
49528+denied:
49529+ if (mode == GR_BIND)
49530+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
49531+ else if (mode == GR_CONNECT)
49532+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
49533+
49534+ return -EACCES;
49535+}
49536+
49537+int
49538+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
49539+{
49540+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
49541+}
49542+
49543+int
49544+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
49545+{
49546+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
49547+}
49548+
49549+int gr_search_listen(struct socket *sock)
49550+{
49551+ struct sock *sk = sock->sk;
49552+ struct sockaddr_in addr;
49553+
49554+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
49555+ addr.sin_port = inet_sk(sk)->sport;
49556+
49557+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
49558+}
49559+
49560+int gr_search_accept(struct socket *sock)
49561+{
49562+ struct sock *sk = sock->sk;
49563+ struct sockaddr_in addr;
49564+
49565+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
49566+ addr.sin_port = inet_sk(sk)->sport;
49567+
49568+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
49569+}
49570+
49571+int
49572+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
49573+{
49574+ if (addr)
49575+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
49576+ else {
49577+ struct sockaddr_in sin;
49578+ const struct inet_sock *inet = inet_sk(sk);
49579+
49580+ sin.sin_addr.s_addr = inet->daddr;
49581+ sin.sin_port = inet->dport;
49582+
49583+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
49584+ }
49585+}
49586+
49587+int
49588+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
49589+{
49590+ struct sockaddr_in sin;
49591+
49592+ if (unlikely(skb->len < sizeof (struct udphdr)))
49593+ return 0; // skip this packet
49594+
49595+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
49596+ sin.sin_port = udp_hdr(skb)->source;
49597+
49598+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
49599+}
49600diff -urNp linux-2.6.32.43/grsecurity/gracl_learn.c linux-2.6.32.43/grsecurity/gracl_learn.c
49601--- linux-2.6.32.43/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
49602+++ linux-2.6.32.43/grsecurity/gracl_learn.c 2011-07-14 21:02:03.000000000 -0400
49603@@ -0,0 +1,208 @@
49604+#include <linux/kernel.h>
49605+#include <linux/mm.h>
49606+#include <linux/sched.h>
49607+#include <linux/poll.h>
49608+#include <linux/smp_lock.h>
49609+#include <linux/string.h>
49610+#include <linux/file.h>
49611+#include <linux/types.h>
49612+#include <linux/vmalloc.h>
49613+#include <linux/grinternal.h>
49614+
49615+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
49616+ size_t count, loff_t *ppos);
49617+extern int gr_acl_is_enabled(void);
49618+
49619+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
49620+static int gr_learn_attached;
49621+
49622+/* use a 512k buffer */
49623+#define LEARN_BUFFER_SIZE (512 * 1024)
49624+
49625+static DEFINE_SPINLOCK(gr_learn_lock);
49626+static DEFINE_MUTEX(gr_learn_user_mutex);
49627+
49628+/* we need to maintain two buffers: the kernel context of grlearn
49629+   uses a mutex around the userspace copying, while the other kernel contexts
49630+ use a spinlock when copying into the buffer, since they cannot sleep
49631+*/
49632+static char *learn_buffer;
49633+static char *learn_buffer_user;
49634+static int learn_buffer_len;
49635+static int learn_buffer_user_len;
49636+
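+/* blocking read for the learning daemon: waits (unless O_NONBLOCK) until
+   learning data is queued, snapshots the kernel buffer into the userspace
+   staging buffer under the spinlock, then copies it out under the mutex */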
49637+static ssize_t
49638+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
49639+{
49640+ DECLARE_WAITQUEUE(wait, current);
49641+ ssize_t retval = 0;
49642+
49643+ add_wait_queue(&learn_wait, &wait);
49644+ set_current_state(TASK_INTERRUPTIBLE);
49645+ do {
49646+ mutex_lock(&gr_learn_user_mutex);
49647+ spin_lock(&gr_learn_lock);
49648+ if (learn_buffer_len)
49649+ break;
49650+ spin_unlock(&gr_learn_lock);
49651+ mutex_unlock(&gr_learn_user_mutex);
49652+ if (file->f_flags & O_NONBLOCK) {
49653+ retval = -EAGAIN;
49654+ goto out;
49655+ }
49656+ if (signal_pending(current)) {
49657+ retval = -ERESTARTSYS;
49658+ goto out;
49659+ }
49660+
49661+ schedule();
49662+ } while (1);
49663+
49664+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
49665+ learn_buffer_user_len = learn_buffer_len;
49666+ retval = learn_buffer_len;
49667+ learn_buffer_len = 0;
49668+
49669+ spin_unlock(&gr_learn_lock);
49670+
49671+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
49672+ retval = -EFAULT;
49673+
49674+ mutex_unlock(&gr_learn_user_mutex);
49675+out:
49676+ set_current_state(TASK_RUNNING);
49677+ remove_wait_queue(&learn_wait, &wait);
49678+ return retval;
49679+}
49680+
49681+static unsigned int
49682+poll_learn(struct file * file, poll_table * wait)
49683+{
49684+ poll_wait(file, &learn_wait, wait);
49685+
49686+ if (learn_buffer_len)
49687+ return (POLLIN | POLLRDNORM);
49688+
49689+ return 0;
49690+}
49691+
49692+void
49693+gr_clear_learn_entries(void)
49694+{
49695+ char *tmp;
49696+
49697+ mutex_lock(&gr_learn_user_mutex);
49698+ spin_lock(&gr_learn_lock);
49699+ tmp = learn_buffer;
49700+ learn_buffer = NULL;
49701+ spin_unlock(&gr_learn_lock);
49702+ if (tmp)
49703+ vfree(tmp);
49704+ if (learn_buffer_user != NULL) {
49705+ vfree(learn_buffer_user);
49706+ learn_buffer_user = NULL;
49707+ }
49708+ learn_buffer_len = 0;
49709+ mutex_unlock(&gr_learn_user_mutex);
49710+
49711+ return;
49712+}
49713+
49714+void
49715+gr_add_learn_entry(const char *fmt, ...)
49716+{
49717+ va_list args;
49718+ unsigned int len;
49719+
49720+ if (!gr_learn_attached)
49721+ return;
49722+
49723+ spin_lock(&gr_learn_lock);
49724+
49725+ /* leave a gap at the end so we know when it's "full" but don't have to
49726+ compute the exact length of the string we're trying to append
49727+ */
49728+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
49729+ spin_unlock(&gr_learn_lock);
49730+ wake_up_interruptible(&learn_wait);
49731+ return;
49732+ }
49733+ if (learn_buffer == NULL) {
49734+ spin_unlock(&gr_learn_lock);
49735+ return;
49736+ }
49737+
49738+ va_start(args, fmt);
49739+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
49740+ va_end(args);
49741+
49742+ learn_buffer_len += len + 1;
49743+
49744+ spin_unlock(&gr_learn_lock);
49745+ wake_up_interruptible(&learn_wait);
49746+
49747+ return;
49748+}
49749+
49750+static int
49751+open_learn(struct inode *inode, struct file *file)
49752+{
49753+ if (file->f_mode & FMODE_READ && gr_learn_attached)
49754+ return -EBUSY;
49755+ if (file->f_mode & FMODE_READ) {
49756+ int retval = 0;
49757+ mutex_lock(&gr_learn_user_mutex);
49758+ if (learn_buffer == NULL)
49759+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
49760+ if (learn_buffer_user == NULL)
49761+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
49762+ if (learn_buffer == NULL) {
49763+ retval = -ENOMEM;
49764+ goto out_error;
49765+ }
49766+ if (learn_buffer_user == NULL) {
49767+ retval = -ENOMEM;
49768+ goto out_error;
49769+ }
49770+ learn_buffer_len = 0;
49771+ learn_buffer_user_len = 0;
49772+ gr_learn_attached = 1;
49773+out_error:
49774+ mutex_unlock(&gr_learn_user_mutex);
49775+ return retval;
49776+ }
49777+ return 0;
49778+}
49779+
49780+static int
49781+close_learn(struct inode *inode, struct file *file)
49782+{
49783+ if (file->f_mode & FMODE_READ) {
49784+ char *tmp = NULL;
49785+ mutex_lock(&gr_learn_user_mutex);
49786+ spin_lock(&gr_learn_lock);
49787+ tmp = learn_buffer;
49788+ learn_buffer = NULL;
49789+ spin_unlock(&gr_learn_lock);
49790+ if (tmp)
49791+ vfree(tmp);
49792+ if (learn_buffer_user != NULL) {
49793+ vfree(learn_buffer_user);
49794+ learn_buffer_user = NULL;
49795+ }
49796+ learn_buffer_len = 0;
49797+ learn_buffer_user_len = 0;
49798+ gr_learn_attached = 0;
49799+ mutex_unlock(&gr_learn_user_mutex);
49800+ }
49801+
49802+ return 0;
49803+}
49804+
49805+const struct file_operations grsec_fops = {
49806+ .read = read_learn,
49807+ .write = write_grsec_handler,
49808+ .open = open_learn,
49809+ .release = close_learn,
49810+ .poll = poll_learn,
49811+};
49812diff -urNp linux-2.6.32.43/grsecurity/gracl_res.c linux-2.6.32.43/grsecurity/gracl_res.c
49813--- linux-2.6.32.43/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
49814+++ linux-2.6.32.43/grsecurity/gracl_res.c 2011-04-17 15:56:46.000000000 -0400
49815@@ -0,0 +1,67 @@
49816+#include <linux/kernel.h>
49817+#include <linux/sched.h>
49818+#include <linux/gracl.h>
49819+#include <linux/grinternal.h>
49820+
49821+static const char *restab_log[] = {
49822+ [RLIMIT_CPU] = "RLIMIT_CPU",
49823+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
49824+ [RLIMIT_DATA] = "RLIMIT_DATA",
49825+ [RLIMIT_STACK] = "RLIMIT_STACK",
49826+ [RLIMIT_CORE] = "RLIMIT_CORE",
49827+ [RLIMIT_RSS] = "RLIMIT_RSS",
49828+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
49829+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
49830+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
49831+ [RLIMIT_AS] = "RLIMIT_AS",
49832+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
49833+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
49834+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
49835+ [RLIMIT_NICE] = "RLIMIT_NICE",
49836+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
49837+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
49838+ [GR_CRASH_RES] = "RLIMIT_CRASH"
49839+};
49840+
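+/* logs an attempt to exceed the given resource limit, except when a
+   capability legitimately permits the overrun (CAP_SYS_ADMIN or
+   CAP_SYS_RESOURCE for RLIMIT_NPROC, CAP_IPC_LOCK for RLIMIT_MEMLOCK,
+   CAP_SYS_NICE for RLIMIT_NICE) */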
49841+void
49842+gr_log_resource(const struct task_struct *task,
49843+ const int res, const unsigned long wanted, const int gt)
49844+{
49845+ const struct cred *cred;
49846+ unsigned long rlim;
49847+
49848+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
49849+ return;
49850+
49851+ // not yet supported resource
49852+ if (unlikely(!restab_log[res]))
49853+ return;
49854+
49855+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
49856+ rlim = task->signal->rlim[res].rlim_max;
49857+ else
49858+ rlim = task->signal->rlim[res].rlim_cur;
49859+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
49860+ return;
49861+
49862+ rcu_read_lock();
49863+ cred = __task_cred(task);
49864+
49865+ if (res == RLIMIT_NPROC &&
49866+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
49867+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
49868+ goto out_rcu_unlock;
49869+ else if (res == RLIMIT_MEMLOCK &&
49870+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
49871+ goto out_rcu_unlock;
49872+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
49873+ goto out_rcu_unlock;
49874+ rcu_read_unlock();
49875+
49876+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
49877+
49878+ return;
49879+out_rcu_unlock:
49880+ rcu_read_unlock();
49881+ return;
49882+}
49883diff -urNp linux-2.6.32.43/grsecurity/gracl_segv.c linux-2.6.32.43/grsecurity/gracl_segv.c
49884--- linux-2.6.32.43/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
49885+++ linux-2.6.32.43/grsecurity/gracl_segv.c 2011-04-17 15:56:46.000000000 -0400
49886@@ -0,0 +1,284 @@
49887+#include <linux/kernel.h>
49888+#include <linux/mm.h>
49889+#include <asm/uaccess.h>
49890+#include <asm/errno.h>
49891+#include <asm/mman.h>
49892+#include <net/sock.h>
49893+#include <linux/file.h>
49894+#include <linux/fs.h>
49895+#include <linux/net.h>
49896+#include <linux/in.h>
49897+#include <linux/smp_lock.h>
49898+#include <linux/slab.h>
49899+#include <linux/types.h>
49900+#include <linux/sched.h>
49901+#include <linux/timer.h>
49902+#include <linux/gracl.h>
49903+#include <linux/grsecurity.h>
49904+#include <linux/grinternal.h>
49905+
49906+static struct crash_uid *uid_set;
49907+static unsigned short uid_used;
49908+static DEFINE_SPINLOCK(gr_uid_lock);
49909+extern rwlock_t gr_inode_lock;
49910+extern struct acl_subject_label *
49911+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
49912+ struct acl_role_label *role);
49913+extern int gr_fake_force_sig(int sig, struct task_struct *t);
49914+
49915+int
49916+gr_init_uidset(void)
49917+{
49918+ uid_set =
49919+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
49920+ uid_used = 0;
49921+
49922+ return uid_set ? 1 : 0;
49923+}
49924+
49925+void
49926+gr_free_uidset(void)
49927+{
49928+ if (uid_set)
49929+ kfree(uid_set);
49930+
49931+ return;
49932+}
49933+
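+/* uid_set is kept sorted by uid (see gr_insertsort below) so that banned
+   uids can be found with a binary search */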
49934+int
49935+gr_find_uid(const uid_t uid)
49936+{
49937+ struct crash_uid *tmp = uid_set;
49938+ uid_t buid;
49939+ int low = 0, high = uid_used - 1, mid;
49940+
49941+ while (high >= low) {
49942+ mid = (low + high) >> 1;
49943+ buid = tmp[mid].uid;
49944+ if (buid == uid)
49945+ return mid;
49946+ if (buid > uid)
49947+ high = mid - 1;
49948+ if (buid < uid)
49949+ low = mid + 1;
49950+ }
49951+
49952+ return -1;
49953+}
49954+
49955+static __inline__ void
49956+gr_insertsort(void)
49957+{
49958+ unsigned short i, j;
49959+ struct crash_uid index;
49960+
49961+ for (i = 1; i < uid_used; i++) {
49962+ index = uid_set[i];
49963+ j = i;
49964+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
49965+ uid_set[j] = uid_set[j - 1];
49966+ j--;
49967+ }
49968+ uid_set[j] = index;
49969+ }
49970+
49971+ return;
49972+}
49973+
49974+static __inline__ void
49975+gr_insert_uid(const uid_t uid, const unsigned long expires)
49976+{
49977+ int loc;
49978+
49979+ if (uid_used == GR_UIDTABLE_MAX)
49980+ return;
49981+
49982+ loc = gr_find_uid(uid);
49983+
49984+ if (loc >= 0) {
49985+ uid_set[loc].expires = expires;
49986+ return;
49987+ }
49988+
49989+ uid_set[uid_used].uid = uid;
49990+ uid_set[uid_used].expires = expires;
49991+ uid_used++;
49992+
49993+ gr_insertsort();
49994+
49995+ return;
49996+}
49997+
49998+void
49999+gr_remove_uid(const unsigned short loc)
50000+{
50001+ unsigned short i;
50002+
50003+ for (i = loc + 1; i < uid_used; i++)
50004+ uid_set[i - 1] = uid_set[i];
50005+
50006+ uid_used--;
50007+
50008+ return;
50009+}
50010+
50011+int
50012+gr_check_crash_uid(const uid_t uid)
50013+{
50014+ int loc;
50015+ int ret = 0;
50016+
50017+ if (unlikely(!gr_acl_is_enabled()))
50018+ return 0;
50019+
50020+ spin_lock(&gr_uid_lock);
50021+ loc = gr_find_uid(uid);
50022+
50023+ if (loc < 0)
50024+ goto out_unlock;
50025+
50026+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
50027+ gr_remove_uid(loc);
50028+ else
50029+ ret = 1;
50030+
50031+out_unlock:
50032+ spin_unlock(&gr_uid_lock);
50033+ return ret;
50034+}
50035+
50036+static __inline__ int
50037+proc_is_setxid(const struct cred *cred)
50038+{
50039+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
50040+ cred->uid != cred->fsuid)
50041+ return 1;
50042+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
50043+ cred->gid != cred->fsgid)
50044+ return 1;
50045+
50046+ return 0;
50047+}
50048+
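+/* anti-bruteforce response for the RES_CRASH resource: counts fatal
+   crashes per subject within the configured window; once the threshold
+   is reached, either records a temporary ban for the real uid of a
+   non-root set*id process (killing that user's other tasks) or kills
+   every task running under the same subject */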
50049+void
50050+gr_handle_crash(struct task_struct *task, const int sig)
50051+{
50052+ struct acl_subject_label *curr;
50053+ struct acl_subject_label *curr2;
50054+ struct task_struct *tsk, *tsk2;
50055+ const struct cred *cred;
50056+ const struct cred *cred2;
50057+
50058+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
50059+ return;
50060+
50061+ if (unlikely(!gr_acl_is_enabled()))
50062+ return;
50063+
50064+ curr = task->acl;
50065+
50066+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
50067+ return;
50068+
50069+ if (time_before_eq(curr->expires, get_seconds())) {
50070+ curr->expires = 0;
50071+ curr->crashes = 0;
50072+ }
50073+
50074+ curr->crashes++;
50075+
50076+ if (!curr->expires)
50077+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
50078+
50079+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
50080+ time_after(curr->expires, get_seconds())) {
50081+ rcu_read_lock();
50082+ cred = __task_cred(task);
50083+ if (cred->uid && proc_is_setxid(cred)) {
50084+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
50085+ spin_lock(&gr_uid_lock);
50086+ gr_insert_uid(cred->uid, curr->expires);
50087+ spin_unlock(&gr_uid_lock);
50088+ curr->expires = 0;
50089+ curr->crashes = 0;
50090+ read_lock(&tasklist_lock);
50091+ do_each_thread(tsk2, tsk) {
50092+ cred2 = __task_cred(tsk);
50093+ if (tsk != task && cred2->uid == cred->uid)
50094+ gr_fake_force_sig(SIGKILL, tsk);
50095+ } while_each_thread(tsk2, tsk);
50096+ read_unlock(&tasklist_lock);
50097+ } else {
50098+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
50099+ read_lock(&tasklist_lock);
50100+ do_each_thread(tsk2, tsk) {
50101+ if (likely(tsk != task)) {
50102+ curr2 = tsk->acl;
50103+
50104+ if (curr2->device == curr->device &&
50105+ curr2->inode == curr->inode)
50106+ gr_fake_force_sig(SIGKILL, tsk);
50107+ }
50108+ } while_each_thread(tsk2, tsk);
50109+ read_unlock(&tasklist_lock);
50110+ }
50111+ rcu_read_unlock();
50112+ }
50113+
50114+ return;
50115+}
50116+
50117+int
50118+gr_check_crash_exec(const struct file *filp)
50119+{
50120+ struct acl_subject_label *curr;
50121+
50122+ if (unlikely(!gr_acl_is_enabled()))
50123+ return 0;
50124+
50125+ read_lock(&gr_inode_lock);
50126+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
50127+ filp->f_path.dentry->d_inode->i_sb->s_dev,
50128+ current->role);
50129+ read_unlock(&gr_inode_lock);
50130+
50131+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
50132+ (!curr->crashes && !curr->expires))
50133+ return 0;
50134+
50135+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
50136+ time_after(curr->expires, get_seconds()))
50137+ return 1;
50138+ else if (time_before_eq(curr->expires, get_seconds())) {
50139+ curr->crashes = 0;
50140+ curr->expires = 0;
50141+ }
50142+
50143+ return 0;
50144+}
50145+
50146+void
50147+gr_handle_alertkill(struct task_struct *task)
50148+{
50149+ struct acl_subject_label *curracl;
50150+ __u32 curr_ip;
50151+ struct task_struct *p, *p2;
50152+
50153+ if (unlikely(!gr_acl_is_enabled()))
50154+ return;
50155+
50156+ curracl = task->acl;
50157+ curr_ip = task->signal->curr_ip;
50158+
50159+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
50160+ read_lock(&tasklist_lock);
50161+ do_each_thread(p2, p) {
50162+ if (p->signal->curr_ip == curr_ip)
50163+ gr_fake_force_sig(SIGKILL, p);
50164+ } while_each_thread(p2, p);
50165+ read_unlock(&tasklist_lock);
50166+ } else if (curracl->mode & GR_KILLPROC)
50167+ gr_fake_force_sig(SIGKILL, task);
50168+
50169+ return;
50170+}
50171diff -urNp linux-2.6.32.43/grsecurity/gracl_shm.c linux-2.6.32.43/grsecurity/gracl_shm.c
50172--- linux-2.6.32.43/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
50173+++ linux-2.6.32.43/grsecurity/gracl_shm.c 2011-04-17 15:56:46.000000000 -0400
50174@@ -0,0 +1,40 @@
50175+#include <linux/kernel.h>
50176+#include <linux/mm.h>
50177+#include <linux/sched.h>
50178+#include <linux/file.h>
50179+#include <linux/ipc.h>
50180+#include <linux/gracl.h>
50181+#include <linux/grsecurity.h>
50182+#include <linux/grinternal.h>
50183+
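+/* denies (and logs) attaching to a SysV shared memory segment when the
+   creating task (or the last task to attach) is still running under a
+   different subject that has the GR_PROTSHM flag set; returns 1 to allow,
+   0 to deny */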
50184+int
50185+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50186+ const time_t shm_createtime, const uid_t cuid, const int shmid)
50187+{
50188+ struct task_struct *task;
50189+
50190+ if (!gr_acl_is_enabled())
50191+ return 1;
50192+
50193+ rcu_read_lock();
50194+ read_lock(&tasklist_lock);
50195+
50196+ task = find_task_by_vpid(shm_cprid);
50197+
50198+ if (unlikely(!task))
50199+ task = find_task_by_vpid(shm_lapid);
50200+
50201+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
50202+ (task->pid == shm_lapid)) &&
50203+ (task->acl->mode & GR_PROTSHM) &&
50204+ (task->acl != current->acl))) {
50205+ read_unlock(&tasklist_lock);
50206+ rcu_read_unlock();
50207+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
50208+ return 0;
50209+ }
50210+ read_unlock(&tasklist_lock);
50211+ rcu_read_unlock();
50212+
50213+ return 1;
50214+}
50215diff -urNp linux-2.6.32.43/grsecurity/grsec_chdir.c linux-2.6.32.43/grsecurity/grsec_chdir.c
50216--- linux-2.6.32.43/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
50217+++ linux-2.6.32.43/grsecurity/grsec_chdir.c 2011-04-17 15:56:46.000000000 -0400
50218@@ -0,0 +1,19 @@
50219+#include <linux/kernel.h>
50220+#include <linux/sched.h>
50221+#include <linux/fs.h>
50222+#include <linux/file.h>
50223+#include <linux/grsecurity.h>
50224+#include <linux/grinternal.h>
50225+
50226+void
50227+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
50228+{
50229+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
50230+ if ((grsec_enable_chdir && grsec_enable_group &&
50231+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
50232+ !grsec_enable_group)) {
50233+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
50234+ }
50235+#endif
50236+ return;
50237+}
50238diff -urNp linux-2.6.32.43/grsecurity/grsec_chroot.c linux-2.6.32.43/grsecurity/grsec_chroot.c
50239--- linux-2.6.32.43/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
50240+++ linux-2.6.32.43/grsecurity/grsec_chroot.c 2011-07-18 17:14:10.000000000 -0400
50241@@ -0,0 +1,384 @@
50242+#include <linux/kernel.h>
50243+#include <linux/module.h>
50244+#include <linux/sched.h>
50245+#include <linux/file.h>
50246+#include <linux/fs.h>
50247+#include <linux/mount.h>
50248+#include <linux/types.h>
50249+#include <linux/pid_namespace.h>
50250+#include <linux/grsecurity.h>
50251+#include <linux/grinternal.h>
50252+
50253+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
50254+{
50255+#ifdef CONFIG_GRKERNSEC
50256+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
50257+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
50258+ task->gr_is_chrooted = 1;
50259+ else
50260+ task->gr_is_chrooted = 0;
50261+
50262+ task->gr_chroot_dentry = path->dentry;
50263+#endif
50264+ return;
50265+}
50266+
50267+void gr_clear_chroot_entries(struct task_struct *task)
50268+{
50269+#ifdef CONFIG_GRKERNSEC
50270+ task->gr_is_chrooted = 0;
50271+ task->gr_chroot_dentry = NULL;
50272+#endif
50273+ return;
50274+}
50275+
50276+int
50277+gr_handle_chroot_unix(const pid_t pid)
50278+{
50279+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
50280+ struct task_struct *p;
50281+
50282+ if (unlikely(!grsec_enable_chroot_unix))
50283+ return 1;
50284+
50285+ if (likely(!proc_is_chrooted(current)))
50286+ return 1;
50287+
50288+ rcu_read_lock();
50289+ read_lock(&tasklist_lock);
50290+
50291+ p = find_task_by_vpid_unrestricted(pid);
50292+ if (unlikely(p && !have_same_root(current, p))) {
50293+ read_unlock(&tasklist_lock);
50294+ rcu_read_unlock();
50295+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
50296+ return 0;
50297+ }
50298+ read_unlock(&tasklist_lock);
50299+ rcu_read_unlock();
50300+#endif
50301+ return 1;
50302+}
50303+
50304+int
50305+gr_handle_chroot_nice(void)
50306+{
50307+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
50308+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
50309+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
50310+ return -EPERM;
50311+ }
50312+#endif
50313+ return 0;
50314+}
50315+
50316+int
50317+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
50318+{
50319+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
50320+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
50321+ && proc_is_chrooted(current)) {
50322+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
50323+ return -EACCES;
50324+ }
50325+#endif
50326+ return 0;
50327+}
50328+
50329+int
50330+gr_handle_chroot_rawio(const struct inode *inode)
50331+{
50332+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50333+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
50334+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
50335+ return 1;
50336+#endif
50337+ return 0;
50338+}
50339+
50340+int
50341+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
50342+{
50343+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
50344+ struct task_struct *p;
50345+ int ret = 0;
50346+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
50347+ return ret;
50348+
50349+ read_lock(&tasklist_lock);
50350+ do_each_pid_task(pid, type, p) {
50351+ if (!have_same_root(current, p)) {
50352+ ret = 1;
50353+ goto out;
50354+ }
50355+ } while_each_pid_task(pid, type, p);
50356+out:
50357+ read_unlock(&tasklist_lock);
50358+ return ret;
50359+#endif
50360+ return 0;
50361+}
50362+
50363+int
50364+gr_pid_is_chrooted(struct task_struct *p)
50365+{
50366+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
50367+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
50368+ return 0;
50369+
50370+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
50371+ !have_same_root(current, p)) {
50372+ return 1;
50373+ }
50374+#endif
50375+ return 0;
50376+}
50377+
50378+EXPORT_SYMBOL(gr_pid_is_chrooted);
50379+
50380+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
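+/* walks up from the given dentry/mnt toward the real (init) root: returns
+   0 when the real root is reached without passing the caller's current
+   root, i.e. the path lies outside the caller's chroot, and 1 otherwise */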
50381+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
50382+{
50383+ struct dentry *dentry = (struct dentry *)u_dentry;
50384+ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
50385+ struct dentry *realroot;
50386+ struct vfsmount *realrootmnt;
50387+ struct dentry *currentroot;
50388+ struct vfsmount *currentmnt;
50389+ struct task_struct *reaper = &init_task;
50390+ int ret = 1;
50391+
50392+ read_lock(&reaper->fs->lock);
50393+ realrootmnt = mntget(reaper->fs->root.mnt);
50394+ realroot = dget(reaper->fs->root.dentry);
50395+ read_unlock(&reaper->fs->lock);
50396+
50397+ read_lock(&current->fs->lock);
50398+ currentmnt = mntget(current->fs->root.mnt);
50399+ currentroot = dget(current->fs->root.dentry);
50400+ read_unlock(&current->fs->lock);
50401+
50402+ spin_lock(&dcache_lock);
50403+ for (;;) {
50404+ if (unlikely((dentry == realroot && mnt == realrootmnt)
50405+ || (dentry == currentroot && mnt == currentmnt)))
50406+ break;
50407+ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
50408+ if (mnt->mnt_parent == mnt)
50409+ break;
50410+ dentry = mnt->mnt_mountpoint;
50411+ mnt = mnt->mnt_parent;
50412+ continue;
50413+ }
50414+ dentry = dentry->d_parent;
50415+ }
50416+ spin_unlock(&dcache_lock);
50417+
50418+ dput(currentroot);
50419+ mntput(currentmnt);
50420+
50421+ /* access is outside of chroot */
50422+ if (dentry == realroot && mnt == realrootmnt)
50423+ ret = 0;
50424+
50425+ dput(realroot);
50426+ mntput(realrootmnt);
50427+ return ret;
50428+}
50429+#endif
50430+
50431+int
50432+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
50433+{
50434+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
50435+ if (!grsec_enable_chroot_fchdir)
50436+ return 1;
50437+
50438+ if (!proc_is_chrooted(current))
50439+ return 1;
50440+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
50441+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
50442+ return 0;
50443+ }
50444+#endif
50445+ return 1;
50446+}
50447+
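+/* prevents a chrooted task from attaching to shared memory segments whose
+   creator (or last attaching task) lives outside its chroot, i.e. does not
+   share the same root */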
50448+int
50449+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50450+ const time_t shm_createtime)
50451+{
50452+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
50453+ struct task_struct *p;
50454+ time_t starttime;
50455+
50456+ if (unlikely(!grsec_enable_chroot_shmat))
50457+ return 1;
50458+
50459+ if (likely(!proc_is_chrooted(current)))
50460+ return 1;
50461+
50462+ rcu_read_lock();
50463+ read_lock(&tasklist_lock);
50464+
50465+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
50466+ starttime = p->start_time.tv_sec;
50467+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
50468+ if (have_same_root(current, p)) {
50469+ goto allow;
50470+ } else {
50471+ read_unlock(&tasklist_lock);
50472+ rcu_read_unlock();
50473+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
50474+ return 0;
50475+ }
50476+ }
50477+		/* creator exited and its pid was reused; fall through to the next check */
50478+ }
50479+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
50480+ if (unlikely(!have_same_root(current, p))) {
50481+ read_unlock(&tasklist_lock);
50482+ rcu_read_unlock();
50483+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
50484+ return 0;
50485+ }
50486+ }
50487+
50488+allow:
50489+ read_unlock(&tasklist_lock);
50490+ rcu_read_unlock();
50491+#endif
50492+ return 1;
50493+}
50494+
50495+void
50496+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
50497+{
50498+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
50499+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
50500+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
50501+#endif
50502+ return;
50503+}
50504+
50505+int
50506+gr_handle_chroot_mknod(const struct dentry *dentry,
50507+ const struct vfsmount *mnt, const int mode)
50508+{
50509+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
50510+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
50511+ proc_is_chrooted(current)) {
50512+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
50513+ return -EPERM;
50514+ }
50515+#endif
50516+ return 0;
50517+}
50518+
50519+int
50520+gr_handle_chroot_mount(const struct dentry *dentry,
50521+ const struct vfsmount *mnt, const char *dev_name)
50522+{
50523+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
50524+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
50525+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
50526+ return -EPERM;
50527+ }
50528+#endif
50529+ return 0;
50530+}
50531+
50532+int
50533+gr_handle_chroot_pivot(void)
50534+{
50535+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
50536+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
50537+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
50538+ return -EPERM;
50539+ }
50540+#endif
50541+ return 0;
50542+}
50543+
50544+int
50545+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
50546+{
50547+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
50548+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
50549+ !gr_is_outside_chroot(dentry, mnt)) {
50550+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
50551+ return -EPERM;
50552+ }
50553+#endif
50554+ return 0;
50555+}
50556+
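+/* when a task enters a chroot, drops the GR_CHROOT_CAPS capability set
+   from its permitted, inheritable and effective capabilities via a new
+   set of credentials; returns non-zero only if the credentials could not
+   be allocated */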
50557+int
50558+gr_handle_chroot_caps(struct path *path)
50559+{
50560+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
50561+ if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
50562+ (init_task.fs->root.dentry != path->dentry) &&
50563+ (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
50564+
50565+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
50566+ const struct cred *old = current_cred();
50567+ struct cred *new = prepare_creds();
50568+ if (new == NULL)
50569+ return 1;
50570+
50571+ new->cap_permitted = cap_drop(old->cap_permitted,
50572+ chroot_caps);
50573+ new->cap_inheritable = cap_drop(old->cap_inheritable,
50574+ chroot_caps);
50575+ new->cap_effective = cap_drop(old->cap_effective,
50576+ chroot_caps);
50577+
50578+ commit_creds(new);
50579+
50580+ return 0;
50581+ }
50582+#endif
50583+ return 0;
50584+}
50585+
50586+int
50587+gr_handle_chroot_sysctl(const int op)
50588+{
50589+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
50590+ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
50591+ && (op & MAY_WRITE))
50592+ return -EACCES;
50593+#endif
50594+ return 0;
50595+}
50596+
50597+void
50598+gr_handle_chroot_chdir(struct path *path)
50599+{
50600+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
50601+ if (grsec_enable_chroot_chdir)
50602+ set_fs_pwd(current->fs, path);
50603+#endif
50604+ return;
50605+}
50606+
50607+int
50608+gr_handle_chroot_chmod(const struct dentry *dentry,
50609+ const struct vfsmount *mnt, const int mode)
50610+{
50611+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
50612+ /* allow chmod +s on directories, but not on files */
50613+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
50614+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
50615+ proc_is_chrooted(current)) {
50616+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
50617+ return -EPERM;
50618+ }
50619+#endif
50620+ return 0;
50621+}
50622+
50623+#ifdef CONFIG_SECURITY
50624+EXPORT_SYMBOL(gr_handle_chroot_caps);
50625+#endif
50626diff -urNp linux-2.6.32.43/grsecurity/grsec_disabled.c linux-2.6.32.43/grsecurity/grsec_disabled.c
50627--- linux-2.6.32.43/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
50628+++ linux-2.6.32.43/grsecurity/grsec_disabled.c 2011-04-17 15:56:46.000000000 -0400
50629@@ -0,0 +1,447 @@
50630+#include <linux/kernel.h>
50631+#include <linux/module.h>
50632+#include <linux/sched.h>
50633+#include <linux/file.h>
50634+#include <linux/fs.h>
50635+#include <linux/kdev_t.h>
50636+#include <linux/net.h>
50637+#include <linux/in.h>
50638+#include <linux/ip.h>
50639+#include <linux/skbuff.h>
50640+#include <linux/sysctl.h>
50641+
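+/* no-op stub implementations of the grsecurity hooks, built when
+   grsecurity is not enabled */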
50642+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
50643+void
50644+pax_set_initial_flags(struct linux_binprm *bprm)
50645+{
50646+ return;
50647+}
50648+#endif
50649+
50650+#ifdef CONFIG_SYSCTL
50651+__u32
50652+gr_handle_sysctl(const struct ctl_table * table, const int op)
50653+{
50654+ return 0;
50655+}
50656+#endif
50657+
50658+#ifdef CONFIG_TASKSTATS
50659+int gr_is_taskstats_denied(int pid)
50660+{
50661+ return 0;
50662+}
50663+#endif
50664+
50665+int
50666+gr_acl_is_enabled(void)
50667+{
50668+ return 0;
50669+}
50670+
50671+int
50672+gr_handle_rawio(const struct inode *inode)
50673+{
50674+ return 0;
50675+}
50676+
50677+void
50678+gr_acl_handle_psacct(struct task_struct *task, const long code)
50679+{
50680+ return;
50681+}
50682+
50683+int
50684+gr_handle_ptrace(struct task_struct *task, const long request)
50685+{
50686+ return 0;
50687+}
50688+
50689+int
50690+gr_handle_proc_ptrace(struct task_struct *task)
50691+{
50692+ return 0;
50693+}
50694+
50695+void
50696+gr_learn_resource(const struct task_struct *task,
50697+ const int res, const unsigned long wanted, const int gt)
50698+{
50699+ return;
50700+}
50701+
50702+int
50703+gr_set_acls(const int type)
50704+{
50705+ return 0;
50706+}
50707+
50708+int
50709+gr_check_hidden_task(const struct task_struct *tsk)
50710+{
50711+ return 0;
50712+}
50713+
50714+int
50715+gr_check_protected_task(const struct task_struct *task)
50716+{
50717+ return 0;
50718+}
50719+
50720+int
50721+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
50722+{
50723+ return 0;
50724+}
50725+
50726+void
50727+gr_copy_label(struct task_struct *tsk)
50728+{
50729+ return;
50730+}
50731+
50732+void
50733+gr_set_pax_flags(struct task_struct *task)
50734+{
50735+ return;
50736+}
50737+
50738+int
50739+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
50740+ const int unsafe_share)
50741+{
50742+ return 0;
50743+}
50744+
50745+void
50746+gr_handle_delete(const ino_t ino, const dev_t dev)
50747+{
50748+ return;
50749+}
50750+
50751+void
50752+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50753+{
50754+ return;
50755+}
50756+
50757+void
50758+gr_handle_crash(struct task_struct *task, const int sig)
50759+{
50760+ return;
50761+}
50762+
50763+int
50764+gr_check_crash_exec(const struct file *filp)
50765+{
50766+ return 0;
50767+}
50768+
50769+int
50770+gr_check_crash_uid(const uid_t uid)
50771+{
50772+ return 0;
50773+}
50774+
50775+void
50776+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
50777+ struct dentry *old_dentry,
50778+ struct dentry *new_dentry,
50779+ struct vfsmount *mnt, const __u8 replace)
50780+{
50781+ return;
50782+}
50783+
50784+int
50785+gr_search_socket(const int family, const int type, const int protocol)
50786+{
50787+ return 1;
50788+}
50789+
50790+int
50791+gr_search_connectbind(const int mode, const struct socket *sock,
50792+ const struct sockaddr_in *addr)
50793+{
50794+ return 0;
50795+}
50796+
50797+int
50798+gr_is_capable(const int cap)
50799+{
50800+ return 1;
50801+}
50802+
50803+int
50804+gr_is_capable_nolog(const int cap)
50805+{
50806+ return 1;
50807+}
50808+
50809+void
50810+gr_handle_alertkill(struct task_struct *task)
50811+{
50812+ return;
50813+}
50814+
50815+__u32
50816+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
50817+{
50818+ return 1;
50819+}
50820+
50821+__u32
50822+gr_acl_handle_hidden_file(const struct dentry * dentry,
50823+ const struct vfsmount * mnt)
50824+{
50825+ return 1;
50826+}
50827+
50828+__u32
50829+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
50830+ const int fmode)
50831+{
50832+ return 1;
50833+}
50834+
50835+__u32
50836+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
50837+{
50838+ return 1;
50839+}
50840+
50841+__u32
50842+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
50843+{
50844+ return 1;
50845+}
50846+
50847+int
50848+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
50849+ unsigned int *vm_flags)
50850+{
50851+ return 1;
50852+}
50853+
50854+__u32
50855+gr_acl_handle_truncate(const struct dentry * dentry,
50856+ const struct vfsmount * mnt)
50857+{
50858+ return 1;
50859+}
50860+
50861+__u32
50862+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
50863+{
50864+ return 1;
50865+}
50866+
50867+__u32
50868+gr_acl_handle_access(const struct dentry * dentry,
50869+ const struct vfsmount * mnt, const int fmode)
50870+{
50871+ return 1;
50872+}
50873+
50874+__u32
50875+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
50876+ mode_t mode)
50877+{
50878+ return 1;
50879+}
50880+
50881+__u32
50882+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
50883+ mode_t mode)
50884+{
50885+ return 1;
50886+}
50887+
50888+__u32
50889+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
50890+{
50891+ return 1;
50892+}
50893+
50894+__u32
50895+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
50896+{
50897+ return 1;
50898+}
50899+
50900+void
50901+grsecurity_init(void)
50902+{
50903+ return;
50904+}
50905+
50906+__u32
50907+gr_acl_handle_mknod(const struct dentry * new_dentry,
50908+ const struct dentry * parent_dentry,
50909+ const struct vfsmount * parent_mnt,
50910+ const int mode)
50911+{
50912+ return 1;
50913+}
50914+
50915+__u32
50916+gr_acl_handle_mkdir(const struct dentry * new_dentry,
50917+ const struct dentry * parent_dentry,
50918+ const struct vfsmount * parent_mnt)
50919+{
50920+ return 1;
50921+}
50922+
50923+__u32
50924+gr_acl_handle_symlink(const struct dentry * new_dentry,
50925+ const struct dentry * parent_dentry,
50926+ const struct vfsmount * parent_mnt, const char *from)
50927+{
50928+ return 1;
50929+}
50930+
50931+__u32
50932+gr_acl_handle_link(const struct dentry * new_dentry,
50933+ const struct dentry * parent_dentry,
50934+ const struct vfsmount * parent_mnt,
50935+ const struct dentry * old_dentry,
50936+ const struct vfsmount * old_mnt, const char *to)
50937+{
50938+ return 1;
50939+}
50940+
50941+int
50942+gr_acl_handle_rename(const struct dentry *new_dentry,
50943+ const struct dentry *parent_dentry,
50944+ const struct vfsmount *parent_mnt,
50945+ const struct dentry *old_dentry,
50946+ const struct inode *old_parent_inode,
50947+ const struct vfsmount *old_mnt, const char *newname)
50948+{
50949+ return 0;
50950+}
50951+
50952+int
50953+gr_acl_handle_filldir(const struct file *file, const char *name,
50954+ const int namelen, const ino_t ino)
50955+{
50956+ return 1;
50957+}
50958+
50959+int
50960+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
50961+ const time_t shm_createtime, const uid_t cuid, const int shmid)
50962+{
50963+ return 1;
50964+}
50965+
50966+int
50967+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
50968+{
50969+ return 0;
50970+}
50971+
50972+int
50973+gr_search_accept(const struct socket *sock)
50974+{
50975+ return 0;
50976+}
50977+
50978+int
50979+gr_search_listen(const struct socket *sock)
50980+{
50981+ return 0;
50982+}
50983+
50984+int
50985+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
50986+{
50987+ return 0;
50988+}
50989+
50990+__u32
50991+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
50992+{
50993+ return 1;
50994+}
50995+
50996+__u32
50997+gr_acl_handle_creat(const struct dentry * dentry,
50998+ const struct dentry * p_dentry,
50999+ const struct vfsmount * p_mnt, const int fmode,
51000+ const int imode)
51001+{
51002+ return 1;
51003+}
51004+
51005+void
51006+gr_acl_handle_exit(void)
51007+{
51008+ return;
51009+}
51010+
51011+int
51012+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
51013+{
51014+ return 1;
51015+}
51016+
51017+void
51018+gr_set_role_label(const uid_t uid, const gid_t gid)
51019+{
51020+ return;
51021+}
51022+
51023+int
51024+gr_acl_handle_procpidmem(const struct task_struct *task)
51025+{
51026+ return 0;
51027+}
51028+
51029+int
51030+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
51031+{
51032+ return 0;
51033+}
51034+
51035+int
51036+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
51037+{
51038+ return 0;
51039+}
51040+
51041+void
51042+gr_set_kernel_label(struct task_struct *task)
51043+{
51044+ return;
51045+}
51046+
51047+int
51048+gr_check_user_change(int real, int effective, int fs)
51049+{
51050+ return 0;
51051+}
51052+
51053+int
51054+gr_check_group_change(int real, int effective, int fs)
51055+{
51056+ return 0;
51057+}
51058+
51059+int gr_acl_enable_at_secure(void)
51060+{
51061+ return 0;
51062+}
51063+
51064+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
51065+{
51066+ return dentry->d_inode->i_sb->s_dev;
51067+}
51068+
51069+EXPORT_SYMBOL(gr_is_capable);
51070+EXPORT_SYMBOL(gr_is_capable_nolog);
51071+EXPORT_SYMBOL(gr_learn_resource);
51072+EXPORT_SYMBOL(gr_set_kernel_label);
51073+#ifdef CONFIG_SECURITY
51074+EXPORT_SYMBOL(gr_check_user_change);
51075+EXPORT_SYMBOL(gr_check_group_change);
51076+#endif
51077diff -urNp linux-2.6.32.43/grsecurity/grsec_exec.c linux-2.6.32.43/grsecurity/grsec_exec.c
51078--- linux-2.6.32.43/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
51079+++ linux-2.6.32.43/grsecurity/grsec_exec.c 2011-04-17 15:56:46.000000000 -0400
51080@@ -0,0 +1,148 @@
51081+#include <linux/kernel.h>
51082+#include <linux/sched.h>
51083+#include <linux/file.h>
51084+#include <linux/binfmts.h>
51085+#include <linux/smp_lock.h>
51086+#include <linux/fs.h>
51087+#include <linux/types.h>
51088+#include <linux/grdefs.h>
51089+#include <linux/grinternal.h>
51090+#include <linux/capability.h>
51091+#include <linux/compat.h>
51092+
51093+#include <asm/uaccess.h>
51094+
51095+#ifdef CONFIG_GRKERNSEC_EXECLOG
51096+static char gr_exec_arg_buf[132];
51097+static DEFINE_MUTEX(gr_exec_arg_mutex);
51098+#endif
51099+
51100+int
51101+gr_handle_nproc(void)
51102+{
51103+#ifdef CONFIG_GRKERNSEC_EXECVE
51104+ const struct cred *cred = current_cred();
51105+ if (grsec_enable_execve && cred->user &&
51106+ (atomic_read(&cred->user->processes) >
51107+ current->signal->rlim[RLIMIT_NPROC].rlim_cur) &&
51108+ !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
51109+ gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
51110+ return -EAGAIN;
51111+ }
51112+#endif
51113+ return 0;
51114+}
51115+
51116+void
51117+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
51118+{
51119+#ifdef CONFIG_GRKERNSEC_EXECLOG
51120+ char *grarg = gr_exec_arg_buf;
51121+ unsigned int i, x, execlen = 0;
51122+ char c;
51123+
51124+ if (!((grsec_enable_execlog && grsec_enable_group &&
51125+ in_group_p(grsec_audit_gid))
51126+ || (grsec_enable_execlog && !grsec_enable_group)))
51127+ return;
51128+
51129+ mutex_lock(&gr_exec_arg_mutex);
51130+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
51131+
51132+ if (unlikely(argv == NULL))
51133+ goto log;
51134+
51135+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
51136+ const char __user *p;
51137+ unsigned int len;
51138+
51139+ if (copy_from_user(&p, argv + i, sizeof(p)))
51140+ goto log;
51141+ if (!p)
51142+ goto log;
51143+ len = strnlen_user(p, 128 - execlen);
51144+ if (len > 128 - execlen)
51145+ len = 128 - execlen;
51146+ else if (len > 0)
51147+ len--;
51148+ if (copy_from_user(grarg + execlen, p, len))
51149+ goto log;
51150+
51151+ /* rewrite unprintable characters */
51152+ for (x = 0; x < len; x++) {
51153+ c = *(grarg + execlen + x);
51154+ if (c < 32 || c > 126)
51155+ *(grarg + execlen + x) = ' ';
51156+ }
51157+
51158+ execlen += len;
51159+ *(grarg + execlen) = ' ';
51160+ *(grarg + execlen + 1) = '\0';
51161+ execlen++;
51162+ }
51163+
51164+ log:
51165+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
51166+ bprm->file->f_path.mnt, grarg);
51167+ mutex_unlock(&gr_exec_arg_mutex);
51168+#endif
51169+ return;
51170+}
51171+
51172+#ifdef CONFIG_COMPAT
51173+void
51174+gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
51175+{
51176+#ifdef CONFIG_GRKERNSEC_EXECLOG
51177+ char *grarg = gr_exec_arg_buf;
51178+ unsigned int i, x, execlen = 0;
51179+ char c;
51180+
51181+ if (!((grsec_enable_execlog && grsec_enable_group &&
51182+ in_group_p(grsec_audit_gid))
51183+ || (grsec_enable_execlog && !grsec_enable_group)))
51184+ return;
51185+
51186+ mutex_lock(&gr_exec_arg_mutex);
51187+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
51188+
51189+ if (unlikely(argv == NULL))
51190+ goto log;
51191+
51192+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
51193+ compat_uptr_t p;
51194+ unsigned int len;
51195+
51196+ if (get_user(p, argv + i))
51197+ goto log;
51198+ len = strnlen_user(compat_ptr(p), 128 - execlen);
51199+ if (len > 128 - execlen)
51200+ len = 128 - execlen;
51201+ else if (len > 0)
51202+ len--;
51203+ else
51204+ goto log;
51205+ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
51206+ goto log;
51207+
51208+ /* rewrite unprintable characters */
51209+ for (x = 0; x < len; x++) {
51210+ c = *(grarg + execlen + x);
51211+ if (c < 32 || c > 126)
51212+ *(grarg + execlen + x) = ' ';
51213+ }
51214+
51215+ execlen += len;
51216+ *(grarg + execlen) = ' ';
51217+ *(grarg + execlen + 1) = '\0';
51218+ execlen++;
51219+ }
51220+
51221+ log:
51222+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
51223+ bprm->file->f_path.mnt, grarg);
51224+ mutex_unlock(&gr_exec_arg_mutex);
51225+#endif
51226+ return;
51227+}
51228+#endif
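The two gr_handle_exec_args variants in grsec_exec.c above use the same argument-capture scheme: at most 128 bytes of argv are copied from user space into a fixed 132-byte buffer, unprintable bytes are rewritten as spaces, and each argument is terminated with a space plus NUL before the result is logged. A minimal user-space sketch of that sanitization step, with a hypothetical helper name and ordinary strings standing in for __user pointers:

#include <stdio.h>
#include <string.h>

/* Copy argv into buf, truncating at 128 bytes total and replacing
 * non-printable characters with spaces, mirroring the patch's loop. */
static void sketch_capture_args(char *buf, size_t bufsz, int argc, char **argv)
{
    size_t execlen = 0;

    memset(buf, 0, bufsz);
    for (int i = 0; i < argc && execlen < 128; i++) {
        size_t len = strlen(argv[i]);

        if (len > 128 - execlen)
            len = 128 - execlen;
        memcpy(buf + execlen, argv[i], len);

        /* rewrite unprintable characters */
        for (size_t x = 0; x < len; x++)
            if (buf[execlen + x] < 32 || buf[execlen + x] > 126)
                buf[execlen + x] = ' ';

        execlen += len;
        buf[execlen++] = ' ';
        buf[execlen] = '\0';
    }
}

int main(int argc, char **argv)
{
    char buf[132];   /* same size as gr_exec_arg_buf in the patch */

    sketch_capture_args(buf, sizeof(buf), argc, argv);
    printf("captured: %s\n", buf);
    return 0;
}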
51229diff -urNp linux-2.6.32.43/grsecurity/grsec_fifo.c linux-2.6.32.43/grsecurity/grsec_fifo.c
51230--- linux-2.6.32.43/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
51231+++ linux-2.6.32.43/grsecurity/grsec_fifo.c 2011-04-17 15:56:46.000000000 -0400
51232@@ -0,0 +1,24 @@
51233+#include <linux/kernel.h>
51234+#include <linux/sched.h>
51235+#include <linux/fs.h>
51236+#include <linux/file.h>
51237+#include <linux/grinternal.h>
51238+
51239+int
51240+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
51241+ const struct dentry *dir, const int flag, const int acc_mode)
51242+{
51243+#ifdef CONFIG_GRKERNSEC_FIFO
51244+ const struct cred *cred = current_cred();
51245+
51246+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
51247+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
51248+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
51249+ (cred->fsuid != dentry->d_inode->i_uid)) {
51250+ if (!inode_permission(dentry->d_inode, acc_mode))
51251+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
51252+ return -EACCES;
51253+ }
51254+#endif
51255+ return 0;
51256+}
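gr_handle_fifo above refuses to open a FIFO that lives in a sticky (+t) directory when the FIFO is owned by neither the directory owner nor the caller, unless O_EXCL was requested. Stripped of the enable flag and the logging, the decision is a pure predicate over the modes and owners involved; a standalone sketch with a hypothetical function name and plain integers in place of inode fields:

#include <fcntl.h>
#include <stdbool.h>
#include <sys/stat.h>

/* Return true if the open should be rejected under the FIFO restriction. */
static bool fifo_open_denied(mode_t fifo_mode, uid_t fifo_uid,
                             mode_t dir_mode, uid_t dir_uid,
                             uid_t caller_fsuid, int open_flags)
{
    return S_ISFIFO(fifo_mode) &&
           !(open_flags & O_EXCL) &&
           (dir_mode & S_ISVTX) &&
           fifo_uid != dir_uid &&
           caller_fsuid != fifo_uid;
}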
51257diff -urNp linux-2.6.32.43/grsecurity/grsec_fork.c linux-2.6.32.43/grsecurity/grsec_fork.c
51258--- linux-2.6.32.43/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
51259+++ linux-2.6.32.43/grsecurity/grsec_fork.c 2011-04-17 15:56:46.000000000 -0400
51260@@ -0,0 +1,23 @@
51261+#include <linux/kernel.h>
51262+#include <linux/sched.h>
51263+#include <linux/grsecurity.h>
51264+#include <linux/grinternal.h>
51265+#include <linux/errno.h>
51266+
51267+void
51268+gr_log_forkfail(const int retval)
51269+{
51270+#ifdef CONFIG_GRKERNSEC_FORKFAIL
51271+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
51272+ switch (retval) {
51273+ case -EAGAIN:
51274+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
51275+ break;
51276+ case -ENOMEM:
51277+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
51278+ break;
51279+ }
51280+ }
51281+#endif
51282+ return;
51283+}
51284diff -urNp linux-2.6.32.43/grsecurity/grsec_init.c linux-2.6.32.43/grsecurity/grsec_init.c
51285--- linux-2.6.32.43/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
51286+++ linux-2.6.32.43/grsecurity/grsec_init.c 2011-06-29 19:35:26.000000000 -0400
51287@@ -0,0 +1,274 @@
51288+#include <linux/kernel.h>
51289+#include <linux/sched.h>
51290+#include <linux/mm.h>
51291+#include <linux/smp_lock.h>
51292+#include <linux/gracl.h>
51293+#include <linux/slab.h>
51294+#include <linux/vmalloc.h>
51295+#include <linux/percpu.h>
51296+#include <linux/module.h>
51297+
51298+int grsec_enable_brute;
51299+int grsec_enable_link;
51300+int grsec_enable_dmesg;
51301+int grsec_enable_harden_ptrace;
51302+int grsec_enable_fifo;
51303+int grsec_enable_execve;
51304+int grsec_enable_execlog;
51305+int grsec_enable_signal;
51306+int grsec_enable_forkfail;
51307+int grsec_enable_audit_ptrace;
51308+int grsec_enable_time;
51309+int grsec_enable_audit_textrel;
51310+int grsec_enable_group;
51311+int grsec_audit_gid;
51312+int grsec_enable_chdir;
51313+int grsec_enable_mount;
51314+int grsec_enable_rofs;
51315+int grsec_enable_chroot_findtask;
51316+int grsec_enable_chroot_mount;
51317+int grsec_enable_chroot_shmat;
51318+int grsec_enable_chroot_fchdir;
51319+int grsec_enable_chroot_double;
51320+int grsec_enable_chroot_pivot;
51321+int grsec_enable_chroot_chdir;
51322+int grsec_enable_chroot_chmod;
51323+int grsec_enable_chroot_mknod;
51324+int grsec_enable_chroot_nice;
51325+int grsec_enable_chroot_execlog;
51326+int grsec_enable_chroot_caps;
51327+int grsec_enable_chroot_sysctl;
51328+int grsec_enable_chroot_unix;
51329+int grsec_enable_tpe;
51330+int grsec_tpe_gid;
51331+int grsec_enable_blackhole;
51332+#ifdef CONFIG_IPV6_MODULE
51333+EXPORT_SYMBOL(grsec_enable_blackhole);
51334+#endif
51335+int grsec_lastack_retries;
51336+int grsec_enable_tpe_all;
51337+int grsec_enable_tpe_invert;
51338+int grsec_enable_socket_all;
51339+int grsec_socket_all_gid;
51340+int grsec_enable_socket_client;
51341+int grsec_socket_client_gid;
51342+int grsec_enable_socket_server;
51343+int grsec_socket_server_gid;
51344+int grsec_resource_logging;
51345+int grsec_disable_privio;
51346+int grsec_enable_log_rwxmaps;
51347+int grsec_lock;
51348+
51349+DEFINE_SPINLOCK(grsec_alert_lock);
51350+unsigned long grsec_alert_wtime = 0;
51351+unsigned long grsec_alert_fyet = 0;
51352+
51353+DEFINE_SPINLOCK(grsec_audit_lock);
51354+
51355+DEFINE_RWLOCK(grsec_exec_file_lock);
51356+
51357+char *gr_shared_page[4];
51358+
51359+char *gr_alert_log_fmt;
51360+char *gr_audit_log_fmt;
51361+char *gr_alert_log_buf;
51362+char *gr_audit_log_buf;
51363+
51364+extern struct gr_arg *gr_usermode;
51365+extern unsigned char *gr_system_salt;
51366+extern unsigned char *gr_system_sum;
51367+
51368+void __init
51369+grsecurity_init(void)
51370+{
51371+ int j;
51372+ /* create the per-cpu shared pages */
51373+
51374+#ifdef CONFIG_X86
51375+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
51376+#endif
51377+
51378+ for (j = 0; j < 4; j++) {
51379+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
51380+ if (gr_shared_page[j] == NULL) {
51381+ panic("Unable to allocate grsecurity shared page");
51382+ return;
51383+ }
51384+ }
51385+
51386+ /* allocate log buffers */
51387+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
51388+ if (!gr_alert_log_fmt) {
51389+ panic("Unable to allocate grsecurity alert log format buffer");
51390+ return;
51391+ }
51392+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
51393+ if (!gr_audit_log_fmt) {
51394+ panic("Unable to allocate grsecurity audit log format buffer");
51395+ return;
51396+ }
51397+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
51398+ if (!gr_alert_log_buf) {
51399+ panic("Unable to allocate grsecurity alert log buffer");
51400+ return;
51401+ }
51402+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
51403+ if (!gr_audit_log_buf) {
51404+ panic("Unable to allocate grsecurity audit log buffer");
51405+ return;
51406+ }
51407+
51408+ /* allocate memory for authentication structure */
51409+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
51410+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
51411+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
51412+
51413+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
51414+ panic("Unable to allocate grsecurity authentication structure");
51415+ return;
51416+ }
51417+
51418+
51419+#ifdef CONFIG_GRKERNSEC_IO
51420+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
51421+ grsec_disable_privio = 1;
51422+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
51423+ grsec_disable_privio = 1;
51424+#else
51425+ grsec_disable_privio = 0;
51426+#endif
51427+#endif
51428+
51429+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
51430+ /* for backward compatibility, tpe_invert always defaults to on if
51431+ enabled in the kernel
51432+ */
51433+ grsec_enable_tpe_invert = 1;
51434+#endif
51435+
51436+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
51437+#ifndef CONFIG_GRKERNSEC_SYSCTL
51438+ grsec_lock = 1;
51439+#endif
51440+
51441+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
51442+ grsec_enable_audit_textrel = 1;
51443+#endif
51444+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
51445+ grsec_enable_log_rwxmaps = 1;
51446+#endif
51447+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
51448+ grsec_enable_group = 1;
51449+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
51450+#endif
51451+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
51452+ grsec_enable_chdir = 1;
51453+#endif
51454+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51455+ grsec_enable_harden_ptrace = 1;
51456+#endif
51457+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51458+ grsec_enable_mount = 1;
51459+#endif
51460+#ifdef CONFIG_GRKERNSEC_LINK
51461+ grsec_enable_link = 1;
51462+#endif
51463+#ifdef CONFIG_GRKERNSEC_BRUTE
51464+ grsec_enable_brute = 1;
51465+#endif
51466+#ifdef CONFIG_GRKERNSEC_DMESG
51467+ grsec_enable_dmesg = 1;
51468+#endif
51469+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
51470+ grsec_enable_blackhole = 1;
51471+ grsec_lastack_retries = 4;
51472+#endif
51473+#ifdef CONFIG_GRKERNSEC_FIFO
51474+ grsec_enable_fifo = 1;
51475+#endif
51476+#ifdef CONFIG_GRKERNSEC_EXECVE
51477+ grsec_enable_execve = 1;
51478+#endif
51479+#ifdef CONFIG_GRKERNSEC_EXECLOG
51480+ grsec_enable_execlog = 1;
51481+#endif
51482+#ifdef CONFIG_GRKERNSEC_SIGNAL
51483+ grsec_enable_signal = 1;
51484+#endif
51485+#ifdef CONFIG_GRKERNSEC_FORKFAIL
51486+ grsec_enable_forkfail = 1;
51487+#endif
51488+#ifdef CONFIG_GRKERNSEC_TIME
51489+ grsec_enable_time = 1;
51490+#endif
51491+#ifdef CONFIG_GRKERNSEC_RESLOG
51492+ grsec_resource_logging = 1;
51493+#endif
51494+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
51495+ grsec_enable_chroot_findtask = 1;
51496+#endif
51497+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
51498+ grsec_enable_chroot_unix = 1;
51499+#endif
51500+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
51501+ grsec_enable_chroot_mount = 1;
51502+#endif
51503+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
51504+ grsec_enable_chroot_fchdir = 1;
51505+#endif
51506+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
51507+ grsec_enable_chroot_shmat = 1;
51508+#endif
51509+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
51510+ grsec_enable_audit_ptrace = 1;
51511+#endif
51512+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
51513+ grsec_enable_chroot_double = 1;
51514+#endif
51515+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
51516+ grsec_enable_chroot_pivot = 1;
51517+#endif
51518+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
51519+ grsec_enable_chroot_chdir = 1;
51520+#endif
51521+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
51522+ grsec_enable_chroot_chmod = 1;
51523+#endif
51524+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
51525+ grsec_enable_chroot_mknod = 1;
51526+#endif
51527+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
51528+ grsec_enable_chroot_nice = 1;
51529+#endif
51530+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
51531+ grsec_enable_chroot_execlog = 1;
51532+#endif
51533+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
51534+ grsec_enable_chroot_caps = 1;
51535+#endif
51536+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
51537+ grsec_enable_chroot_sysctl = 1;
51538+#endif
51539+#ifdef CONFIG_GRKERNSEC_TPE
51540+ grsec_enable_tpe = 1;
51541+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
51542+#ifdef CONFIG_GRKERNSEC_TPE_ALL
51543+ grsec_enable_tpe_all = 1;
51544+#endif
51545+#endif
51546+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
51547+ grsec_enable_socket_all = 1;
51548+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
51549+#endif
51550+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
51551+ grsec_enable_socket_client = 1;
51552+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
51553+#endif
51554+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
51555+ grsec_enable_socket_server = 1;
51556+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
51557+#endif
51558+#endif
51559+
51560+ return;
51561+}
51562diff -urNp linux-2.6.32.43/grsecurity/grsec_link.c linux-2.6.32.43/grsecurity/grsec_link.c
51563--- linux-2.6.32.43/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
51564+++ linux-2.6.32.43/grsecurity/grsec_link.c 2011-04-17 15:56:46.000000000 -0400
51565@@ -0,0 +1,43 @@
51566+#include <linux/kernel.h>
51567+#include <linux/sched.h>
51568+#include <linux/fs.h>
51569+#include <linux/file.h>
51570+#include <linux/grinternal.h>
51571+
51572+int
51573+gr_handle_follow_link(const struct inode *parent,
51574+ const struct inode *inode,
51575+ const struct dentry *dentry, const struct vfsmount *mnt)
51576+{
51577+#ifdef CONFIG_GRKERNSEC_LINK
51578+ const struct cred *cred = current_cred();
51579+
51580+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
51581+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
51582+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
51583+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
51584+ return -EACCES;
51585+ }
51586+#endif
51587+ return 0;
51588+}
51589+
51590+int
51591+gr_handle_hardlink(const struct dentry *dentry,
51592+ const struct vfsmount *mnt,
51593+ struct inode *inode, const int mode, const char *to)
51594+{
51595+#ifdef CONFIG_GRKERNSEC_LINK
51596+ const struct cred *cred = current_cred();
51597+
51598+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
51599+ (!S_ISREG(mode) || (mode & S_ISUID) ||
51600+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
51601+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
51602+ !capable(CAP_FOWNER) && cred->uid) {
51603+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
51604+ return -EPERM;
51605+ }
51606+#endif
51607+ return 0;
51608+}
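gr_handle_follow_link above implements the sticky-symlink restriction: following a symlink that sits in a world-writable, sticky directory is refused when the link is owned by neither the directory owner nor the process' fsuid. Ignoring the runtime enable flag, the check reads as a pure predicate; a sketch with a hypothetical function name and plain mode/uid arguments in place of inode fields:

#include <stdbool.h>
#include <sys/stat.h>

/* Return true if following the symlink should be rejected. */
static bool symlink_follow_denied(mode_t parent_mode, uid_t parent_uid,
                                  mode_t link_mode, uid_t link_uid,
                                  uid_t caller_fsuid)
{
    return S_ISLNK(link_mode) &&
           (parent_mode & S_ISVTX) &&
           (parent_mode & S_IWOTH) &&
           parent_uid != link_uid &&
           caller_fsuid != link_uid;
}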
51609diff -urNp linux-2.6.32.43/grsecurity/grsec_log.c linux-2.6.32.43/grsecurity/grsec_log.c
51610--- linux-2.6.32.43/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
51611+++ linux-2.6.32.43/grsecurity/grsec_log.c 2011-05-10 21:58:49.000000000 -0400
51612@@ -0,0 +1,310 @@
51613+#include <linux/kernel.h>
51614+#include <linux/sched.h>
51615+#include <linux/file.h>
51616+#include <linux/tty.h>
51617+#include <linux/fs.h>
51618+#include <linux/grinternal.h>
51619+
51620+#ifdef CONFIG_TREE_PREEMPT_RCU
51621+#define DISABLE_PREEMPT() preempt_disable()
51622+#define ENABLE_PREEMPT() preempt_enable()
51623+#else
51624+#define DISABLE_PREEMPT()
51625+#define ENABLE_PREEMPT()
51626+#endif
51627+
51628+#define BEGIN_LOCKS(x) \
51629+ DISABLE_PREEMPT(); \
51630+ rcu_read_lock(); \
51631+ read_lock(&tasklist_lock); \
51632+ read_lock(&grsec_exec_file_lock); \
51633+ if (x != GR_DO_AUDIT) \
51634+ spin_lock(&grsec_alert_lock); \
51635+ else \
51636+ spin_lock(&grsec_audit_lock)
51637+
51638+#define END_LOCKS(x) \
51639+ if (x != GR_DO_AUDIT) \
51640+ spin_unlock(&grsec_alert_lock); \
51641+ else \
51642+ spin_unlock(&grsec_audit_lock); \
51643+ read_unlock(&grsec_exec_file_lock); \
51644+ read_unlock(&tasklist_lock); \
51645+ rcu_read_unlock(); \
51646+ ENABLE_PREEMPT(); \
51647+ if (x == GR_DONT_AUDIT) \
51648+ gr_handle_alertkill(current)
51649+
51650+enum {
51651+ FLOODING,
51652+ NO_FLOODING
51653+};
51654+
51655+extern char *gr_alert_log_fmt;
51656+extern char *gr_audit_log_fmt;
51657+extern char *gr_alert_log_buf;
51658+extern char *gr_audit_log_buf;
51659+
51660+static int gr_log_start(int audit)
51661+{
51662+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
51663+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
51664+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51665+
51666+ if (audit == GR_DO_AUDIT)
51667+ goto set_fmt;
51668+
51669+ if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
51670+ grsec_alert_wtime = jiffies;
51671+ grsec_alert_fyet = 0;
51672+ } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
51673+ grsec_alert_fyet++;
51674+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
51675+ grsec_alert_wtime = jiffies;
51676+ grsec_alert_fyet++;
51677+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
51678+ return FLOODING;
51679+ } else return FLOODING;
51680+
51681+set_fmt:
51682+ memset(buf, 0, PAGE_SIZE);
51683+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
51684+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
51685+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
51686+ } else if (current->signal->curr_ip) {
51687+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
51688+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
51689+ } else if (gr_acl_is_enabled()) {
51690+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
51691+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
51692+ } else {
51693+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
51694+ strcpy(buf, fmt);
51695+ }
51696+
51697+ return NO_FLOODING;
51698+}
51699+
51700+static void gr_log_middle(int audit, const char *msg, va_list ap)
51701+ __attribute__ ((format (printf, 2, 0)));
51702+
51703+static void gr_log_middle(int audit, const char *msg, va_list ap)
51704+{
51705+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51706+ unsigned int len = strlen(buf);
51707+
51708+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
51709+
51710+ return;
51711+}
51712+
51713+static void gr_log_middle_varargs(int audit, const char *msg, ...)
51714+ __attribute__ ((format (printf, 2, 3)));
51715+
51716+static void gr_log_middle_varargs(int audit, const char *msg, ...)
51717+{
51718+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51719+ unsigned int len = strlen(buf);
51720+ va_list ap;
51721+
51722+ va_start(ap, msg);
51723+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
51724+ va_end(ap);
51725+
51726+ return;
51727+}
51728+
51729+static void gr_log_end(int audit)
51730+{
51731+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
51732+ unsigned int len = strlen(buf);
51733+
51734+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
51735+ printk("%s\n", buf);
51736+
51737+ return;
51738+}
51739+
51740+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
51741+{
51742+ int logtype;
51743+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
51744+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
51745+ void *voidptr = NULL;
51746+ int num1 = 0, num2 = 0;
51747+ unsigned long ulong1 = 0, ulong2 = 0;
51748+ struct dentry *dentry = NULL;
51749+ struct vfsmount *mnt = NULL;
51750+ struct file *file = NULL;
51751+ struct task_struct *task = NULL;
51752+ const struct cred *cred, *pcred;
51753+ va_list ap;
51754+
51755+ BEGIN_LOCKS(audit);
51756+ logtype = gr_log_start(audit);
51757+ if (logtype == FLOODING) {
51758+ END_LOCKS(audit);
51759+ return;
51760+ }
51761+ va_start(ap, argtypes);
51762+ switch (argtypes) {
51763+ case GR_TTYSNIFF:
51764+ task = va_arg(ap, struct task_struct *);
51765+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
51766+ break;
51767+ case GR_SYSCTL_HIDDEN:
51768+ str1 = va_arg(ap, char *);
51769+ gr_log_middle_varargs(audit, msg, result, str1);
51770+ break;
51771+ case GR_RBAC:
51772+ dentry = va_arg(ap, struct dentry *);
51773+ mnt = va_arg(ap, struct vfsmount *);
51774+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
51775+ break;
51776+ case GR_RBAC_STR:
51777+ dentry = va_arg(ap, struct dentry *);
51778+ mnt = va_arg(ap, struct vfsmount *);
51779+ str1 = va_arg(ap, char *);
51780+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
51781+ break;
51782+ case GR_STR_RBAC:
51783+ str1 = va_arg(ap, char *);
51784+ dentry = va_arg(ap, struct dentry *);
51785+ mnt = va_arg(ap, struct vfsmount *);
51786+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
51787+ break;
51788+ case GR_RBAC_MODE2:
51789+ dentry = va_arg(ap, struct dentry *);
51790+ mnt = va_arg(ap, struct vfsmount *);
51791+ str1 = va_arg(ap, char *);
51792+ str2 = va_arg(ap, char *);
51793+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
51794+ break;
51795+ case GR_RBAC_MODE3:
51796+ dentry = va_arg(ap, struct dentry *);
51797+ mnt = va_arg(ap, struct vfsmount *);
51798+ str1 = va_arg(ap, char *);
51799+ str2 = va_arg(ap, char *);
51800+ str3 = va_arg(ap, char *);
51801+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
51802+ break;
51803+ case GR_FILENAME:
51804+ dentry = va_arg(ap, struct dentry *);
51805+ mnt = va_arg(ap, struct vfsmount *);
51806+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
51807+ break;
51808+ case GR_STR_FILENAME:
51809+ str1 = va_arg(ap, char *);
51810+ dentry = va_arg(ap, struct dentry *);
51811+ mnt = va_arg(ap, struct vfsmount *);
51812+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
51813+ break;
51814+ case GR_FILENAME_STR:
51815+ dentry = va_arg(ap, struct dentry *);
51816+ mnt = va_arg(ap, struct vfsmount *);
51817+ str1 = va_arg(ap, char *);
51818+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
51819+ break;
51820+ case GR_FILENAME_TWO_INT:
51821+ dentry = va_arg(ap, struct dentry *);
51822+ mnt = va_arg(ap, struct vfsmount *);
51823+ num1 = va_arg(ap, int);
51824+ num2 = va_arg(ap, int);
51825+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
51826+ break;
51827+ case GR_FILENAME_TWO_INT_STR:
51828+ dentry = va_arg(ap, struct dentry *);
51829+ mnt = va_arg(ap, struct vfsmount *);
51830+ num1 = va_arg(ap, int);
51831+ num2 = va_arg(ap, int);
51832+ str1 = va_arg(ap, char *);
51833+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
51834+ break;
51835+ case GR_TEXTREL:
51836+ file = va_arg(ap, struct file *);
51837+ ulong1 = va_arg(ap, unsigned long);
51838+ ulong2 = va_arg(ap, unsigned long);
51839+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
51840+ break;
51841+ case GR_PTRACE:
51842+ task = va_arg(ap, struct task_struct *);
51843+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
51844+ break;
51845+ case GR_RESOURCE:
51846+ task = va_arg(ap, struct task_struct *);
51847+ cred = __task_cred(task);
51848+ pcred = __task_cred(task->real_parent);
51849+ ulong1 = va_arg(ap, unsigned long);
51850+ str1 = va_arg(ap, char *);
51851+ ulong2 = va_arg(ap, unsigned long);
51852+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51853+ break;
51854+ case GR_CAP:
51855+ task = va_arg(ap, struct task_struct *);
51856+ cred = __task_cred(task);
51857+ pcred = __task_cred(task->real_parent);
51858+ str1 = va_arg(ap, char *);
51859+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51860+ break;
51861+ case GR_SIG:
51862+ str1 = va_arg(ap, char *);
51863+ voidptr = va_arg(ap, void *);
51864+ gr_log_middle_varargs(audit, msg, str1, voidptr);
51865+ break;
51866+ case GR_SIG2:
51867+ task = va_arg(ap, struct task_struct *);
51868+ cred = __task_cred(task);
51869+ pcred = __task_cred(task->real_parent);
51870+ num1 = va_arg(ap, int);
51871+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51872+ break;
51873+ case GR_CRASH1:
51874+ task = va_arg(ap, struct task_struct *);
51875+ cred = __task_cred(task);
51876+ pcred = __task_cred(task->real_parent);
51877+ ulong1 = va_arg(ap, unsigned long);
51878+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
51879+ break;
51880+ case GR_CRASH2:
51881+ task = va_arg(ap, struct task_struct *);
51882+ cred = __task_cred(task);
51883+ pcred = __task_cred(task->real_parent);
51884+ ulong1 = va_arg(ap, unsigned long);
51885+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
51886+ break;
51887+ case GR_RWXMAP:
51888+ file = va_arg(ap, struct file *);
51889+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
51890+ break;
51891+ case GR_PSACCT:
51892+ {
51893+ unsigned int wday, cday;
51894+ __u8 whr, chr;
51895+ __u8 wmin, cmin;
51896+ __u8 wsec, csec;
51897+ char cur_tty[64] = { 0 };
51898+ char parent_tty[64] = { 0 };
51899+
51900+ task = va_arg(ap, struct task_struct *);
51901+ wday = va_arg(ap, unsigned int);
51902+ cday = va_arg(ap, unsigned int);
51903+ whr = va_arg(ap, int);
51904+ chr = va_arg(ap, int);
51905+ wmin = va_arg(ap, int);
51906+ cmin = va_arg(ap, int);
51907+ wsec = va_arg(ap, int);
51908+ csec = va_arg(ap, int);
51909+ ulong1 = va_arg(ap, unsigned long);
51910+ cred = __task_cred(task);
51911+ pcred = __task_cred(task->real_parent);
51912+
51913+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
51914+ }
51915+ break;
51916+ default:
51917+ gr_log_middle(audit, msg, ap);
51918+ }
51919+ va_end(ap);
51920+ gr_log_end(audit);
51921+ END_LOCKS(audit);
51922+}
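gr_log_start above rate-limits alert messages: roughly CONFIG_GRKERNSEC_FLOODBURST alerts are accepted within a CONFIG_GRKERNSEC_FLOODTIME-second window, one "logging disabled" notice is printed, and further alerts are dropped until a new window opens; audit messages bypass the limiter entirely. A simplified standalone sketch of the same burst limiter, with wall-clock seconds standing in for jiffies and hypothetical constants and function name:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define FLOODTIME  10   /* seconds, stands in for CONFIG_GRKERNSEC_FLOODTIME */
#define FLOODBURST 6    /* messages, stands in for CONFIG_GRKERNSEC_FLOODBURST */

static time_t window_start;
static unsigned int sent_in_window;

/* Return true if this alert may be emitted, false if we are flooding. */
static bool alert_allowed(void)
{
    time_t now = time(NULL);

    if (!window_start || now - window_start > FLOODTIME) {
        window_start = now;          /* open a fresh window */
        sent_in_window = 0;
        return true;
    }
    if (sent_in_window < FLOODBURST) {
        sent_in_window++;
        return true;
    }
    if (sent_in_window == FLOODBURST) {
        sent_in_window++;            /* emit the "disabled" notice exactly once */
        window_start = now;
        printf("more alerts, logging disabled for %d seconds\n", FLOODTIME);
    }
    return false;
}

int main(void)
{
    for (int i = 0; i < 10; i++)
        printf("alert %d: %s\n", i, alert_allowed() ? "logged" : "dropped");
    return 0;
}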
51923diff -urNp linux-2.6.32.43/grsecurity/grsec_mem.c linux-2.6.32.43/grsecurity/grsec_mem.c
51924--- linux-2.6.32.43/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
51925+++ linux-2.6.32.43/grsecurity/grsec_mem.c 2011-04-17 15:56:46.000000000 -0400
51926@@ -0,0 +1,33 @@
51927+#include <linux/kernel.h>
51928+#include <linux/sched.h>
51929+#include <linux/mm.h>
51930+#include <linux/mman.h>
51931+#include <linux/grinternal.h>
51932+
51933+void
51934+gr_handle_ioperm(void)
51935+{
51936+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
51937+ return;
51938+}
51939+
51940+void
51941+gr_handle_iopl(void)
51942+{
51943+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
51944+ return;
51945+}
51946+
51947+void
51948+gr_handle_mem_readwrite(u64 from, u64 to)
51949+{
51950+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
51951+ return;
51952+}
51953+
51954+void
51955+gr_handle_vm86(void)
51956+{
51957+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
51958+ return;
51959+}
51960diff -urNp linux-2.6.32.43/grsecurity/grsec_mount.c linux-2.6.32.43/grsecurity/grsec_mount.c
51961--- linux-2.6.32.43/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
51962+++ linux-2.6.32.43/grsecurity/grsec_mount.c 2011-06-20 19:47:03.000000000 -0400
51963@@ -0,0 +1,62 @@
51964+#include <linux/kernel.h>
51965+#include <linux/sched.h>
51966+#include <linux/mount.h>
51967+#include <linux/grsecurity.h>
51968+#include <linux/grinternal.h>
51969+
51970+void
51971+gr_log_remount(const char *devname, const int retval)
51972+{
51973+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51974+ if (grsec_enable_mount && (retval >= 0))
51975+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
51976+#endif
51977+ return;
51978+}
51979+
51980+void
51981+gr_log_unmount(const char *devname, const int retval)
51982+{
51983+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51984+ if (grsec_enable_mount && (retval >= 0))
51985+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
51986+#endif
51987+ return;
51988+}
51989+
51990+void
51991+gr_log_mount(const char *from, const char *to, const int retval)
51992+{
51993+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
51994+ if (grsec_enable_mount && (retval >= 0))
51995+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
51996+#endif
51997+ return;
51998+}
51999+
52000+int
52001+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
52002+{
52003+#ifdef CONFIG_GRKERNSEC_ROFS
52004+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
52005+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
52006+ return -EPERM;
52007+ } else
52008+ return 0;
52009+#endif
52010+ return 0;
52011+}
52012+
52013+int
52014+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
52015+{
52016+#ifdef CONFIG_GRKERNSEC_ROFS
52017+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
52018+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
52019+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
52020+ return -EPERM;
52021+ } else
52022+ return 0;
52023+#endif
52024+ return 0;
52025+}
52026diff -urNp linux-2.6.32.43/grsecurity/grsec_pax.c linux-2.6.32.43/grsecurity/grsec_pax.c
52027--- linux-2.6.32.43/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
52028+++ linux-2.6.32.43/grsecurity/grsec_pax.c 2011-04-17 15:56:46.000000000 -0400
52029@@ -0,0 +1,36 @@
52030+#include <linux/kernel.h>
52031+#include <linux/sched.h>
52032+#include <linux/mm.h>
52033+#include <linux/file.h>
52034+#include <linux/grinternal.h>
52035+#include <linux/grsecurity.h>
52036+
52037+void
52038+gr_log_textrel(struct vm_area_struct * vma)
52039+{
52040+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
52041+ if (grsec_enable_audit_textrel)
52042+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
52043+#endif
52044+ return;
52045+}
52046+
52047+void
52048+gr_log_rwxmmap(struct file *file)
52049+{
52050+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52051+ if (grsec_enable_log_rwxmaps)
52052+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
52053+#endif
52054+ return;
52055+}
52056+
52057+void
52058+gr_log_rwxmprotect(struct file *file)
52059+{
52060+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52061+ if (grsec_enable_log_rwxmaps)
52062+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
52063+#endif
52064+ return;
52065+}
52066diff -urNp linux-2.6.32.43/grsecurity/grsec_ptrace.c linux-2.6.32.43/grsecurity/grsec_ptrace.c
52067--- linux-2.6.32.43/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
52068+++ linux-2.6.32.43/grsecurity/grsec_ptrace.c 2011-04-17 15:56:46.000000000 -0400
52069@@ -0,0 +1,14 @@
52070+#include <linux/kernel.h>
52071+#include <linux/sched.h>
52072+#include <linux/grinternal.h>
52073+#include <linux/grsecurity.h>
52074+
52075+void
52076+gr_audit_ptrace(struct task_struct *task)
52077+{
52078+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
52079+ if (grsec_enable_audit_ptrace)
52080+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
52081+#endif
52082+ return;
52083+}
52084diff -urNp linux-2.6.32.43/grsecurity/grsec_sig.c linux-2.6.32.43/grsecurity/grsec_sig.c
52085--- linux-2.6.32.43/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
52086+++ linux-2.6.32.43/grsecurity/grsec_sig.c 2011-06-29 19:40:31.000000000 -0400
52087@@ -0,0 +1,205 @@
52088+#include <linux/kernel.h>
52089+#include <linux/sched.h>
52090+#include <linux/delay.h>
52091+#include <linux/grsecurity.h>
52092+#include <linux/grinternal.h>
52093+#include <linux/hardirq.h>
52094+
52095+char *signames[] = {
52096+ [SIGSEGV] = "Segmentation fault",
52097+ [SIGILL] = "Illegal instruction",
52098+ [SIGABRT] = "Abort",
52099+ [SIGBUS] = "Invalid alignment/Bus error"
52100+};
52101+
52102+void
52103+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
52104+{
52105+#ifdef CONFIG_GRKERNSEC_SIGNAL
52106+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
52107+ (sig == SIGABRT) || (sig == SIGBUS))) {
52108+ if (t->pid == current->pid) {
52109+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
52110+ } else {
52111+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
52112+ }
52113+ }
52114+#endif
52115+ return;
52116+}
52117+
52118+int
52119+gr_handle_signal(const struct task_struct *p, const int sig)
52120+{
52121+#ifdef CONFIG_GRKERNSEC
52122+ if (current->pid > 1 && gr_check_protected_task(p)) {
52123+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
52124+ return -EPERM;
52125+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
52126+ return -EPERM;
52127+ }
52128+#endif
52129+ return 0;
52130+}
52131+
52132+#ifdef CONFIG_GRKERNSEC
52133+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
52134+
52135+int gr_fake_force_sig(int sig, struct task_struct *t)
52136+{
52137+ unsigned long int flags;
52138+ int ret, blocked, ignored;
52139+ struct k_sigaction *action;
52140+
52141+ spin_lock_irqsave(&t->sighand->siglock, flags);
52142+ action = &t->sighand->action[sig-1];
52143+ ignored = action->sa.sa_handler == SIG_IGN;
52144+ blocked = sigismember(&t->blocked, sig);
52145+ if (blocked || ignored) {
52146+ action->sa.sa_handler = SIG_DFL;
52147+ if (blocked) {
52148+ sigdelset(&t->blocked, sig);
52149+ recalc_sigpending_and_wake(t);
52150+ }
52151+ }
52152+ if (action->sa.sa_handler == SIG_DFL)
52153+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
52154+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
52155+
52156+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
52157+
52158+ return ret;
52159+}
52160+#endif
52161+
52162+#ifdef CONFIG_GRKERNSEC_BRUTE
52163+#define GR_USER_BAN_TIME (15 * 60)
52164+
52165+static int __get_dumpable(unsigned long mm_flags)
52166+{
52167+ int ret;
52168+
52169+ ret = mm_flags & MMF_DUMPABLE_MASK;
52170+ return (ret >= 2) ? 2 : ret;
52171+}
52172+#endif
52173+
52174+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
52175+{
52176+#ifdef CONFIG_GRKERNSEC_BRUTE
52177+ uid_t uid = 0;
52178+
52179+ if (!grsec_enable_brute)
52180+ return;
52181+
52182+ rcu_read_lock();
52183+ read_lock(&tasklist_lock);
52184+ read_lock(&grsec_exec_file_lock);
52185+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
52186+ p->real_parent->brute = 1;
52187+ else {
52188+ const struct cred *cred = __task_cred(p), *cred2;
52189+ struct task_struct *tsk, *tsk2;
52190+
52191+ if (!__get_dumpable(mm_flags) && cred->uid) {
52192+ struct user_struct *user;
52193+
52194+ uid = cred->uid;
52195+
52196+ /* this is put upon execution past expiration */
52197+ user = find_user(uid);
52198+ if (user == NULL)
52199+ goto unlock;
52200+ user->banned = 1;
52201+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
52202+ if (user->ban_expires == ~0UL)
52203+ user->ban_expires--;
52204+
52205+ do_each_thread(tsk2, tsk) {
52206+ cred2 = __task_cred(tsk);
52207+ if (tsk != p && cred2->uid == uid)
52208+ gr_fake_force_sig(SIGKILL, tsk);
52209+ } while_each_thread(tsk2, tsk);
52210+ }
52211+ }
52212+unlock:
52213+ read_unlock(&grsec_exec_file_lock);
52214+ read_unlock(&tasklist_lock);
52215+ rcu_read_unlock();
52216+
52217+ if (uid)
52218+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
52219+#endif
52220+ return;
52221+}
52222+
52223+void gr_handle_brute_check(void)
52224+{
52225+#ifdef CONFIG_GRKERNSEC_BRUTE
52226+ if (current->brute)
52227+ msleep(30 * 1000);
52228+#endif
52229+ return;
52230+}
52231+
52232+void gr_handle_kernel_exploit(void)
52233+{
52234+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
52235+ const struct cred *cred;
52236+ struct task_struct *tsk, *tsk2;
52237+ struct user_struct *user;
52238+ uid_t uid;
52239+
52240+ if (in_irq() || in_serving_softirq() || in_nmi())
52241+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
52242+
52243+ uid = current_uid();
52244+
52245+ if (uid == 0)
52246+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
52247+ else {
52248+ /* kill all the processes of this user, hold a reference
52249+ to their creds struct, and prevent them from creating
52250+ another process until system reset
52251+ */
52252+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
52253+ /* we intentionally leak this ref */
52254+ user = get_uid(current->cred->user);
52255+ if (user) {
52256+ user->banned = 1;
52257+ user->ban_expires = ~0UL;
52258+ }
52259+
52260+ read_lock(&tasklist_lock);
52261+ do_each_thread(tsk2, tsk) {
52262+ cred = __task_cred(tsk);
52263+ if (cred->uid == uid)
52264+ gr_fake_force_sig(SIGKILL, tsk);
52265+ } while_each_thread(tsk2, tsk);
52266+ read_unlock(&tasklist_lock);
52267+ }
52268+#endif
52269+}
52270+
52271+int __gr_process_user_ban(struct user_struct *user)
52272+{
52273+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52274+ if (unlikely(user->banned)) {
52275+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
52276+ user->banned = 0;
52277+ user->ban_expires = 0;
52278+ free_uid(user);
52279+ } else
52280+ return -EPERM;
52281+ }
52282+#endif
52283+ return 0;
52284+}
52285+
52286+int gr_process_user_ban(void)
52287+{
52288+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
52289+ return __gr_process_user_ban(current->cred->user);
52290+#endif
52291+ return 0;
52292+}
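The bruteforce path in grsec_sig.c above bans a non-root uid for GR_USER_BAN_TIME (15 minutes) after a non-dumpable crash, the kernel-exploit path bans until reboot by setting ban_expires to ~0UL, and __gr_process_user_ban lifts a temporary ban once the expiry time has passed. The expiry bookkeeping in isolation, with a hypothetical struct and function names and time() standing in for get_seconds():

#include <stdbool.h>
#include <time.h>

#define BAN_TIME (15 * 60)              /* GR_USER_BAN_TIME in the patch */

struct banned_user {
    bool   banned;
    time_t ban_expires;                 /* (time_t)-1 means "until reboot" */
};

static void ban_user(struct banned_user *u, bool permanent)
{
    u->banned = true;
    u->ban_expires = permanent ? (time_t)-1 : time(NULL) + BAN_TIME;
}

/* Mirrors __gr_process_user_ban: return -1 (EPERM) while banned, 0 otherwise. */
static int process_user_ban(struct banned_user *u)
{
    if (u->banned) {
        if (u->ban_expires != (time_t)-1 && time(NULL) >= u->ban_expires) {
            u->banned = false;          /* ban expired, clear it */
            u->ban_expires = 0;
        } else {
            return -1;
        }
    }
    return 0;
}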
52293diff -urNp linux-2.6.32.43/grsecurity/grsec_sock.c linux-2.6.32.43/grsecurity/grsec_sock.c
52294--- linux-2.6.32.43/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
52295+++ linux-2.6.32.43/grsecurity/grsec_sock.c 2011-04-17 15:56:46.000000000 -0400
52296@@ -0,0 +1,275 @@
52297+#include <linux/kernel.h>
52298+#include <linux/module.h>
52299+#include <linux/sched.h>
52300+#include <linux/file.h>
52301+#include <linux/net.h>
52302+#include <linux/in.h>
52303+#include <linux/ip.h>
52304+#include <net/sock.h>
52305+#include <net/inet_sock.h>
52306+#include <linux/grsecurity.h>
52307+#include <linux/grinternal.h>
52308+#include <linux/gracl.h>
52309+
52310+kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
52311+EXPORT_SYMBOL(gr_cap_rtnetlink);
52312+
52313+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
52314+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
52315+
52316+EXPORT_SYMBOL(gr_search_udp_recvmsg);
52317+EXPORT_SYMBOL(gr_search_udp_sendmsg);
52318+
52319+#ifdef CONFIG_UNIX_MODULE
52320+EXPORT_SYMBOL(gr_acl_handle_unix);
52321+EXPORT_SYMBOL(gr_acl_handle_mknod);
52322+EXPORT_SYMBOL(gr_handle_chroot_unix);
52323+EXPORT_SYMBOL(gr_handle_create);
52324+#endif
52325+
52326+#ifdef CONFIG_GRKERNSEC
52327+#define gr_conn_table_size 32749
52328+struct conn_table_entry {
52329+ struct conn_table_entry *next;
52330+ struct signal_struct *sig;
52331+};
52332+
52333+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
52334+DEFINE_SPINLOCK(gr_conn_table_lock);
52335+
52336+extern const char * gr_socktype_to_name(unsigned char type);
52337+extern const char * gr_proto_to_name(unsigned char proto);
52338+extern const char * gr_sockfamily_to_name(unsigned char family);
52339+
52340+static __inline__ int
52341+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
52342+{
52343+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
52344+}
52345+
52346+static __inline__ int
52347+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
52348+ __u16 sport, __u16 dport)
52349+{
52350+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
52351+ sig->gr_sport == sport && sig->gr_dport == dport))
52352+ return 1;
52353+ else
52354+ return 0;
52355+}
52356+
52357+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
52358+{
52359+ struct conn_table_entry **match;
52360+ unsigned int index;
52361+
52362+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
52363+ sig->gr_sport, sig->gr_dport,
52364+ gr_conn_table_size);
52365+
52366+ newent->sig = sig;
52367+
52368+ match = &gr_conn_table[index];
52369+ newent->next = *match;
52370+ *match = newent;
52371+
52372+ return;
52373+}
52374+
52375+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
52376+{
52377+ struct conn_table_entry *match, *last = NULL;
52378+ unsigned int index;
52379+
52380+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
52381+ sig->gr_sport, sig->gr_dport,
52382+ gr_conn_table_size);
52383+
52384+ match = gr_conn_table[index];
52385+ while (match && !conn_match(match->sig,
52386+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
52387+ sig->gr_dport)) {
52388+ last = match;
52389+ match = match->next;
52390+ }
52391+
52392+ if (match) {
52393+ if (last)
52394+ last->next = match->next;
52395+ else
52396+ gr_conn_table[index] = NULL;
52397+ kfree(match);
52398+ }
52399+
52400+ return;
52401+}
52402+
52403+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
52404+ __u16 sport, __u16 dport)
52405+{
52406+ struct conn_table_entry *match;
52407+ unsigned int index;
52408+
52409+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
52410+
52411+ match = gr_conn_table[index];
52412+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
52413+ match = match->next;
52414+
52415+ if (match)
52416+ return match->sig;
52417+ else
52418+ return NULL;
52419+}
52420+
52421+#endif
52422+
52423+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
52424+{
52425+#ifdef CONFIG_GRKERNSEC
52426+ struct signal_struct *sig = task->signal;
52427+ struct conn_table_entry *newent;
52428+
52429+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
52430+ if (newent == NULL)
52431+ return;
52432+ /* no bh lock needed since we are called with bh disabled */
52433+ spin_lock(&gr_conn_table_lock);
52434+ gr_del_task_from_ip_table_nolock(sig);
52435+ sig->gr_saddr = inet->rcv_saddr;
52436+ sig->gr_daddr = inet->daddr;
52437+ sig->gr_sport = inet->sport;
52438+ sig->gr_dport = inet->dport;
52439+ gr_add_to_task_ip_table_nolock(sig, newent);
52440+ spin_unlock(&gr_conn_table_lock);
52441+#endif
52442+ return;
52443+}
52444+
52445+void gr_del_task_from_ip_table(struct task_struct *task)
52446+{
52447+#ifdef CONFIG_GRKERNSEC
52448+ spin_lock_bh(&gr_conn_table_lock);
52449+ gr_del_task_from_ip_table_nolock(task->signal);
52450+ spin_unlock_bh(&gr_conn_table_lock);
52451+#endif
52452+ return;
52453+}
52454+
52455+void
52456+gr_attach_curr_ip(const struct sock *sk)
52457+{
52458+#ifdef CONFIG_GRKERNSEC
52459+ struct signal_struct *p, *set;
52460+ const struct inet_sock *inet = inet_sk(sk);
52461+
52462+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
52463+ return;
52464+
52465+ set = current->signal;
52466+
52467+ spin_lock_bh(&gr_conn_table_lock);
52468+ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
52469+ inet->dport, inet->sport);
52470+ if (unlikely(p != NULL)) {
52471+ set->curr_ip = p->curr_ip;
52472+ set->used_accept = 1;
52473+ gr_del_task_from_ip_table_nolock(p);
52474+ spin_unlock_bh(&gr_conn_table_lock);
52475+ return;
52476+ }
52477+ spin_unlock_bh(&gr_conn_table_lock);
52478+
52479+ set->curr_ip = inet->daddr;
52480+ set->used_accept = 1;
52481+#endif
52482+ return;
52483+}
52484+
52485+int
52486+gr_handle_sock_all(const int family, const int type, const int protocol)
52487+{
52488+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
52489+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
52490+ (family != AF_UNIX)) {
52491+ if (family == AF_INET)
52492+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
52493+ else
52494+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
52495+ return -EACCES;
52496+ }
52497+#endif
52498+ return 0;
52499+}
52500+
52501+int
52502+gr_handle_sock_server(const struct sockaddr *sck)
52503+{
52504+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52505+ if (grsec_enable_socket_server &&
52506+ in_group_p(grsec_socket_server_gid) &&
52507+ sck && (sck->sa_family != AF_UNIX) &&
52508+ (sck->sa_family != AF_LOCAL)) {
52509+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
52510+ return -EACCES;
52511+ }
52512+#endif
52513+ return 0;
52514+}
52515+
52516+int
52517+gr_handle_sock_server_other(const struct sock *sck)
52518+{
52519+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52520+ if (grsec_enable_socket_server &&
52521+ in_group_p(grsec_socket_server_gid) &&
52522+ sck && (sck->sk_family != AF_UNIX) &&
52523+ (sck->sk_family != AF_LOCAL)) {
52524+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
52525+ return -EACCES;
52526+ }
52527+#endif
52528+ return 0;
52529+}
52530+
52531+int
52532+gr_handle_sock_client(const struct sockaddr *sck)
52533+{
52534+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52535+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
52536+ sck && (sck->sa_family != AF_UNIX) &&
52537+ (sck->sa_family != AF_LOCAL)) {
52538+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
52539+ return -EACCES;
52540+ }
52541+#endif
52542+ return 0;
52543+}
52544+
52545+kernel_cap_t
52546+gr_cap_rtnetlink(struct sock *sock)
52547+{
52548+#ifdef CONFIG_GRKERNSEC
52549+ if (!gr_acl_is_enabled())
52550+ return current_cap();
52551+ else if (sock->sk_protocol == NETLINK_ISCSI &&
52552+ cap_raised(current_cap(), CAP_SYS_ADMIN) &&
52553+ gr_is_capable(CAP_SYS_ADMIN))
52554+ return current_cap();
52555+ else if (sock->sk_protocol == NETLINK_AUDIT &&
52556+ cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
52557+ gr_is_capable(CAP_AUDIT_WRITE) &&
52558+ cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
52559+ gr_is_capable(CAP_AUDIT_CONTROL))
52560+ return current_cap();
52561+ else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
52562+ ((sock->sk_protocol == NETLINK_ROUTE) ?
52563+ gr_is_capable_nolog(CAP_NET_ADMIN) :
52564+ gr_is_capable(CAP_NET_ADMIN)))
52565+ return current_cap();
52566+ else
52567+ return __cap_empty_set;
52568+#else
52569+ return current_cap();
52570+#endif
52571+}
52572diff -urNp linux-2.6.32.43/grsecurity/grsec_sysctl.c linux-2.6.32.43/grsecurity/grsec_sysctl.c
52573--- linux-2.6.32.43/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
52574+++ linux-2.6.32.43/grsecurity/grsec_sysctl.c 2011-06-29 19:37:19.000000000 -0400
52575@@ -0,0 +1,489 @@
52576+#include <linux/kernel.h>
52577+#include <linux/sched.h>
52578+#include <linux/sysctl.h>
52579+#include <linux/grsecurity.h>
52580+#include <linux/grinternal.h>
52581+
52582+int
52583+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
52584+{
52585+#ifdef CONFIG_GRKERNSEC_SYSCTL
52586+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
52587+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
52588+ return -EACCES;
52589+ }
52590+#endif
52591+ return 0;
52592+}
52593+
52594+#ifdef CONFIG_GRKERNSEC_ROFS
52595+static int __maybe_unused one = 1;
52596+#endif
52597+
52598+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
52599+ctl_table grsecurity_table[] = {
52600+#ifdef CONFIG_GRKERNSEC_SYSCTL
52601+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
52602+#ifdef CONFIG_GRKERNSEC_IO
52603+ {
52604+ .ctl_name = CTL_UNNUMBERED,
52605+ .procname = "disable_priv_io",
52606+ .data = &grsec_disable_privio,
52607+ .maxlen = sizeof(int),
52608+ .mode = 0600,
52609+ .proc_handler = &proc_dointvec,
52610+ },
52611+#endif
52612+#endif
52613+#ifdef CONFIG_GRKERNSEC_LINK
52614+ {
52615+ .ctl_name = CTL_UNNUMBERED,
52616+ .procname = "linking_restrictions",
52617+ .data = &grsec_enable_link,
52618+ .maxlen = sizeof(int),
52619+ .mode = 0600,
52620+ .proc_handler = &proc_dointvec,
52621+ },
52622+#endif
52623+#ifdef CONFIG_GRKERNSEC_BRUTE
52624+ {
52625+ .ctl_name = CTL_UNNUMBERED,
52626+ .procname = "deter_bruteforce",
52627+ .data = &grsec_enable_brute,
52628+ .maxlen = sizeof(int),
52629+ .mode = 0600,
52630+ .proc_handler = &proc_dointvec,
52631+ },
52632+#endif
52633+#ifdef CONFIG_GRKERNSEC_FIFO
52634+ {
52635+ .ctl_name = CTL_UNNUMBERED,
52636+ .procname = "fifo_restrictions",
52637+ .data = &grsec_enable_fifo,
52638+ .maxlen = sizeof(int),
52639+ .mode = 0600,
52640+ .proc_handler = &proc_dointvec,
52641+ },
52642+#endif
52643+#ifdef CONFIG_GRKERNSEC_EXECVE
52644+ {
52645+ .ctl_name = CTL_UNNUMBERED,
52646+ .procname = "execve_limiting",
52647+ .data = &grsec_enable_execve,
52648+ .maxlen = sizeof(int),
52649+ .mode = 0600,
52650+ .proc_handler = &proc_dointvec,
52651+ },
52652+#endif
52653+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
52654+ {
52655+ .ctl_name = CTL_UNNUMBERED,
52656+ .procname = "ip_blackhole",
52657+ .data = &grsec_enable_blackhole,
52658+ .maxlen = sizeof(int),
52659+ .mode = 0600,
52660+ .proc_handler = &proc_dointvec,
52661+ },
52662+ {
52663+ .ctl_name = CTL_UNNUMBERED,
52664+ .procname = "lastack_retries",
52665+ .data = &grsec_lastack_retries,
52666+ .maxlen = sizeof(int),
52667+ .mode = 0600,
52668+ .proc_handler = &proc_dointvec,
52669+ },
52670+#endif
52671+#ifdef CONFIG_GRKERNSEC_EXECLOG
52672+ {
52673+ .ctl_name = CTL_UNNUMBERED,
52674+ .procname = "exec_logging",
52675+ .data = &grsec_enable_execlog,
52676+ .maxlen = sizeof(int),
52677+ .mode = 0600,
52678+ .proc_handler = &proc_dointvec,
52679+ },
52680+#endif
52681+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
52682+ {
52683+ .ctl_name = CTL_UNNUMBERED,
52684+ .procname = "rwxmap_logging",
52685+ .data = &grsec_enable_log_rwxmaps,
52686+ .maxlen = sizeof(int),
52687+ .mode = 0600,
52688+ .proc_handler = &proc_dointvec,
52689+ },
52690+#endif
52691+#ifdef CONFIG_GRKERNSEC_SIGNAL
52692+ {
52693+ .ctl_name = CTL_UNNUMBERED,
52694+ .procname = "signal_logging",
52695+ .data = &grsec_enable_signal,
52696+ .maxlen = sizeof(int),
52697+ .mode = 0600,
52698+ .proc_handler = &proc_dointvec,
52699+ },
52700+#endif
52701+#ifdef CONFIG_GRKERNSEC_FORKFAIL
52702+ {
52703+ .ctl_name = CTL_UNNUMBERED,
52704+ .procname = "forkfail_logging",
52705+ .data = &grsec_enable_forkfail,
52706+ .maxlen = sizeof(int),
52707+ .mode = 0600,
52708+ .proc_handler = &proc_dointvec,
52709+ },
52710+#endif
52711+#ifdef CONFIG_GRKERNSEC_TIME
52712+ {
52713+ .ctl_name = CTL_UNNUMBERED,
52714+ .procname = "timechange_logging",
52715+ .data = &grsec_enable_time,
52716+ .maxlen = sizeof(int),
52717+ .mode = 0600,
52718+ .proc_handler = &proc_dointvec,
52719+ },
52720+#endif
52721+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
52722+ {
52723+ .ctl_name = CTL_UNNUMBERED,
52724+ .procname = "chroot_deny_shmat",
52725+ .data = &grsec_enable_chroot_shmat,
52726+ .maxlen = sizeof(int),
52727+ .mode = 0600,
52728+ .proc_handler = &proc_dointvec,
52729+ },
52730+#endif
52731+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
52732+ {
52733+ .ctl_name = CTL_UNNUMBERED,
52734+ .procname = "chroot_deny_unix",
52735+ .data = &grsec_enable_chroot_unix,
52736+ .maxlen = sizeof(int),
52737+ .mode = 0600,
52738+ .proc_handler = &proc_dointvec,
52739+ },
52740+#endif
52741+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
52742+ {
52743+ .ctl_name = CTL_UNNUMBERED,
52744+ .procname = "chroot_deny_mount",
52745+ .data = &grsec_enable_chroot_mount,
52746+ .maxlen = sizeof(int),
52747+ .mode = 0600,
52748+ .proc_handler = &proc_dointvec,
52749+ },
52750+#endif
52751+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
52752+ {
52753+ .ctl_name = CTL_UNNUMBERED,
52754+ .procname = "chroot_deny_fchdir",
52755+ .data = &grsec_enable_chroot_fchdir,
52756+ .maxlen = sizeof(int),
52757+ .mode = 0600,
52758+ .proc_handler = &proc_dointvec,
52759+ },
52760+#endif
52761+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
52762+ {
52763+ .ctl_name = CTL_UNNUMBERED,
52764+ .procname = "chroot_deny_chroot",
52765+ .data = &grsec_enable_chroot_double,
52766+ .maxlen = sizeof(int),
52767+ .mode = 0600,
52768+ .proc_handler = &proc_dointvec,
52769+ },
52770+#endif
52771+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
52772+ {
52773+ .ctl_name = CTL_UNNUMBERED,
52774+ .procname = "chroot_deny_pivot",
52775+ .data = &grsec_enable_chroot_pivot,
52776+ .maxlen = sizeof(int),
52777+ .mode = 0600,
52778+ .proc_handler = &proc_dointvec,
52779+ },
52780+#endif
52781+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
52782+ {
52783+ .ctl_name = CTL_UNNUMBERED,
52784+ .procname = "chroot_enforce_chdir",
52785+ .data = &grsec_enable_chroot_chdir,
52786+ .maxlen = sizeof(int),
52787+ .mode = 0600,
52788+ .proc_handler = &proc_dointvec,
52789+ },
52790+#endif
52791+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
52792+ {
52793+ .ctl_name = CTL_UNNUMBERED,
52794+ .procname = "chroot_deny_chmod",
52795+ .data = &grsec_enable_chroot_chmod,
52796+ .maxlen = sizeof(int),
52797+ .mode = 0600,
52798+ .proc_handler = &proc_dointvec,
52799+ },
52800+#endif
52801+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
52802+ {
52803+ .ctl_name = CTL_UNNUMBERED,
52804+ .procname = "chroot_deny_mknod",
52805+ .data = &grsec_enable_chroot_mknod,
52806+ .maxlen = sizeof(int),
52807+ .mode = 0600,
52808+ .proc_handler = &proc_dointvec,
52809+ },
52810+#endif
52811+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
52812+ {
52813+ .ctl_name = CTL_UNNUMBERED,
52814+ .procname = "chroot_restrict_nice",
52815+ .data = &grsec_enable_chroot_nice,
52816+ .maxlen = sizeof(int),
52817+ .mode = 0600,
52818+ .proc_handler = &proc_dointvec,
52819+ },
52820+#endif
52821+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
52822+ {
52823+ .ctl_name = CTL_UNNUMBERED,
52824+ .procname = "chroot_execlog",
52825+ .data = &grsec_enable_chroot_execlog,
52826+ .maxlen = sizeof(int),
52827+ .mode = 0600,
52828+ .proc_handler = &proc_dointvec,
52829+ },
52830+#endif
52831+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
52832+ {
52833+ .ctl_name = CTL_UNNUMBERED,
52834+ .procname = "chroot_caps",
52835+ .data = &grsec_enable_chroot_caps,
52836+ .maxlen = sizeof(int),
52837+ .mode = 0600,
52838+ .proc_handler = &proc_dointvec,
52839+ },
52840+#endif
52841+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
52842+ {
52843+ .ctl_name = CTL_UNNUMBERED,
52844+ .procname = "chroot_deny_sysctl",
52845+ .data = &grsec_enable_chroot_sysctl,
52846+ .maxlen = sizeof(int),
52847+ .mode = 0600,
52848+ .proc_handler = &proc_dointvec,
52849+ },
52850+#endif
52851+#ifdef CONFIG_GRKERNSEC_TPE
52852+ {
52853+ .ctl_name = CTL_UNNUMBERED,
52854+ .procname = "tpe",
52855+ .data = &grsec_enable_tpe,
52856+ .maxlen = sizeof(int),
52857+ .mode = 0600,
52858+ .proc_handler = &proc_dointvec,
52859+ },
52860+ {
52861+ .ctl_name = CTL_UNNUMBERED,
52862+ .procname = "tpe_gid",
52863+ .data = &grsec_tpe_gid,
52864+ .maxlen = sizeof(int),
52865+ .mode = 0600,
52866+ .proc_handler = &proc_dointvec,
52867+ },
52868+#endif
52869+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
52870+ {
52871+ .ctl_name = CTL_UNNUMBERED,
52872+ .procname = "tpe_invert",
52873+ .data = &grsec_enable_tpe_invert,
52874+ .maxlen = sizeof(int),
52875+ .mode = 0600,
52876+ .proc_handler = &proc_dointvec,
52877+ },
52878+#endif
52879+#ifdef CONFIG_GRKERNSEC_TPE_ALL
52880+ {
52881+ .ctl_name = CTL_UNNUMBERED,
52882+ .procname = "tpe_restrict_all",
52883+ .data = &grsec_enable_tpe_all,
52884+ .maxlen = sizeof(int),
52885+ .mode = 0600,
52886+ .proc_handler = &proc_dointvec,
52887+ },
52888+#endif
52889+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
52890+ {
52891+ .ctl_name = CTL_UNNUMBERED,
52892+ .procname = "socket_all",
52893+ .data = &grsec_enable_socket_all,
52894+ .maxlen = sizeof(int),
52895+ .mode = 0600,
52896+ .proc_handler = &proc_dointvec,
52897+ },
52898+ {
52899+ .ctl_name = CTL_UNNUMBERED,
52900+ .procname = "socket_all_gid",
52901+ .data = &grsec_socket_all_gid,
52902+ .maxlen = sizeof(int),
52903+ .mode = 0600,
52904+ .proc_handler = &proc_dointvec,
52905+ },
52906+#endif
52907+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
52908+ {
52909+ .ctl_name = CTL_UNNUMBERED,
52910+ .procname = "socket_client",
52911+ .data = &grsec_enable_socket_client,
52912+ .maxlen = sizeof(int),
52913+ .mode = 0600,
52914+ .proc_handler = &proc_dointvec,
52915+ },
52916+ {
52917+ .ctl_name = CTL_UNNUMBERED,
52918+ .procname = "socket_client_gid",
52919+ .data = &grsec_socket_client_gid,
52920+ .maxlen = sizeof(int),
52921+ .mode = 0600,
52922+ .proc_handler = &proc_dointvec,
52923+ },
52924+#endif
52925+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
52926+ {
52927+ .ctl_name = CTL_UNNUMBERED,
52928+ .procname = "socket_server",
52929+ .data = &grsec_enable_socket_server,
52930+ .maxlen = sizeof(int),
52931+ .mode = 0600,
52932+ .proc_handler = &proc_dointvec,
52933+ },
52934+ {
52935+ .ctl_name = CTL_UNNUMBERED,
52936+ .procname = "socket_server_gid",
52937+ .data = &grsec_socket_server_gid,
52938+ .maxlen = sizeof(int),
52939+ .mode = 0600,
52940+ .proc_handler = &proc_dointvec,
52941+ },
52942+#endif
52943+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
52944+ {
52945+ .ctl_name = CTL_UNNUMBERED,
52946+ .procname = "audit_group",
52947+ .data = &grsec_enable_group,
52948+ .maxlen = sizeof(int),
52949+ .mode = 0600,
52950+ .proc_handler = &proc_dointvec,
52951+ },
52952+ {
52953+ .ctl_name = CTL_UNNUMBERED,
52954+ .procname = "audit_gid",
52955+ .data = &grsec_audit_gid,
52956+ .maxlen = sizeof(int),
52957+ .mode = 0600,
52958+ .proc_handler = &proc_dointvec,
52959+ },
52960+#endif
52961+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
52962+ {
52963+ .ctl_name = CTL_UNNUMBERED,
52964+ .procname = "audit_chdir",
52965+ .data = &grsec_enable_chdir,
52966+ .maxlen = sizeof(int),
52967+ .mode = 0600,
52968+ .proc_handler = &proc_dointvec,
52969+ },
52970+#endif
52971+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
52972+ {
52973+ .ctl_name = CTL_UNNUMBERED,
52974+ .procname = "audit_mount",
52975+ .data = &grsec_enable_mount,
52976+ .maxlen = sizeof(int),
52977+ .mode = 0600,
52978+ .proc_handler = &proc_dointvec,
52979+ },
52980+#endif
52981+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
52982+ {
52983+ .ctl_name = CTL_UNNUMBERED,
52984+ .procname = "audit_textrel",
52985+ .data = &grsec_enable_audit_textrel,
52986+ .maxlen = sizeof(int),
52987+ .mode = 0600,
52988+ .proc_handler = &proc_dointvec,
52989+ },
52990+#endif
52991+#ifdef CONFIG_GRKERNSEC_DMESG
52992+ {
52993+ .ctl_name = CTL_UNNUMBERED,
52994+ .procname = "dmesg",
52995+ .data = &grsec_enable_dmesg,
52996+ .maxlen = sizeof(int),
52997+ .mode = 0600,
52998+ .proc_handler = &proc_dointvec,
52999+ },
53000+#endif
53001+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53002+ {
53003+ .ctl_name = CTL_UNNUMBERED,
53004+ .procname = "chroot_findtask",
53005+ .data = &grsec_enable_chroot_findtask,
53006+ .maxlen = sizeof(int),
53007+ .mode = 0600,
53008+ .proc_handler = &proc_dointvec,
53009+ },
53010+#endif
53011+#ifdef CONFIG_GRKERNSEC_RESLOG
53012+ {
53013+ .ctl_name = CTL_UNNUMBERED,
53014+ .procname = "resource_logging",
53015+ .data = &grsec_resource_logging,
53016+ .maxlen = sizeof(int),
53017+ .mode = 0600,
53018+ .proc_handler = &proc_dointvec,
53019+ },
53020+#endif
53021+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
53022+ {
53023+ .ctl_name = CTL_UNNUMBERED,
53024+ .procname = "audit_ptrace",
53025+ .data = &grsec_enable_audit_ptrace,
53026+ .maxlen = sizeof(int),
53027+ .mode = 0600,
53028+ .proc_handler = &proc_dointvec,
53029+ },
53030+#endif
53031+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
53032+ {
53033+ .ctl_name = CTL_UNNUMBERED,
53034+ .procname = "harden_ptrace",
53035+ .data = &grsec_enable_harden_ptrace,
53036+ .maxlen = sizeof(int),
53037+ .mode = 0600,
53038+ .proc_handler = &proc_dointvec,
53039+ },
53040+#endif
53041+ {
53042+ .ctl_name = CTL_UNNUMBERED,
53043+ .procname = "grsec_lock",
53044+ .data = &grsec_lock,
53045+ .maxlen = sizeof(int),
53046+ .mode = 0600,
53047+ .proc_handler = &proc_dointvec,
53048+ },
53049+#endif
53050+#ifdef CONFIG_GRKERNSEC_ROFS
53051+ {
53052+ .ctl_name = CTL_UNNUMBERED,
53053+ .procname = "romount_protect",
53054+ .data = &grsec_enable_rofs,
53055+ .maxlen = sizeof(int),
53056+ .mode = 0600,
53057+ .proc_handler = &proc_dointvec_minmax,
53058+ .extra1 = &one,
53059+ .extra2 = &one,
53060+ },
53061+#endif
53062+ { .ctl_name = 0 }
53063+};
53064+#endif
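For reference, the CTL_UNNUMBERED entries above are reached through /proc/sys rather than by number. A minimal sketch of driving them from an init-time helper, assuming the table is registered as the "grsecurity" directory under /proc/sys/kernel (the directory name is implied by the dirname check in gr_handle_sysctl_mod(); the exact mount point is an assumption here):

    #include <stdio.h>
    #include <stdlib.h>

    /* Assumed location of the table above; see gr_handle_sysctl_mod(). */
    #define GRSEC_SYSCTL_DIR "/proc/sys/kernel/grsecurity"

    static void set_grsec_sysctl(const char *name, const char *val)
    {
        char path[256];
        FILE *f;

        snprintf(path, sizeof(path), GRSEC_SYSCTL_DIR "/%s", name);
        f = fopen(path, "w");
        if (!f) {
            perror(path);
            exit(1);
        }
        fputs(val, f);
        fclose(f);
    }

    int main(void)
    {
        set_grsec_sysctl("tpe", "1");        /* enable one of the toggles above */
        set_grsec_sysctl("grsec_lock", "1"); /* lock the table last */
        return 0;
    }

Setting grsec_lock last matters: once it is nonzero, gr_handle_sysctl_mod() returns -EACCES for every further write under the grsecurity directory.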
53065diff -urNp linux-2.6.32.43/grsecurity/grsec_time.c linux-2.6.32.43/grsecurity/grsec_time.c
53066--- linux-2.6.32.43/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
53067+++ linux-2.6.32.43/grsecurity/grsec_time.c 2011-04-17 15:56:46.000000000 -0400
53068@@ -0,0 +1,16 @@
53069+#include <linux/kernel.h>
53070+#include <linux/sched.h>
53071+#include <linux/grinternal.h>
53072+#include <linux/module.h>
53073+
53074+void
53075+gr_log_timechange(void)
53076+{
53077+#ifdef CONFIG_GRKERNSEC_TIME
53078+ if (grsec_enable_time)
53079+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
53080+#endif
53081+ return;
53082+}
53083+
53084+EXPORT_SYMBOL(gr_log_timechange);
53085diff -urNp linux-2.6.32.43/grsecurity/grsec_tpe.c linux-2.6.32.43/grsecurity/grsec_tpe.c
53086--- linux-2.6.32.43/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
53087+++ linux-2.6.32.43/grsecurity/grsec_tpe.c 2011-04-17 15:56:46.000000000 -0400
53088@@ -0,0 +1,39 @@
53089+#include <linux/kernel.h>
53090+#include <linux/sched.h>
53091+#include <linux/file.h>
53092+#include <linux/fs.h>
53093+#include <linux/grinternal.h>
53094+
53095+extern int gr_acl_tpe_check(void);
53096+
53097+int
53098+gr_tpe_allow(const struct file *file)
53099+{
53100+#ifdef CONFIG_GRKERNSEC
53101+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
53102+ const struct cred *cred = current_cred();
53103+
53104+ if (cred->uid && ((grsec_enable_tpe &&
53105+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
53106+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
53107+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
53108+#else
53109+ in_group_p(grsec_tpe_gid)
53110+#endif
53111+ ) || gr_acl_tpe_check()) &&
53112+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
53113+ (inode->i_mode & S_IWOTH))))) {
53114+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
53115+ return 0;
53116+ }
53117+#ifdef CONFIG_GRKERNSEC_TPE_ALL
53118+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
53119+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
53120+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
53121+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
53122+ return 0;
53123+ }
53124+#endif
53125+#endif
53126+ return 1;
53127+}
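The compound condition in gr_tpe_allow() packs the whole policy into one expression. As a reading aid only (a simplified sketch, not part of the patch, ignoring the TPE_INVERT and TPE_ALL refinements), the primary denial test is:

    /* Sketch: restates the main gr_tpe_allow() check as a predicate. */
    static int tpe_denies_exec(int uid_is_root, int covered_by_tpe,
                               int dir_owned_by_root,
                               int dir_group_or_world_writable)
    {
        /* A non-root user covered by TPE (via the TPE group or an RBAC
         * subject flag) may only execute from directories that are owned
         * by root and are neither group- nor world-writable. */
        return !uid_is_root && covered_by_tpe &&
               (!dir_owned_by_root || dir_group_or_world_writable);
    }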
53128diff -urNp linux-2.6.32.43/grsecurity/grsum.c linux-2.6.32.43/grsecurity/grsum.c
53129--- linux-2.6.32.43/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
53130+++ linux-2.6.32.43/grsecurity/grsum.c 2011-04-17 15:56:46.000000000 -0400
53131@@ -0,0 +1,61 @@
53132+#include <linux/err.h>
53133+#include <linux/kernel.h>
53134+#include <linux/sched.h>
53135+#include <linux/mm.h>
53136+#include <linux/scatterlist.h>
53137+#include <linux/crypto.h>
53138+#include <linux/gracl.h>
53139+
53140+
53141+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
53142+#error "crypto and sha256 must be built into the kernel"
53143+#endif
53144+
53145+int
53146+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
53147+{
53148+ char *p;
53149+ struct crypto_hash *tfm;
53150+ struct hash_desc desc;
53151+ struct scatterlist sg;
53152+ unsigned char temp_sum[GR_SHA_LEN];
53153+ volatile int retval = 0;
53154+ volatile int dummy = 0;
53155+ unsigned int i;
53156+
53157+ sg_init_table(&sg, 1);
53158+
53159+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
53160+ if (IS_ERR(tfm)) {
53161+ /* should never happen, since sha256 should be built in */
53162+ return 1;
53163+ }
53164+
53165+ desc.tfm = tfm;
53166+ desc.flags = 0;
53167+
53168+ crypto_hash_init(&desc);
53169+
53170+ p = salt;
53171+ sg_set_buf(&sg, p, GR_SALT_LEN);
53172+ crypto_hash_update(&desc, &sg, sg.length);
53173+
53174+ p = entry->pw;
53175+ sg_set_buf(&sg, p, strlen(p));
53176+
53177+ crypto_hash_update(&desc, &sg, sg.length);
53178+
53179+ crypto_hash_final(&desc, temp_sum);
53180+
53181+ memset(entry->pw, 0, GR_PW_LEN);
53182+
53183+ for (i = 0; i < GR_SHA_LEN; i++)
53184+ if (sum[i] != temp_sum[i])
53185+ retval = 1;
53186+ else
53187+ dummy = 1; // waste a cycle
53188+
53189+ crypto_free_hash(tfm);
53190+
53191+ return retval;
53192+}
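chkpw() above verifies sum == SHA-256(salt || password) and zeroes the supplied password afterwards. A hedged userspace counterpart (roughly what a gradm-style admin tool would compute when producing the stored sum), using OpenSSL purely for illustration; the salt length comes from GR_SALT_LEN in gracl.h and the digest is the usual 32 bytes of SHA-256:

    #include <string.h>
    #include <openssl/sha.h>

    /* Sketch: userspace equivalent of chkpw()'s sum = SHA-256(salt || password). */
    static void gr_password_sum(const unsigned char *salt, size_t salt_len,
                                const char *password,
                                unsigned char sum[SHA256_DIGEST_LENGTH])
    {
        SHA256_CTX ctx;

        SHA256_Init(&ctx);
        SHA256_Update(&ctx, salt, salt_len);             /* salt first, as in chkpw() */
        SHA256_Update(&ctx, password, strlen(password)); /* then the password bytes */
        SHA256_Final(sum, &ctx);
    }

The volatile retval/dummy pair in the kernel loop exists so the byte-by-byte comparison does the same work whether or not a byte matches, keeping its timing roughly independent of where a mismatch occurs.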
53193diff -urNp linux-2.6.32.43/grsecurity/Kconfig linux-2.6.32.43/grsecurity/Kconfig
53194--- linux-2.6.32.43/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
53195+++ linux-2.6.32.43/grsecurity/Kconfig 2011-07-06 19:57:57.000000000 -0400
53196@@ -0,0 +1,1047 @@
53197+#
53198+# grsecurity configuration
53199+#
53200+
53201+menu "Grsecurity"
53202+
53203+config GRKERNSEC
53204+ bool "Grsecurity"
53205+ select CRYPTO
53206+ select CRYPTO_SHA256
53207+ help
53208+ If you say Y here, you will be able to configure many features
53209+ that will enhance the security of your system. It is highly
53210+ recommended that you say Y here and read through the help
53211+ for each option so that you fully understand the features and
53212+ can evaluate their usefulness for your machine.
53213+
53214+choice
53215+ prompt "Security Level"
53216+ depends on GRKERNSEC
53217+ default GRKERNSEC_CUSTOM
53218+
53219+config GRKERNSEC_LOW
53220+ bool "Low"
53221+ select GRKERNSEC_LINK
53222+ select GRKERNSEC_FIFO
53223+ select GRKERNSEC_EXECVE
53224+ select GRKERNSEC_RANDNET
53225+ select GRKERNSEC_DMESG
53226+ select GRKERNSEC_CHROOT
53227+ select GRKERNSEC_CHROOT_CHDIR
53228+
53229+ help
53230+ If you choose this option, several of the grsecurity options will
53231+ be enabled that will give you greater protection against a number
53232+ of attacks, while assuring that none of your software will have any
53233+ of attacks, while ensuring that none of your software will have any
53234+ of unusual software, or you are having problems with the higher
53235+ security levels, you should say Y here. With this option, the
53236+ following features are enabled:
53237+
53238+ - Linking restrictions
53239+ - FIFO restrictions
53240+ - Enforcing RLIMIT_NPROC on execve
53241+ - Restricted dmesg
53242+ - Enforced chdir("/") on chroot
53243+ - Runtime module disabling
53244+
53245+config GRKERNSEC_MEDIUM
53246+ bool "Medium"
53247+ select PAX
53248+ select PAX_EI_PAX
53249+ select PAX_PT_PAX_FLAGS
53250+ select PAX_HAVE_ACL_FLAGS
53251+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
53252+ select GRKERNSEC_CHROOT
53253+ select GRKERNSEC_CHROOT_SYSCTL
53254+ select GRKERNSEC_LINK
53255+ select GRKERNSEC_FIFO
53256+ select GRKERNSEC_EXECVE
53257+ select GRKERNSEC_DMESG
53258+ select GRKERNSEC_RANDNET
53259+ select GRKERNSEC_FORKFAIL
53260+ select GRKERNSEC_TIME
53261+ select GRKERNSEC_SIGNAL
53262+ select GRKERNSEC_CHROOT
53263+ select GRKERNSEC_CHROOT_UNIX
53264+ select GRKERNSEC_CHROOT_MOUNT
53265+ select GRKERNSEC_CHROOT_PIVOT
53266+ select GRKERNSEC_CHROOT_DOUBLE
53267+ select GRKERNSEC_CHROOT_CHDIR
53268+ select GRKERNSEC_CHROOT_MKNOD
53269+ select GRKERNSEC_PROC
53270+ select GRKERNSEC_PROC_USERGROUP
53271+ select PAX_RANDUSTACK
53272+ select PAX_ASLR
53273+ select PAX_RANDMMAP
53274+ select PAX_REFCOUNT if (X86 || SPARC64)
53275+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
53276+
53277+ help
53278+ If you say Y here, several features in addition to those included
53279+ in the low additional security level will be enabled. These
53280+ features provide even more security to your system, though in rare
53281+ cases they may be incompatible with very old or poorly written
53282+ software. If you enable this option, make sure that your auth
53283+ service (identd) is running as gid 1001. With this option,
53284+ the following features (in addition to those provided in the
53285+ low additional security level) will be enabled:
53286+
53287+ - Failed fork logging
53288+ - Time change logging
53289+ - Signal logging
53290+ - Deny mounts in chroot
53291+ - Deny double chrooting
53292+ - Deny sysctl writes in chroot
53293+ - Deny mknod in chroot
53294+ - Deny access to abstract AF_UNIX sockets out of chroot
53295+ - Deny pivot_root in chroot
53296+ - Denied writes of /dev/kmem, /dev/mem, and /dev/port
53297+ - /proc restrictions with special GID set to 1001
53298+ - Address Space Layout Randomization (ASLR)
53299+ - Prevent exploitation of most refcount overflows
53300+ - Bounds checking of copying between the kernel and userland
53301+
53302+config GRKERNSEC_HIGH
53303+ bool "High"
53304+ select GRKERNSEC_LINK
53305+ select GRKERNSEC_FIFO
53306+ select GRKERNSEC_EXECVE
53307+ select GRKERNSEC_DMESG
53308+ select GRKERNSEC_FORKFAIL
53309+ select GRKERNSEC_TIME
53310+ select GRKERNSEC_SIGNAL
53311+ select GRKERNSEC_CHROOT
53312+ select GRKERNSEC_CHROOT_SHMAT
53313+ select GRKERNSEC_CHROOT_UNIX
53314+ select GRKERNSEC_CHROOT_MOUNT
53315+ select GRKERNSEC_CHROOT_FCHDIR
53316+ select GRKERNSEC_CHROOT_PIVOT
53317+ select GRKERNSEC_CHROOT_DOUBLE
53318+ select GRKERNSEC_CHROOT_CHDIR
53319+ select GRKERNSEC_CHROOT_MKNOD
53320+ select GRKERNSEC_CHROOT_CAPS
53321+ select GRKERNSEC_CHROOT_SYSCTL
53322+ select GRKERNSEC_CHROOT_FINDTASK
53323+ select GRKERNSEC_SYSFS_RESTRICT
53324+ select GRKERNSEC_PROC
53325+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
53326+ select GRKERNSEC_HIDESYM
53327+ select GRKERNSEC_BRUTE
53328+ select GRKERNSEC_PROC_USERGROUP
53329+ select GRKERNSEC_KMEM
53330+ select GRKERNSEC_RESLOG
53331+ select GRKERNSEC_RANDNET
53332+ select GRKERNSEC_PROC_ADD
53333+ select GRKERNSEC_CHROOT_CHMOD
53334+ select GRKERNSEC_CHROOT_NICE
53335+ select GRKERNSEC_AUDIT_MOUNT
53336+ select GRKERNSEC_MODHARDEN if (MODULES)
53337+ select GRKERNSEC_HARDEN_PTRACE
53338+ select GRKERNSEC_VM86 if (X86_32)
53339+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
53340+ select PAX
53341+ select PAX_RANDUSTACK
53342+ select PAX_ASLR
53343+ select PAX_RANDMMAP
53344+ select PAX_NOEXEC
53345+ select PAX_MPROTECT
53346+ select PAX_EI_PAX
53347+ select PAX_PT_PAX_FLAGS
53348+ select PAX_HAVE_ACL_FLAGS
53349+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
53350+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
53351+ select PAX_RANDKSTACK if (X86_TSC && X86)
53352+ select PAX_SEGMEXEC if (X86_32)
53353+ select PAX_PAGEEXEC
53354+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
53355+ select PAX_EMUTRAMP if (PARISC)
53356+ select PAX_EMUSIGRT if (PARISC)
53357+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
53358+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
53359+ select PAX_REFCOUNT if (X86 || SPARC64)
53360+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
53361+ help
53362+ If you say Y here, many of the features of grsecurity will be
53363+ enabled, which will protect you against many kinds of attacks
53364+ against your system. The heightened security comes at a cost
53365+ of an increased chance of incompatibilities with rare software
53366+ on your machine. Since this security level enables PaX, you should
53367+ view <http://pax.grsecurity.net> and read about the PaX
53368+ project. While you are there, download chpax and run it on
53369+ binaries that cause problems with PaX. Also remember that
53370+ since the /proc restrictions are enabled, you must run your
53371+ identd as gid 1001. This security level enables the following
53372+ features in addition to those listed in the low and medium
53373+ security levels:
53374+
53375+ - Additional /proc restrictions
53376+ - Chmod restrictions in chroot
53377+ - No signals, ptrace, or viewing of processes outside of chroot
53378+ - Capability restrictions in chroot
53379+ - Deny fchdir out of chroot
53380+ - Priority restrictions in chroot
53381+ - Segmentation-based implementation of PaX
53382+ - Mprotect restrictions
53383+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
53384+ - Kernel stack randomization
53385+ - Mount/unmount/remount logging
53386+ - Kernel symbol hiding
53387+ - Prevention of memory exhaustion-based exploits
53388+ - Hardening of module auto-loading
53389+ - Ptrace restrictions
53390+ - Restricted vm86 mode
53391+ - Restricted sysfs/debugfs
53392+ - Active kernel exploit response
53393+
53394+config GRKERNSEC_CUSTOM
53395+ bool "Custom"
53396+ help
53397+ If you say Y here, you will be able to configure every grsecurity
53398+ option, which allows you to enable many more features that aren't
53399+ covered in the basic security levels. These additional features
53400+ include TPE, socket restrictions, and the sysctl system for
53401+ grsecurity. It is advised that you read through the help for
53402+ each option to determine its usefulness in your situation.
53403+
53404+endchoice
53405+
53406+menu "Address Space Protection"
53407+depends on GRKERNSEC
53408+
53409+config GRKERNSEC_KMEM
53410+ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
53411+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
53412+ help
53413+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
53414+ be written to via mmap or otherwise to modify the running kernel.
53415+ /dev/port will also not be allowed to be opened. If you have module
53416+ support disabled, enabling this will close up four ways that are
53417+ currently used to insert malicious code into the running kernel.
53418+ Even with all these features enabled, we still highly recommend that
53419+ you use the RBAC system, as it is still possible for an attacker to
53420+ modify the running kernel through privileged I/O granted by ioperm/iopl.
53421+ If you are not using XFree86, you may be able to stop this additional
53422+ case by enabling the 'Disable privileged I/O' option. Though nothing
53423+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
53424+ but only to video memory, which is the only writing we allow in this
53425+ case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the mapping
53426+ will not be allowed to be mprotect'ed with PROT_WRITE later.
53427+ It is highly recommended that you say Y here if you meet all the
53428+ conditions above.
53429+
53430+config GRKERNSEC_VM86
53431+ bool "Restrict VM86 mode"
53432+ depends on X86_32
53433+
53434+ help
53435+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
53436+ make use of a special execution mode on 32bit x86 processors called
53437+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
53438+ video cards and will still work with this option enabled. The purpose
53439+ of the option is to prevent exploitation of emulation errors in
53440+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
53441+ Nearly all users should be able to enable this option.
53442+
53443+config GRKERNSEC_IO
53444+ bool "Disable privileged I/O"
53445+ depends on X86
53446+ select RTC_CLASS
53447+ select RTC_INTF_DEV
53448+ select RTC_DRV_CMOS
53449+
53450+ help
53451+ If you say Y here, all ioperm and iopl calls will return an error.
53452+ Ioperm and iopl can be used to modify the running kernel.
53453+ Unfortunately, some programs need this access to operate properly,
53454+ the most notable of which are XFree86 and hwclock. hwclock can be
53455+ remedied by having RTC support in the kernel, so real-time
53456+ clock support is enabled if this option is enabled, to ensure
53457+ that hwclock operates correctly. XFree86 still will not
53458+ operate correctly with this option enabled, so DO NOT CHOOSE Y
53459+ IF YOU USE XFree86. If you use XFree86 and you still want to
53460+ protect your kernel against modification, use the RBAC system.
53461+
53462+config GRKERNSEC_PROC_MEMMAP
53463+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
53464+ default y if (PAX_NOEXEC || PAX_ASLR)
53465+ depends on PAX_NOEXEC || PAX_ASLR
53466+ help
53467+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
53468+ give no information about the addresses of a task's mappings if
53469+ PaX features that rely on random addresses are enabled for that task.
53470+ If you use PaX, it is strongly recommended that you say Y here, as it
53471+ closes a hole that would otherwise make full ASLR useless for suid
53472+ binaries.
53473+
53474+config GRKERNSEC_BRUTE
53475+ bool "Deter exploit bruteforcing"
53476+ help
53477+ If you say Y here, attempts to bruteforce exploits against forking
53478+ daemons such as apache or sshd, as well as against suid/sgid binaries
53479+ will be deterred. When a child of a forking daemon is killed by PaX
53480+ or crashes due to an illegal instruction or other suspicious signal,
53481+ the parent process will be delayed 30 seconds upon every subsequent
53482+ fork until the administrator is able to assess the situation and
53483+ restart the daemon.
53484+ In the suid/sgid case, the attempt is logged, the user has all their
53485+ processes terminated, and they are prevented from executing any further
53486+ processes for 15 minutes.
53487+ It is recommended that you also enable signal logging in the auditing
53488+ section so that logs are generated when a process triggers a suspicious
53489+ signal.
53490+ If the sysctl option is enabled, a sysctl option with name
53491+ "deter_bruteforce" is created.
53492+
53493+config GRKERNSEC_MODHARDEN
53494+ bool "Harden module auto-loading"
53495+ depends on MODULES
53496+ help
53497+ If you say Y here, module auto-loading in response to use of some
53498+ feature implemented by an unloaded module will be restricted to
53499+ root users. Enabling this option helps defend against attacks
53500+ by unprivileged users who abuse the auto-loading behavior to
53501+ cause a vulnerable module to load that is then exploited.
53502+
53503+ If this option prevents a legitimate use of auto-loading for a
53504+ non-root user, the administrator can execute modprobe manually
53505+ with the exact name of the module mentioned in the alert log.
53506+ Alternatively, the administrator can add the module to the list
53507+ of modules loaded at boot by modifying init scripts.
53508+
53509+ Modification of init scripts will most likely be needed on
53510+ Ubuntu servers with encrypted home directory support enabled,
53511+ as the first non-root user logging in will cause the ecb(aes),
53512+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
53513+
53514+config GRKERNSEC_HIDESYM
53515+ bool "Hide kernel symbols"
53516+ help
53517+ If you say Y here, getting information on loaded modules and
53518+ displaying all kernel symbols through a syscall will be restricted
53519+ to users with CAP_SYS_MODULE. For software compatibility reasons,
53520+ /proc/kallsyms will be restricted to the root user. The RBAC
53521+ system can hide that entry even from root.
53522+
53523+ This option also prevents leaking of kernel addresses through
53524+ several /proc entries.
53525+
53526+ Note that this option is only effective provided the following
53527+ conditions are met:
53528+ 1) The kernel using grsecurity is not precompiled by some distribution
53529+ 2) You have also enabled GRKERNSEC_DMESG
53530+ 3) You are using the RBAC system and hiding other files such as your
53531+ kernel image and System.map. Alternatively, enabling this option
53532+ causes the permissions on /boot, /lib/modules, and the kernel
53533+ source directory to change at compile time to prevent
53534+ reading by non-root users.
53535+ If the above conditions are met, this option will aid in providing a
53536+ useful protection against local kernel exploitation of overflows
53537+ and arbitrary read/write vulnerabilities.
53538+
53539+config GRKERNSEC_KERN_LOCKOUT
53540+ bool "Active kernel exploit response"
53541+ depends on X86 || ARM || PPC || SPARC
53542+ help
53543+ If you say Y here, when a PaX alert is triggered due to suspicious
53544+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
53545+ or an OOPs occurs due to bad memory accesses, instead of just
53546+ terminating the offending process (and potentially allowing
53547+ a subsequent exploit from the same user), we will take one of two
53548+ actions:
53549+ * If the user was root, we will panic the system.
53550+ * If the user was non-root, we will log the attempt, terminate all
53551+ processes owned by the user, and then prevent them from creating any
53552+ new processes until the system is restarted.
53553+ This deters repeated kernel exploitation/bruteforcing attempts
53554+ and is useful for later forensics.
53555+
53556+endmenu
53557+menu "Role Based Access Control Options"
53558+depends on GRKERNSEC
53559+
53560+config GRKERNSEC_RBAC_DEBUG
53561+ bool
53562+
53563+config GRKERNSEC_NO_RBAC
53564+ bool "Disable RBAC system"
53565+ help
53566+ If you say Y here, the /dev/grsec device will be removed from the kernel,
53567+ preventing the RBAC system from being enabled. You should only say Y
53568+ here if you have no intention of using the RBAC system, so as to prevent
53569+ an attacker with root access from misusing the RBAC system to hide files
53570+ and processes when loadable module support and /dev/[k]mem have been
53571+ locked down.
53572+
53573+config GRKERNSEC_ACL_HIDEKERN
53574+ bool "Hide kernel processes"
53575+ help
53576+ If you say Y here, all kernel threads will be hidden from all
53577+ processes except those whose subject has the "view hidden processes"
53578+ flag.
53579+
53580+config GRKERNSEC_ACL_MAXTRIES
53581+ int "Maximum tries before password lockout"
53582+ default 3
53583+ help
53584+ This option enforces the maximum number of times a user can attempt
53585+ to authorize themselves with the grsecurity RBAC system before being
53586+ denied the ability to attempt authorization again for a specified time.
53587+ The lower the number, the harder it will be to brute-force a password.
53588+
53589+config GRKERNSEC_ACL_TIMEOUT
53590+ int "Time to wait after max password tries, in seconds"
53591+ default 30
53592+ help
53593+ This option specifies the time the user must wait after attempting to
53594+ authorize to the RBAC system with the maximum number of invalid
53595+ passwords. The higher the number, the harder it will be to brute-force
53596+ a password.
53597+
53598+endmenu
53599+menu "Filesystem Protections"
53600+depends on GRKERNSEC
53601+
53602+config GRKERNSEC_PROC
53603+ bool "Proc restrictions"
53604+ help
53605+ If you say Y here, the permissions of the /proc filesystem
53606+ will be altered to enhance system security and privacy. You MUST
53607+ choose either a user only restriction or a user and group restriction.
53608+ Depending upon the option you choose, you can either restrict users
53609+ to seeing only the processes they themselves run, or additionally
53610+ allow a special group to view all processes and files normally
53611+ restricted to root. NOTE: If you're running identd as
53612+ a non-root user, you will have to run it as the group you specify here.
53613+
53614+config GRKERNSEC_PROC_USER
53615+ bool "Restrict /proc to user only"
53616+ depends on GRKERNSEC_PROC
53617+ help
53618+ If you say Y here, non-root users will only be able to view their own
53619+ processes, and will be restricted from viewing network-related
53620+ information as well as kernel symbol and module information.
53621+
53622+config GRKERNSEC_PROC_USERGROUP
53623+ bool "Allow special group"
53624+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
53625+ help
53626+ If you say Y here, you will be able to select a group that will be
53627+ able to view all processes and network-related information. If you've
53628+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
53629+ remain hidden. This option is useful if you want to run identd as
53630+ a non-root user.
53631+
53632+config GRKERNSEC_PROC_GID
53633+ int "GID for special group"
53634+ depends on GRKERNSEC_PROC_USERGROUP
53635+ default 1001
53636+
53637+config GRKERNSEC_PROC_ADD
53638+ bool "Additional restrictions"
53639+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
53640+ help
53641+ If you say Y here, additional restrictions will be placed on
53642+ /proc that keep normal users from viewing device information and
53643+ slabinfo information that could be useful for exploits.
53644+
53645+config GRKERNSEC_LINK
53646+ bool "Linking restrictions"
53647+ help
53648+ If you say Y here, /tmp race exploits will be prevented, since users
53649+ will no longer be able to follow symlinks owned by other users in
53650+ world-writable +t directories (e.g. /tmp), unless the owner of the
53651+ symlink is the owner of the directory. Users will also not be
53652+ able to hardlink to files they do not own. If the sysctl option is
53653+ enabled, a sysctl option with name "linking_restrictions" is created.
53654+
53655+config GRKERNSEC_FIFO
53656+ bool "FIFO restrictions"
53657+ help
53658+ If you say Y here, users will not be able to write to FIFOs they don't
53659+ own in world-writable +t directories (e.g. /tmp), unless the owner of
53660+ the FIFO is also the owner of the directory it's held in. If the sysctl
53661+ option is enabled, a sysctl option with name "fifo_restrictions" is
53662+ created.
53663+
53664+config GRKERNSEC_SYSFS_RESTRICT
53665+ bool "Sysfs/debugfs restriction"
53666+ depends on SYSFS
53667+ help
53668+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
53669+ any filesystem normally mounted under it (e.g. debugfs) will only
53670+ be accessible by root. These filesystems generally provide access
53671+ to hardware and debug information that isn't appropriate for unprivileged
53672+ users of the system. Sysfs and debugfs have also become a large source
53673+ of new vulnerabilities, ranging from infoleaks to local compromise.
53674+ There has been very little oversight with an eye toward security involved
53675+ There has been very little security oversight involved
53676+ use is discouraged.
53677+ This option is equivalent to a chmod 0700 of the mount paths.
53678+
53679+config GRKERNSEC_ROFS
53680+ bool "Runtime read-only mount protection"
53681+ help
53682+ If you say Y here, a sysctl option with name "romount_protect" will
53683+ be created. By setting this option to 1 at runtime, filesystems
53684+ will be protected in the following ways:
53685+ * No new writable mounts will be allowed
53686+ * Existing read-only mounts won't be able to be remounted read/write
53687+ * Write operations will be denied on all block devices
53688+ This option acts independently of grsec_lock: once it is set to 1,
53689+ it cannot be turned off. Therefore, please be mindful of the resulting
53690+ behavior if this option is enabled in an init script on a read-only
53691+ filesystem. This feature is mainly intended for secure embedded systems.
53692+
53693+config GRKERNSEC_CHROOT
53694+ bool "Chroot jail restrictions"
53695+ help
53696+ If you say Y here, you will be able to choose several options that will
53697+ make breaking out of a chrooted jail much more difficult. If you
53698+ encounter no software incompatibilities with the following options, it
53699+ is recommended that you enable each one.
53700+
53701+config GRKERNSEC_CHROOT_MOUNT
53702+ bool "Deny mounts"
53703+ depends on GRKERNSEC_CHROOT
53704+ help
53705+ If you say Y here, processes inside a chroot will not be able to
53706+ mount or remount filesystems. If the sysctl option is enabled, a
53707+ sysctl option with name "chroot_deny_mount" is created.
53708+
53709+config GRKERNSEC_CHROOT_DOUBLE
53710+ bool "Deny double-chroots"
53711+ depends on GRKERNSEC_CHROOT
53712+ help
53713+ If you say Y here, processes inside a chroot will not be able to chroot
53714+ again outside the chroot. This is a widely used method of breaking
53715+ out of a chroot jail and should not be allowed. If the sysctl
53716+ option is enabled, a sysctl option with name
53717+ "chroot_deny_chroot" is created.
53718+
53719+config GRKERNSEC_CHROOT_PIVOT
53720+ bool "Deny pivot_root in chroot"
53721+ depends on GRKERNSEC_CHROOT
53722+ help
53723+ If you say Y here, processes inside a chroot will not be able to use
53724+ a function called pivot_root() that was introduced in Linux 2.3.41. It
53725+ works similarly to chroot in that it changes the root filesystem. This
53726+ function could be misused in a chrooted process to attempt to break out
53727+ of the chroot, and therefore should not be allowed. If the sysctl
53728+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
53729+ created.
53730+
53731+config GRKERNSEC_CHROOT_CHDIR
53732+ bool "Enforce chdir(\"/\") on all chroots"
53733+ depends on GRKERNSEC_CHROOT
53734+ help
53735+ If you say Y here, the current working directory of all newly-chrooted
53736+ applications will be set to the root directory of the chroot.
53737+ The man page on chroot(2) states:
53738+ Note that this call does not change the current working
53739+ directory, so that `.' can be outside the tree rooted at
53740+ `/'. In particular, the super-user can escape from a
53741+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
53742+
53743+ It is recommended that you say Y here, since it's not known to break
53744+ any software. If the sysctl option is enabled, a sysctl option with
53745+ name "chroot_enforce_chdir" is created.
53746+
53747+config GRKERNSEC_CHROOT_CHMOD
53748+ bool "Deny (f)chmod +s"
53749+ depends on GRKERNSEC_CHROOT
53750+ help
53751+ If you say Y here, processes inside a chroot will not be able to chmod
53752+ or fchmod files to make them have suid or sgid bits. This protects
53753+ against another published method of breaking a chroot. If the sysctl
53754+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
53755+ created.
53756+
53757+config GRKERNSEC_CHROOT_FCHDIR
53758+ bool "Deny fchdir out of chroot"
53759+ depends on GRKERNSEC_CHROOT
53760+ help
53761+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
53762+ to a file descriptor of the chrooting process that points to a directory
53763+ outside the chroot will be stopped. If the sysctl option
53764+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
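The chdir("/") and fchdir restrictions above both target the classic chroot(2) breakout quoted from the man page earlier. Purely to make clear which step each option intercepts, a sketch of that well-known sequence (it assumes the process already holds CAP_SYS_CHROOT inside the jail and is not part of the patch):

    #include <fcntl.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
        int i;
        int fd = open(".", O_RDONLY);   /* directory fd kept across chroot() */

        mkdir("x", 0700);
        chroot("x");        /* denied outright by GRKERNSEC_CHROOT_DOUBLE;
                             * otherwise CHROOT_CHDIR forces the cwd to the
                             * new root here */
        fchdir(fd);         /* the step GRKERNSEC_CHROOT_FCHDIR stops */
        for (i = 0; i < 256; i++)
            chdir("..");    /* walk up past the jail's root */
        chroot(".");
        return 0;
    }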
53765+
53766+config GRKERNSEC_CHROOT_MKNOD
53767+ bool "Deny mknod"
53768+ depends on GRKERNSEC_CHROOT
53769+ help
53770+ If you say Y here, processes inside a chroot will not be allowed to
53771+ mknod. The problem with using mknod inside a chroot is that it
53772+ would allow an attacker to create a device entry that is the same
53773+ as one on the physical root of your system, which could be
53774+ anything from the console device to a device for your hard drive (which
53775+ they could then use to wipe the drive or steal data). It is recommended
53776+ that you say Y here, unless you run into software incompatibilities.
53777+ If the sysctl option is enabled, a sysctl option with name
53778+ "chroot_deny_mknod" is created.
53779+
53780+config GRKERNSEC_CHROOT_SHMAT
53781+ bool "Deny shmat() out of chroot"
53782+ depends on GRKERNSEC_CHROOT
53783+ help
53784+ If you say Y here, processes inside a chroot will not be able to attach
53785+ to shared memory segments that were created outside of the chroot jail.
53786+ It is recommended that you say Y here. If the sysctl option is enabled,
53787+ a sysctl option with name "chroot_deny_shmat" is created.
53788+
53789+config GRKERNSEC_CHROOT_UNIX
53790+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
53791+ depends on GRKERNSEC_CHROOT
53792+ help
53793+ If you say Y here, processes inside a chroot will not be able to
53794+ connect to abstract (meaning not belonging to a filesystem) Unix
53795+ domain sockets that were bound outside of a chroot. It is recommended
53796+ that you say Y here. If the sysctl option is enabled, a sysctl option
53797+ with name "chroot_deny_unix" is created.
53798+
53799+config GRKERNSEC_CHROOT_FINDTASK
53800+ bool "Protect outside processes"
53801+ depends on GRKERNSEC_CHROOT
53802+ help
53803+ If you say Y here, processes inside a chroot will not be able to
53804+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
53805+ getsid, or view any process outside of the chroot. If the sysctl
53806+ option is enabled, a sysctl option with name "chroot_findtask" is
53807+ created.
53808+
53809+config GRKERNSEC_CHROOT_NICE
53810+ bool "Restrict priority changes"
53811+ depends on GRKERNSEC_CHROOT
53812+ help
53813+ If you say Y here, processes inside a chroot will not be able to raise
53814+ the priority of processes in the chroot, or alter the priority of
53815+ processes outside the chroot. This provides more security than simply
53816+ removing CAP_SYS_NICE from the process' capability set. If the
53817+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
53818+ is created.
53819+
53820+config GRKERNSEC_CHROOT_SYSCTL
53821+ bool "Deny sysctl writes"
53822+ depends on GRKERNSEC_CHROOT
53823+ help
53824+ If you say Y here, an attacker in a chroot will not be able to
53825+ write to sysctl entries, either by sysctl(2) or through a /proc
53826+ interface. It is strongly recommended that you say Y here. If the
53827+ sysctl option is enabled, a sysctl option with name
53828+ "chroot_deny_sysctl" is created.
53829+
53830+config GRKERNSEC_CHROOT_CAPS
53831+ bool "Capability restrictions"
53832+ depends on GRKERNSEC_CHROOT
53833+ help
53834+ If you say Y here, the capabilities on all root processes within a
53835+ chroot jail will be lowered to stop module insertion, raw I/O,
53836+ system and net admin tasks, rebooting the system, modifying immutable
53837+ files, modifying IPC owned by another, and changing the system time.
53838+ This is left as an option because it can break some apps. Disable this
53839+ if your chrooted apps are having problems performing those kinds of
53840+ tasks. If the sysctl option is enabled, a sysctl option with
53841+ name "chroot_caps" is created.
53842+
53843+endmenu
53844+menu "Kernel Auditing"
53845+depends on GRKERNSEC
53846+
53847+config GRKERNSEC_AUDIT_GROUP
53848+ bool "Single group for auditing"
53849+ help
53850+ If you say Y here, the exec, chdir, and (un)mount logging features
53851+ will only operate on a group you specify. This option is recommended
53852+ if you only want to watch certain users instead of having a large
53853+ amount of logs from the entire system. If the sysctl option is enabled,
53854+ a sysctl option with name "audit_group" is created.
53855+
53856+config GRKERNSEC_AUDIT_GID
53857+ int "GID for auditing"
53858+ depends on GRKERNSEC_AUDIT_GROUP
53859+ default 1007
53860+
53861+config GRKERNSEC_EXECLOG
53862+ bool "Exec logging"
53863+ help
53864+ If you say Y here, all execve() calls will be logged (since the
53865+ other exec*() calls are frontends to execve(), all execution
53866+ will be logged). Useful for shell-servers that like to keep track
53867+ of their users. If the sysctl option is enabled, a sysctl option with
53868+ name "exec_logging" is created.
53869+ WARNING: This option when enabled will produce a LOT of logs, especially
53870+ on an active system.
53871+
53872+config GRKERNSEC_RESLOG
53873+ bool "Resource logging"
53874+ help
53875+ If you say Y here, all attempts to overstep resource limits will
53876+ be logged with the resource name, the requested size, and the current
53877+ limit. It is highly recommended that you say Y here. If the sysctl
53878+ option is enabled, a sysctl option with name "resource_logging" is
53879+ created. If the RBAC system is enabled, the sysctl value is ignored.
53880+
53881+config GRKERNSEC_CHROOT_EXECLOG
53882+ bool "Log execs within chroot"
53883+ help
53884+ If you say Y here, all executions inside a chroot jail will be logged
53885+ to syslog. This can cause a large amount of logs if certain
53886+ applications (eg. djb's daemontools) are installed on the system, and
53887+ is therefore left as an option. If the sysctl option is enabled, a
53888+ sysctl option with name "chroot_execlog" is created.
53889+
53890+config GRKERNSEC_AUDIT_PTRACE
53891+ bool "Ptrace logging"
53892+ help
53893+ If you say Y here, all attempts to attach to a process via ptrace
53894+ will be logged. If the sysctl option is enabled, a sysctl option
53895+ with name "audit_ptrace" is created.
53896+
53897+config GRKERNSEC_AUDIT_CHDIR
53898+ bool "Chdir logging"
53899+ help
53900+ If you say Y here, all chdir() calls will be logged. If the sysctl
53901+ option is enabled, a sysctl option with name "audit_chdir" is created.
53902+
53903+config GRKERNSEC_AUDIT_MOUNT
53904+ bool "(Un)Mount logging"
53905+ help
53906+ If you say Y here, all mounts and unmounts will be logged. If the
53907+ sysctl option is enabled, a sysctl option with name "audit_mount" is
53908+ created.
53909+
53910+config GRKERNSEC_SIGNAL
53911+ bool "Signal logging"
53912+ help
53913+ If you say Y here, certain important signals will be logged, such as
53914+ SIGSEGV, which as a result will inform you when an error in a program
53915+ occurred, which in some cases could mean a possible exploit attempt.
53916+ If the sysctl option is enabled, a sysctl option with name
53917+ "signal_logging" is created.
53918+
53919+config GRKERNSEC_FORKFAIL
53920+ bool "Fork failure logging"
53921+ help
53922+ If you say Y here, all failed fork() attempts will be logged.
53923+ This could suggest a fork bomb, or someone attempting to overstep
53924+ their process limit. If the sysctl option is enabled, a sysctl option
53925+ with name "forkfail_logging" is created.
53926+
53927+config GRKERNSEC_TIME
53928+ bool "Time change logging"
53929+ help
53930+ If you say Y here, any changes of the system clock will be logged.
53931+ If the sysctl option is enabled, a sysctl option with name
53932+ "timechange_logging" is created.
53933+
53934+config GRKERNSEC_PROC_IPADDR
53935+ bool "/proc/<pid>/ipaddr support"
53936+ help
53937+ If you say Y here, a new entry will be added to each /proc/<pid>
53938+ directory that contains the IP address of the person using the task.
53939+ The IP is carried across local TCP and AF_UNIX stream sockets.
53940+ This information can be useful for IDS/IPSes to perform remote response
53941+ to a local attack. The entry is readable by only the owner of the
53942+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
53943+ the RBAC system), and thus does not create privacy concerns.
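A minimal reader for the entry this option adds (a sketch; the path is /proc/<pid>/ipaddr as described above, and the file is readable only by the task's owner or a suitably privileged root):

    #include <stdio.h>

    int main(int argc, char **argv)
    {
        char path[64], ip[64];
        FILE *f;

        snprintf(path, sizeof(path), "/proc/%s/ipaddr",
                 argc > 1 ? argv[1] : "self");
        f = fopen(path, "r");
        if (!f) {
            perror(path);
            return 1;
        }
        if (fgets(ip, sizeof(ip), f))
            printf("%s: %s", path, ip);
        fclose(f);
        return 0;
    }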
53944+
53945+config GRKERNSEC_RWXMAP_LOG
53946+ bool 'Denied RWX mmap/mprotect logging'
53947+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
53948+ help
53949+ If you say Y here, calls to mmap() and mprotect() with explicit
53950+ usage of PROT_WRITE and PROT_EXEC together will be logged when
53951+ denied by the PAX_MPROTECT feature. If the sysctl option is
53952+ enabled, a sysctl option with name "rwxmap_logging" is created.
53953+
53954+config GRKERNSEC_AUDIT_TEXTREL
53955+ bool 'ELF text relocations logging (READ HELP)'
53956+ depends on PAX_MPROTECT
53957+ help
53958+ If you say Y here, text relocations will be logged with the filename
53959+ of the offending library or binary. The purpose of the feature is
53960+ to help Linux distribution developers get rid of libraries and
53961+ binaries that need text relocations which hinder the future progress
53962+ of PaX. Only Linux distribution developers should say Y here, and
53963+ never on a production machine, as this option creates an information
53964+ leak that could aid an attacker in defeating the randomization of
53965+ a single memory region. If the sysctl option is enabled, a sysctl
53966+ option with name "audit_textrel" is created.
53967+
53968+endmenu
53969+
53970+menu "Executable Protections"
53971+depends on GRKERNSEC
53972+
53973+config GRKERNSEC_EXECVE
53974+ bool "Enforce RLIMIT_NPROC on execs"
53975+ help
53976+ If you say Y here, users with a resource limit on processes will
53977+ have the value checked during execve() calls. The current system
53978+ only checks the system limit during fork() calls. If the sysctl option
53979+ is enabled, a sysctl option with name "execve_limiting" is created.
53980+
53981+config GRKERNSEC_DMESG
53982+ bool "Dmesg(8) restriction"
53983+ help
53984+ If you say Y here, non-root users will not be able to use dmesg(8)
53985+ to view up to the last 4kb of messages in the kernel's log buffer.
53986+ The kernel's log buffer often contains kernel addresses and other
53987+ identifying information useful to an attacker in fingerprinting a
53988+ system for a targeted exploit.
53989+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
53990+ created.
53991+
53992+config GRKERNSEC_HARDEN_PTRACE
53993+ bool "Deter ptrace-based process snooping"
53994+ help
53995+ If you say Y here, TTY sniffers and other malicious monitoring
53996+ programs implemented through ptrace will be defeated. If you
53997+ have been using the RBAC system, this option has already been
53998+ enabled for several years for all users, with the ability to make
53999+ fine-grained exceptions.
54000+
54001+ This option only affects the ability of non-root users to ptrace
54002+ processes that are not a descendant of the ptracing process.
54003+ This means that strace ./binary and gdb ./binary will still work,
54004+ but attaching to arbitrary processes will not. If the sysctl
54005+ option is enabled, a sysctl option with name "harden_ptrace" is
54006+ created.
54007+
54008+config GRKERNSEC_TPE
54009+ bool "Trusted Path Execution (TPE)"
54010+ help
54011+ If you say Y here, you will be able to choose a gid to add to the
54012+ supplementary groups of users you want to mark as "untrusted."
54013+ These users will not be able to execute any files that are not in
54014+ root-owned directories writable only by root. If the sysctl option
54015+ is enabled, a sysctl option with name "tpe" is created.
54016+
54017+config GRKERNSEC_TPE_ALL
54018+ bool "Partially restrict all non-root users"
54019+ depends on GRKERNSEC_TPE
54020+ help
54021+ If you say Y here, all non-root users will be covered under
54022+ a weaker TPE restriction. This is separate from, and in addition to,
54023+ the main TPE options that you have selected elsewhere. Thus, if a
54024+ "trusted" GID is chosen, this restriction applies to even that GID.
54025+ Under this restriction, all non-root users will only be allowed to
54026+ execute files in directories they own that are not group or
54027+ world-writable, or in directories owned by root and writable only by
54028+ root. If the sysctl option is enabled, a sysctl option with name
54029+ "tpe_restrict_all" is created.
54030+
54031+config GRKERNSEC_TPE_INVERT
54032+ bool "Invert GID option"
54033+ depends on GRKERNSEC_TPE
54034+ help
54035+ If you say Y here, the group you specify in the TPE configuration will
54036+ decide what group TPE restrictions will be *disabled* for. This
54037+ option is useful if you want TPE restrictions to be applied to most
54038+ users on the system. If the sysctl option is enabled, a sysctl option
54039+ with name "tpe_invert" is created. Unlike other sysctl options, this
54040+ entry will default to on for backward-compatibility.
54041+
54042+config GRKERNSEC_TPE_GID
54043+ int "GID for untrusted users"
54044+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
54045+ default 1005
54046+ help
54047+ Setting this GID determines what group TPE restrictions will be
54048+ *enabled* for. If the sysctl option is enabled, a sysctl option
54049+ with name "tpe_gid" is created.
54050+
54051+config GRKERNSEC_TPE_GID
54052+ int "GID for trusted users"
54053+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
54054+ default 1005
54055+ help
54056+ Setting this GID determines what group TPE restrictions will be
54057+ *disabled* for. If the sysctl option is enabled, a sysctl option
54058+ with name "tpe_gid" is created.
54059+
54060+endmenu
54061+menu "Network Protections"
54062+depends on GRKERNSEC
54063+
54064+config GRKERNSEC_RANDNET
54065+ bool "Larger entropy pools"
54066+ help
54067+ If you say Y here, the entropy pools used for many features of Linux
54068+ and grsecurity will be doubled in size. Since several grsecurity
54069+ features use additional randomness, it is recommended that you say Y
54070+	  here. Saying Y here has a similar effect to modifying
54071+ /proc/sys/kernel/random/poolsize.
54072+
54073+config GRKERNSEC_BLACKHOLE
54074+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
54075+ help
54076+ If you say Y here, neither TCP resets nor ICMP
54077+ destination-unreachable packets will be sent in response to packets
54078+ sent to ports for which no associated listening process exists.
54079+	  This feature supports both IPv4 and IPv6 and exempts the
54080+ loopback interface from blackholing. Enabling this feature
54081+ makes a host more resilient to DoS attacks and reduces network
54082+ visibility against scanners.
54083+
54084+ The blackhole feature as-implemented is equivalent to the FreeBSD
54085+ blackhole feature, as it prevents RST responses to all packets, not
54086+ just SYNs. Under most application behavior this causes no
54087+ problems, but applications (like haproxy) may not close certain
54088+ connections in a way that cleanly terminates them on the remote
54089+ end, leaving the remote host in LAST_ACK state. Because of this
54090+ side-effect and to prevent intentional LAST_ACK DoSes, this
54091+ feature also adds automatic mitigation against such attacks.
54092+ The mitigation drastically reduces the amount of time a socket
54093+ can spend in LAST_ACK state. If you're using haproxy and not
54094+ all servers it connects to have this option enabled, consider
54095+ disabling this feature on the haproxy host.
54096+
54097+ If the sysctl option is enabled, two sysctl options with names
54098+ "ip_blackhole" and "lastack_retries" will be created.
54099+ While "ip_blackhole" takes the standard zero/non-zero on/off
54100+ toggle, "lastack_retries" uses the same kinds of values as
54101+ "tcp_retries1" and "tcp_retries2". The default value of 4
54102+ prevents a socket from lasting more than 45 seconds in LAST_ACK
54103+ state.
54104+
54105+config GRKERNSEC_SOCKET
54106+ bool "Socket restrictions"
54107+ help
54108+ If you say Y here, you will be able to choose from several options.
54109+ If you assign a GID on your system and add it to the supplementary
54110+ groups of users you want to restrict socket access to, this patch
54111+	  will enforce up to three kinds of socket restriction, based on the option(s) you choose.
54112+
54113+config GRKERNSEC_SOCKET_ALL
54114+ bool "Deny any sockets to group"
54115+ depends on GRKERNSEC_SOCKET
54116+ help
54117+	  If you say Y here, you will be able to choose a GID whose users will
54118+ be unable to connect to other hosts from your machine or run server
54119+ applications from your machine. If the sysctl option is enabled, a
54120+ sysctl option with name "socket_all" is created.
54121+
54122+config GRKERNSEC_SOCKET_ALL_GID
54123+ int "GID to deny all sockets for"
54124+ depends on GRKERNSEC_SOCKET_ALL
54125+ default 1004
54126+ help
54127+ Here you can choose the GID to disable socket access for. Remember to
54128+ add the users you want socket access disabled for to the GID
54129+ specified here. If the sysctl option is enabled, a sysctl option
54130+ with name "socket_all_gid" is created.
54131+
54132+config GRKERNSEC_SOCKET_CLIENT
54133+ bool "Deny client sockets to group"
54134+ depends on GRKERNSEC_SOCKET
54135+ help
54136+	  If you say Y here, you will be able to choose a GID whose users will
54137+ be unable to connect to other hosts from your machine, but will be
54138+ able to run servers. If this option is enabled, all users in the group
54139+ you specify will have to use passive mode when initiating ftp transfers
54140+ from the shell on your machine. If the sysctl option is enabled, a
54141+ sysctl option with name "socket_client" is created.
54142+
54143+config GRKERNSEC_SOCKET_CLIENT_GID
54144+ int "GID to deny client sockets for"
54145+ depends on GRKERNSEC_SOCKET_CLIENT
54146+ default 1003
54147+ help
54148+ Here you can choose the GID to disable client socket access for.
54149+ Remember to add the users you want client socket access disabled for to
54150+ the GID specified here. If the sysctl option is enabled, a sysctl
54151+ option with name "socket_client_gid" is created.
54152+
54153+config GRKERNSEC_SOCKET_SERVER
54154+ bool "Deny server sockets to group"
54155+ depends on GRKERNSEC_SOCKET
54156+ help
54157+	  If you say Y here, you will be able to choose a GID whose users will
54158+ be unable to run server applications from your machine. If the sysctl
54159+ option is enabled, a sysctl option with name "socket_server" is created.
54160+
54161+config GRKERNSEC_SOCKET_SERVER_GID
54162+ int "GID to deny server sockets for"
54163+ depends on GRKERNSEC_SOCKET_SERVER
54164+ default 1002
54165+ help
54166+ Here you can choose the GID to disable server socket access for.
54167+ Remember to add the users you want server socket access disabled for to
54168+ the GID specified here. If the sysctl option is enabled, a sysctl
54169+ option with name "socket_server_gid" is created.
54170+
54171+endmenu
54172+menu "Sysctl support"
54173+depends on GRKERNSEC && SYSCTL
54174+
54175+config GRKERNSEC_SYSCTL
54176+ bool "Sysctl support"
54177+ help
54178+ If you say Y here, you will be able to change the options that
54179+ grsecurity runs with at bootup, without having to recompile your
54180+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
54181+ to enable (1) or disable (0) various features. All the sysctl entries
54182+ are mutable until the "grsec_lock" entry is set to a non-zero value.
54183+ All features enabled in the kernel configuration are disabled at boot
54184+ if you do not say Y to the "Turn on features by default" option.
54185+	  All options should be set at startup, and the grsec_lock entry should
54186+	  be set to a non-zero value after all the options are set; a brief usage
54187+	  sketch follows this Kconfig file. *THIS IS EXTREMELY IMPORTANT*
54188+
54189+config GRKERNSEC_SYSCTL_DISTRO
54190+ bool "Extra sysctl support for distro makers (READ HELP)"
54191+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
54192+ help
54193+ If you say Y here, additional sysctl options will be created
54194+ for features that affect processes running as root. Therefore,
54195+ it is critical when using this option that the grsec_lock entry be
54196+	  enabled after boot. Only distributions that ship prebuilt kernel
54197+	  packages with this option enabled, and that can ensure grsec_lock is
54198+	  set after boot, should use this option.
54199+ *Failure to set grsec_lock after boot makes all grsec features
54200+ this option covers useless*
54201+
54202+ Currently this option creates the following sysctl entries:
54203+ "Disable Privileged I/O": "disable_priv_io"
54204+
54205+config GRKERNSEC_SYSCTL_ON
54206+ bool "Turn on features by default"
54207+ depends on GRKERNSEC_SYSCTL
54208+ help
54209+	  If you say Y here, the features enabled in your kernel configuration
54210+	  will be turned on at boot time rather than left disabled until you
54211+	  enable them via sysctl. It is recommended you say Y here unless
54212+ there is some reason you would want all sysctl-tunable features to
54213+ be disabled by default. As mentioned elsewhere, it is important
54214+ to enable the grsec_lock entry once you have finished modifying
54215+ the sysctl entries.
54216+
54217+endmenu
54218+menu "Logging Options"
54219+depends on GRKERNSEC
54220+
54221+config GRKERNSEC_FLOODTIME
54222+ int "Seconds in between log messages (minimum)"
54223+ default 10
54224+ help
54225+	  This option allows you to enforce the minimum number of seconds between
54226+	  grsecurity log messages. The default should be suitable for most
54227+	  people; however, if you choose to change it, choose a value small enough
54228+ to allow informative logs to be produced, but large enough to
54229+ prevent flooding.
54230+
54231+config GRKERNSEC_FLOODBURST
54232+ int "Number of messages in a burst (maximum)"
54233+ default 4
54234+ help
54235+ This option allows you to choose the maximum number of messages allowed
54236+ within the flood time interval you chose in a separate option. The
54237+	  default should be suitable for most people; however, if you find that
54238+ many of your logs are being interpreted as flooding, you may want to
54239+ raise this value.
54240+
54241+endmenu
54242+
54243+endmenu
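
The sysctl menu above boils down to a simple runtime workflow: write 1 or 0 to the entries under /proc/sys/kernel/grsecurity, then set grsec_lock so nothing can be changed afterwards. The program below is a minimal userspace sketch of that workflow, assuming a kernel built with GRKERNSEC_SYSCTL; the helper name write_sysctl and the choice of the "dmesg" entry are illustrative only.

#include <stdio.h>

/* Minimal sketch, not part of the patch: enable one grsecurity feature,
 * then lock all grsecurity sysctl entries as the help text recommends. */
static int write_sysctl(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", val);
	return fclose(f);
}

int main(void)
{
	write_sysctl("/proc/sys/kernel/grsecurity/dmesg", "1");      /* enable a feature */
	write_sysctl("/proc/sys/kernel/grsecurity/grsec_lock", "1"); /* then lock everything */
	return 0;
}
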
54244diff -urNp linux-2.6.32.43/grsecurity/Makefile linux-2.6.32.43/grsecurity/Makefile
54245--- linux-2.6.32.43/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
54246+++ linux-2.6.32.43/grsecurity/Makefile 2011-05-24 20:27:46.000000000 -0400
54247@@ -0,0 +1,33 @@
54248+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
54249+# during 2001-2009 it was completely redesigned by Brad Spengler
54250+# into an RBAC system
54251+#
54252+# All code in this directory and various hooks inserted throughout the kernel
54253+# are copyright Brad Spengler - Open Source Security, Inc., and released
54254+# under the GPL v2 or higher
54255+
54256+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
54257+ grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
54258+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
54259+
54260+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
54261+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
54262+ gracl_learn.o grsec_log.o
54263+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
54264+
54265+ifdef CONFIG_NET
54266+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
54267+endif
54268+
54269+ifndef CONFIG_GRKERNSEC
54270+obj-y += grsec_disabled.o
54271+endif
54272+
54273+ifdef CONFIG_GRKERNSEC_HIDESYM
54274+extra-y := grsec_hidesym.o
54275+$(obj)/grsec_hidesym.o:
54276+ @-chmod -f 500 /boot
54277+ @-chmod -f 500 /lib/modules
54278+ @-chmod -f 700 .
54279+ @echo ' grsec: protected kernel image paths'
54280+endif
54281diff -urNp linux-2.6.32.43/include/acpi/acpi_drivers.h linux-2.6.32.43/include/acpi/acpi_drivers.h
54282--- linux-2.6.32.43/include/acpi/acpi_drivers.h 2011-03-27 14:31:47.000000000 -0400
54283+++ linux-2.6.32.43/include/acpi/acpi_drivers.h 2011-04-17 15:56:46.000000000 -0400
54284@@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
54285 Dock Station
54286 -------------------------------------------------------------------------- */
54287 struct acpi_dock_ops {
54288- acpi_notify_handler handler;
54289- acpi_notify_handler uevent;
54290+ const acpi_notify_handler handler;
54291+ const acpi_notify_handler uevent;
54292 };
54293
54294 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
54295@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
54296 extern int register_dock_notifier(struct notifier_block *nb);
54297 extern void unregister_dock_notifier(struct notifier_block *nb);
54298 extern int register_hotplug_dock_device(acpi_handle handle,
54299- struct acpi_dock_ops *ops,
54300+ const struct acpi_dock_ops *ops,
54301 void *context);
54302 extern void unregister_hotplug_dock_device(acpi_handle handle);
54303 #else
54304@@ -144,7 +144,7 @@ static inline void unregister_dock_notif
54305 {
54306 }
54307 static inline int register_hotplug_dock_device(acpi_handle handle,
54308- struct acpi_dock_ops *ops,
54309+ const struct acpi_dock_ops *ops,
54310 void *context)
54311 {
54312 return -ENODEV;
54313diff -urNp linux-2.6.32.43/include/asm-generic/atomic-long.h linux-2.6.32.43/include/asm-generic/atomic-long.h
54314--- linux-2.6.32.43/include/asm-generic/atomic-long.h 2011-03-27 14:31:47.000000000 -0400
54315+++ linux-2.6.32.43/include/asm-generic/atomic-long.h 2011-07-13 22:21:25.000000000 -0400
54316@@ -22,6 +22,12 @@
54317
54318 typedef atomic64_t atomic_long_t;
54319
54320+#ifdef CONFIG_PAX_REFCOUNT
54321+typedef atomic64_unchecked_t atomic_long_unchecked_t;
54322+#else
54323+typedef atomic64_t atomic_long_unchecked_t;
54324+#endif
54325+
54326 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
54327
54328 static inline long atomic_long_read(atomic_long_t *l)
54329@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
54330 return (long)atomic64_read(v);
54331 }
54332
54333+#ifdef CONFIG_PAX_REFCOUNT
54334+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
54335+{
54336+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54337+
54338+ return (long)atomic64_read_unchecked(v);
54339+}
54340+#endif
54341+
54342 static inline void atomic_long_set(atomic_long_t *l, long i)
54343 {
54344 atomic64_t *v = (atomic64_t *)l;
54345@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
54346 atomic64_set(v, i);
54347 }
54348
54349+#ifdef CONFIG_PAX_REFCOUNT
54350+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
54351+{
54352+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54353+
54354+ atomic64_set_unchecked(v, i);
54355+}
54356+#endif
54357+
54358 static inline void atomic_long_inc(atomic_long_t *l)
54359 {
54360 atomic64_t *v = (atomic64_t *)l;
54361@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
54362 atomic64_inc(v);
54363 }
54364
54365+#ifdef CONFIG_PAX_REFCOUNT
54366+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
54367+{
54368+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54369+
54370+ atomic64_inc_unchecked(v);
54371+}
54372+#endif
54373+
54374 static inline void atomic_long_dec(atomic_long_t *l)
54375 {
54376 atomic64_t *v = (atomic64_t *)l;
54377@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
54378 atomic64_dec(v);
54379 }
54380
54381+#ifdef CONFIG_PAX_REFCOUNT
54382+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
54383+{
54384+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54385+
54386+ atomic64_dec_unchecked(v);
54387+}
54388+#endif
54389+
54390 static inline void atomic_long_add(long i, atomic_long_t *l)
54391 {
54392 atomic64_t *v = (atomic64_t *)l;
54393@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
54394 atomic64_add(i, v);
54395 }
54396
54397+#ifdef CONFIG_PAX_REFCOUNT
54398+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
54399+{
54400+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54401+
54402+ atomic64_add_unchecked(i, v);
54403+}
54404+#endif
54405+
54406 static inline void atomic_long_sub(long i, atomic_long_t *l)
54407 {
54408 atomic64_t *v = (atomic64_t *)l;
54409@@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
54410 return (long)atomic64_inc_return(v);
54411 }
54412
54413+#ifdef CONFIG_PAX_REFCOUNT
54414+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
54415+{
54416+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
54417+
54418+ return (long)atomic64_inc_return_unchecked(v);
54419+}
54420+#endif
54421+
54422 static inline long atomic_long_dec_return(atomic_long_t *l)
54423 {
54424 atomic64_t *v = (atomic64_t *)l;
54425@@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
54426
54427 typedef atomic_t atomic_long_t;
54428
54429+#ifdef CONFIG_PAX_REFCOUNT
54430+typedef atomic_unchecked_t atomic_long_unchecked_t;
54431+#else
54432+typedef atomic_t atomic_long_unchecked_t;
54433+#endif
54434+
54435 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
54436 static inline long atomic_long_read(atomic_long_t *l)
54437 {
54438@@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
54439 return (long)atomic_read(v);
54440 }
54441
54442+#ifdef CONFIG_PAX_REFCOUNT
54443+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
54444+{
54445+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54446+
54447+ return (long)atomic_read_unchecked(v);
54448+}
54449+#endif
54450+
54451 static inline void atomic_long_set(atomic_long_t *l, long i)
54452 {
54453 atomic_t *v = (atomic_t *)l;
54454@@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
54455 atomic_set(v, i);
54456 }
54457
54458+#ifdef CONFIG_PAX_REFCOUNT
54459+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
54460+{
54461+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54462+
54463+ atomic_set_unchecked(v, i);
54464+}
54465+#endif
54466+
54467 static inline void atomic_long_inc(atomic_long_t *l)
54468 {
54469 atomic_t *v = (atomic_t *)l;
54470@@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
54471 atomic_inc(v);
54472 }
54473
54474+#ifdef CONFIG_PAX_REFCOUNT
54475+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
54476+{
54477+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54478+
54479+ atomic_inc_unchecked(v);
54480+}
54481+#endif
54482+
54483 static inline void atomic_long_dec(atomic_long_t *l)
54484 {
54485 atomic_t *v = (atomic_t *)l;
54486@@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
54487 atomic_dec(v);
54488 }
54489
54490+#ifdef CONFIG_PAX_REFCOUNT
54491+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
54492+{
54493+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54494+
54495+ atomic_dec_unchecked(v);
54496+}
54497+#endif
54498+
54499 static inline void atomic_long_add(long i, atomic_long_t *l)
54500 {
54501 atomic_t *v = (atomic_t *)l;
54502@@ -176,6 +278,15 @@ static inline void atomic_long_add(long
54503 atomic_add(i, v);
54504 }
54505
54506+#ifdef CONFIG_PAX_REFCOUNT
54507+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
54508+{
54509+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54510+
54511+ atomic_add_unchecked(i, v);
54512+}
54513+#endif
54514+
54515 static inline void atomic_long_sub(long i, atomic_long_t *l)
54516 {
54517 atomic_t *v = (atomic_t *)l;
54518@@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
54519 return (long)atomic_inc_return(v);
54520 }
54521
54522+#ifdef CONFIG_PAX_REFCOUNT
54523+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
54524+{
54525+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
54526+
54527+ return (long)atomic_inc_return_unchecked(v);
54528+}
54529+#endif
54530+
54531 static inline long atomic_long_dec_return(atomic_long_t *l)
54532 {
54533 atomic_t *v = (atomic_t *)l;
54534@@ -255,4 +375,47 @@ static inline long atomic_long_add_unles
54535
54536 #endif /* BITS_PER_LONG == 64 */
54537
54538+#ifdef CONFIG_PAX_REFCOUNT
54539+static inline void pax_refcount_needs_these_functions(void)
54540+{
54541+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
54542+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
54543+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
54544+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
54545+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
54546+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
54547+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
54548+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
54549+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
54550+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
54551+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
54552+
54553+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
54554+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
54555+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
54556+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
54557+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
54558+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
54559+}
54560+#else
54561+#define atomic_read_unchecked(v) atomic_read(v)
54562+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
54563+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
54564+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
54565+#define atomic_inc_unchecked(v) atomic_inc(v)
54566+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
54567+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
54568+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
54569+#define atomic_dec_unchecked(v) atomic_dec(v)
54570+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
54571+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
54572+
54573+#define atomic_long_read_unchecked(v) atomic_long_read(v)
54574+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
54575+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
54576+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
54577+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
54578+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
54579+#endif
54580+
54581 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
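
The _unchecked types and wrappers added above give call sites a way to opt out of the PAX_REFCOUNT overflow protection: genuine reference counts keep using atomic_t/atomic_long_t and stay checked, while counters whose wrap-around is harmless (statistics, debug IDs) are converted to the _unchecked variants, as later hunks in this patch do for fields such as blk_trace.dropped. The fragment below is an illustrative kernel-style sketch of that split, not code from the patch; the structure and function names are made up.

#include <asm/atomic.h>		/* atomic_t and the atomic_long helpers above */

/* Illustrative only: the refcount stays overflow-checked under
 * PAX_REFCOUNT, the byte counter deliberately is not. */
struct example_obj {
	atomic_t		refcnt;		/* protected reference count */
	atomic_long_unchecked_t	nbytes;		/* statistic: wrap-around is harmless */
};

static void example_account(struct example_obj *obj, long bytes)
{
	atomic_inc(&obj->refcnt);				/* checked increment */
	atomic_long_add_unchecked(bytes, &obj->nbytes);		/* plain, unchecked add */
}
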
54582diff -urNp linux-2.6.32.43/include/asm-generic/cache.h linux-2.6.32.43/include/asm-generic/cache.h
54583--- linux-2.6.32.43/include/asm-generic/cache.h 2011-03-27 14:31:47.000000000 -0400
54584+++ linux-2.6.32.43/include/asm-generic/cache.h 2011-07-06 19:53:33.000000000 -0400
54585@@ -6,7 +6,7 @@
54586 * cache lines need to provide their own cache.h.
54587 */
54588
54589-#define L1_CACHE_SHIFT 5
54590-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
54591+#define L1_CACHE_SHIFT 5UL
54592+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
54593
54594 #endif /* __ASM_GENERIC_CACHE_H */
54595diff -urNp linux-2.6.32.43/include/asm-generic/dma-mapping-common.h linux-2.6.32.43/include/asm-generic/dma-mapping-common.h
54596--- linux-2.6.32.43/include/asm-generic/dma-mapping-common.h 2011-03-27 14:31:47.000000000 -0400
54597+++ linux-2.6.32.43/include/asm-generic/dma-mapping-common.h 2011-04-17 15:56:46.000000000 -0400
54598@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
54599 enum dma_data_direction dir,
54600 struct dma_attrs *attrs)
54601 {
54602- struct dma_map_ops *ops = get_dma_ops(dev);
54603+ const struct dma_map_ops *ops = get_dma_ops(dev);
54604 dma_addr_t addr;
54605
54606 kmemcheck_mark_initialized(ptr, size);
54607@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
54608 enum dma_data_direction dir,
54609 struct dma_attrs *attrs)
54610 {
54611- struct dma_map_ops *ops = get_dma_ops(dev);
54612+ const struct dma_map_ops *ops = get_dma_ops(dev);
54613
54614 BUG_ON(!valid_dma_direction(dir));
54615 if (ops->unmap_page)
54616@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
54617 int nents, enum dma_data_direction dir,
54618 struct dma_attrs *attrs)
54619 {
54620- struct dma_map_ops *ops = get_dma_ops(dev);
54621+ const struct dma_map_ops *ops = get_dma_ops(dev);
54622 int i, ents;
54623 struct scatterlist *s;
54624
54625@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
54626 int nents, enum dma_data_direction dir,
54627 struct dma_attrs *attrs)
54628 {
54629- struct dma_map_ops *ops = get_dma_ops(dev);
54630+ const struct dma_map_ops *ops = get_dma_ops(dev);
54631
54632 BUG_ON(!valid_dma_direction(dir));
54633 debug_dma_unmap_sg(dev, sg, nents, dir);
54634@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
54635 size_t offset, size_t size,
54636 enum dma_data_direction dir)
54637 {
54638- struct dma_map_ops *ops = get_dma_ops(dev);
54639+ const struct dma_map_ops *ops = get_dma_ops(dev);
54640 dma_addr_t addr;
54641
54642 kmemcheck_mark_initialized(page_address(page) + offset, size);
54643@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
54644 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
54645 size_t size, enum dma_data_direction dir)
54646 {
54647- struct dma_map_ops *ops = get_dma_ops(dev);
54648+ const struct dma_map_ops *ops = get_dma_ops(dev);
54649
54650 BUG_ON(!valid_dma_direction(dir));
54651 if (ops->unmap_page)
54652@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
54653 size_t size,
54654 enum dma_data_direction dir)
54655 {
54656- struct dma_map_ops *ops = get_dma_ops(dev);
54657+ const struct dma_map_ops *ops = get_dma_ops(dev);
54658
54659 BUG_ON(!valid_dma_direction(dir));
54660 if (ops->sync_single_for_cpu)
54661@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
54662 dma_addr_t addr, size_t size,
54663 enum dma_data_direction dir)
54664 {
54665- struct dma_map_ops *ops = get_dma_ops(dev);
54666+ const struct dma_map_ops *ops = get_dma_ops(dev);
54667
54668 BUG_ON(!valid_dma_direction(dir));
54669 if (ops->sync_single_for_device)
54670@@ -123,7 +123,7 @@ static inline void dma_sync_single_range
54671 size_t size,
54672 enum dma_data_direction dir)
54673 {
54674- struct dma_map_ops *ops = get_dma_ops(dev);
54675+ const struct dma_map_ops *ops = get_dma_ops(dev);
54676
54677 BUG_ON(!valid_dma_direction(dir));
54678 if (ops->sync_single_range_for_cpu) {
54679@@ -140,7 +140,7 @@ static inline void dma_sync_single_range
54680 size_t size,
54681 enum dma_data_direction dir)
54682 {
54683- struct dma_map_ops *ops = get_dma_ops(dev);
54684+ const struct dma_map_ops *ops = get_dma_ops(dev);
54685
54686 BUG_ON(!valid_dma_direction(dir));
54687 if (ops->sync_single_range_for_device) {
54688@@ -155,7 +155,7 @@ static inline void
54689 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
54690 int nelems, enum dma_data_direction dir)
54691 {
54692- struct dma_map_ops *ops = get_dma_ops(dev);
54693+ const struct dma_map_ops *ops = get_dma_ops(dev);
54694
54695 BUG_ON(!valid_dma_direction(dir));
54696 if (ops->sync_sg_for_cpu)
54697@@ -167,7 +167,7 @@ static inline void
54698 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
54699 int nelems, enum dma_data_direction dir)
54700 {
54701- struct dma_map_ops *ops = get_dma_ops(dev);
54702+ const struct dma_map_ops *ops = get_dma_ops(dev);
54703
54704 BUG_ON(!valid_dma_direction(dir));
54705 if (ops->sync_sg_for_device)
54706diff -urNp linux-2.6.32.43/include/asm-generic/futex.h linux-2.6.32.43/include/asm-generic/futex.h
54707--- linux-2.6.32.43/include/asm-generic/futex.h 2011-03-27 14:31:47.000000000 -0400
54708+++ linux-2.6.32.43/include/asm-generic/futex.h 2011-04-17 15:56:46.000000000 -0400
54709@@ -6,7 +6,7 @@
54710 #include <asm/errno.h>
54711
54712 static inline int
54713-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
54714+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
54715 {
54716 int op = (encoded_op >> 28) & 7;
54717 int cmp = (encoded_op >> 24) & 15;
54718@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
54719 }
54720
54721 static inline int
54722-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
54723+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
54724 {
54725 return -ENOSYS;
54726 }
54727diff -urNp linux-2.6.32.43/include/asm-generic/int-l64.h linux-2.6.32.43/include/asm-generic/int-l64.h
54728--- linux-2.6.32.43/include/asm-generic/int-l64.h 2011-03-27 14:31:47.000000000 -0400
54729+++ linux-2.6.32.43/include/asm-generic/int-l64.h 2011-04-17 15:56:46.000000000 -0400
54730@@ -46,6 +46,8 @@ typedef unsigned int u32;
54731 typedef signed long s64;
54732 typedef unsigned long u64;
54733
54734+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
54735+
54736 #define S8_C(x) x
54737 #define U8_C(x) x ## U
54738 #define S16_C(x) x
54739diff -urNp linux-2.6.32.43/include/asm-generic/int-ll64.h linux-2.6.32.43/include/asm-generic/int-ll64.h
54740--- linux-2.6.32.43/include/asm-generic/int-ll64.h 2011-03-27 14:31:47.000000000 -0400
54741+++ linux-2.6.32.43/include/asm-generic/int-ll64.h 2011-04-17 15:56:46.000000000 -0400
54742@@ -51,6 +51,8 @@ typedef unsigned int u32;
54743 typedef signed long long s64;
54744 typedef unsigned long long u64;
54745
54746+typedef unsigned long long intoverflow_t;
54747+
54748 #define S8_C(x) x
54749 #define U8_C(x) x ## U
54750 #define S16_C(x) x
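
The intoverflow_t typedefs added to int-l64.h and int-ll64.h above provide an unsigned type wider than unsigned long (128-bit where long is 64-bit, 64-bit otherwise). The apparent intent is to let size computations be carried out in the wider type so an overflow can be detected before memory is allocated; the helper below only illustrates that idea under this assumption and is not a function from the patch.

#include <linux/slab.h>
#include <linux/kernel.h>	/* ULONG_MAX */

/* Illustrative helper: compute n * size in the wider intoverflow_t type
 * and refuse the allocation if the product cannot fit in an unsigned long. */
static void *example_kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	if ((intoverflow_t)n * size > ULONG_MAX)
		return NULL;
	return kmalloc(n * size, flags);
}
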
54751diff -urNp linux-2.6.32.43/include/asm-generic/kmap_types.h linux-2.6.32.43/include/asm-generic/kmap_types.h
54752--- linux-2.6.32.43/include/asm-generic/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
54753+++ linux-2.6.32.43/include/asm-generic/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
54754@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
54755 KMAP_D(16) KM_IRQ_PTE,
54756 KMAP_D(17) KM_NMI,
54757 KMAP_D(18) KM_NMI_PTE,
54758-KMAP_D(19) KM_TYPE_NR
54759+KMAP_D(19) KM_CLEARPAGE,
54760+KMAP_D(20) KM_TYPE_NR
54761 };
54762
54763 #undef KMAP_D
54764diff -urNp linux-2.6.32.43/include/asm-generic/pgtable.h linux-2.6.32.43/include/asm-generic/pgtable.h
54765--- linux-2.6.32.43/include/asm-generic/pgtable.h 2011-03-27 14:31:47.000000000 -0400
54766+++ linux-2.6.32.43/include/asm-generic/pgtable.h 2011-04-17 15:56:46.000000000 -0400
54767@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
54768 unsigned long size);
54769 #endif
54770
54771+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
54772+static inline unsigned long pax_open_kernel(void) { return 0; }
54773+#endif
54774+
54775+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
54776+static inline unsigned long pax_close_kernel(void) { return 0; }
54777+#endif
54778+
54779 #endif /* !__ASSEMBLY__ */
54780
54781 #endif /* _ASM_GENERIC_PGTABLE_H */
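
pax_open_kernel() and pax_close_kernel() default to the no-op stubs added above; architectures that enforce read-only kernel data are expected to provide real implementations guarded by __HAVE_ARCH_PAX_OPEN_KERNEL/__HAVE_ARCH_PAX_CLOSE_KERNEL. The usual bracketing pattern is sketched below; the function and variable names are illustrative and not taken from the patch.

#include <asm/pgtable.h>	/* pax_open_kernel() / pax_close_kernel() */

/* Illustrative: briefly allow a write to data that is normally kept
 * read-only, then restore the protection.  With the generic stubs
 * above, both calls simply return 0 and the write is unrestricted. */
static void example_set_ro_flag(int *ro_flag, int value)
{
	pax_open_kernel();
	*ro_flag = value;
	pax_close_kernel();
}
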
54782diff -urNp linux-2.6.32.43/include/asm-generic/pgtable-nopmd.h linux-2.6.32.43/include/asm-generic/pgtable-nopmd.h
54783--- linux-2.6.32.43/include/asm-generic/pgtable-nopmd.h 2011-03-27 14:31:47.000000000 -0400
54784+++ linux-2.6.32.43/include/asm-generic/pgtable-nopmd.h 2011-04-17 15:56:46.000000000 -0400
54785@@ -1,14 +1,19 @@
54786 #ifndef _PGTABLE_NOPMD_H
54787 #define _PGTABLE_NOPMD_H
54788
54789-#ifndef __ASSEMBLY__
54790-
54791 #include <asm-generic/pgtable-nopud.h>
54792
54793-struct mm_struct;
54794-
54795 #define __PAGETABLE_PMD_FOLDED
54796
54797+#define PMD_SHIFT PUD_SHIFT
54798+#define PTRS_PER_PMD 1
54799+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
54800+#define PMD_MASK (~(PMD_SIZE-1))
54801+
54802+#ifndef __ASSEMBLY__
54803+
54804+struct mm_struct;
54805+
54806 /*
54807 * Having the pmd type consist of a pud gets the size right, and allows
54808 * us to conceptually access the pud entry that this pmd is folded into
54809@@ -16,11 +21,6 @@ struct mm_struct;
54810 */
54811 typedef struct { pud_t pud; } pmd_t;
54812
54813-#define PMD_SHIFT PUD_SHIFT
54814-#define PTRS_PER_PMD 1
54815-#define PMD_SIZE (1UL << PMD_SHIFT)
54816-#define PMD_MASK (~(PMD_SIZE-1))
54817-
54818 /*
54819 * The "pud_xxx()" functions here are trivial for a folded two-level
54820 * setup: the pmd is never bad, and a pmd always exists (as it's folded
54821diff -urNp linux-2.6.32.43/include/asm-generic/pgtable-nopud.h linux-2.6.32.43/include/asm-generic/pgtable-nopud.h
54822--- linux-2.6.32.43/include/asm-generic/pgtable-nopud.h 2011-03-27 14:31:47.000000000 -0400
54823+++ linux-2.6.32.43/include/asm-generic/pgtable-nopud.h 2011-04-17 15:56:46.000000000 -0400
54824@@ -1,10 +1,15 @@
54825 #ifndef _PGTABLE_NOPUD_H
54826 #define _PGTABLE_NOPUD_H
54827
54828-#ifndef __ASSEMBLY__
54829-
54830 #define __PAGETABLE_PUD_FOLDED
54831
54832+#define PUD_SHIFT PGDIR_SHIFT
54833+#define PTRS_PER_PUD 1
54834+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
54835+#define PUD_MASK (~(PUD_SIZE-1))
54836+
54837+#ifndef __ASSEMBLY__
54838+
54839 /*
54840 * Having the pud type consist of a pgd gets the size right, and allows
54841 * us to conceptually access the pgd entry that this pud is folded into
54842@@ -12,11 +17,6 @@
54843 */
54844 typedef struct { pgd_t pgd; } pud_t;
54845
54846-#define PUD_SHIFT PGDIR_SHIFT
54847-#define PTRS_PER_PUD 1
54848-#define PUD_SIZE (1UL << PUD_SHIFT)
54849-#define PUD_MASK (~(PUD_SIZE-1))
54850-
54851 /*
54852 * The "pgd_xxx()" functions here are trivial for a folded two-level
54853 * setup: the pud is never bad, and a pud always exists (as it's folded
54854diff -urNp linux-2.6.32.43/include/asm-generic/vmlinux.lds.h linux-2.6.32.43/include/asm-generic/vmlinux.lds.h
54855--- linux-2.6.32.43/include/asm-generic/vmlinux.lds.h 2011-03-27 14:31:47.000000000 -0400
54856+++ linux-2.6.32.43/include/asm-generic/vmlinux.lds.h 2011-04-17 15:56:46.000000000 -0400
54857@@ -199,6 +199,7 @@
54858 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
54859 VMLINUX_SYMBOL(__start_rodata) = .; \
54860 *(.rodata) *(.rodata.*) \
54861+ *(.data.read_only) \
54862 *(__vermagic) /* Kernel version magic */ \
54863 *(__markers_strings) /* Markers: strings */ \
54864 *(__tracepoints_strings)/* Tracepoints: strings */ \
54865@@ -656,22 +657,24 @@
54866 * section in the linker script will go there too. @phdr should have
54867 * a leading colon.
54868 *
54869- * Note that this macros defines __per_cpu_load as an absolute symbol.
54870+ * Note that this macro defines per_cpu_load as an absolute symbol.
54871 * If there is no need to put the percpu section at a predetermined
54872 * address, use PERCPU().
54873 */
54874 #define PERCPU_VADDR(vaddr, phdr) \
54875- VMLINUX_SYMBOL(__per_cpu_load) = .; \
54876- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
54877+ per_cpu_load = .; \
54878+ .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
54879 - LOAD_OFFSET) { \
54880+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
54881 VMLINUX_SYMBOL(__per_cpu_start) = .; \
54882 *(.data.percpu.first) \
54883- *(.data.percpu.page_aligned) \
54884 *(.data.percpu) \
54885+ . = ALIGN(PAGE_SIZE); \
54886+ *(.data.percpu.page_aligned) \
54887 *(.data.percpu.shared_aligned) \
54888 VMLINUX_SYMBOL(__per_cpu_end) = .; \
54889 } phdr \
54890- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
54891+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
54892
54893 /**
54894 * PERCPU - define output section for percpu area, simple version
54895diff -urNp linux-2.6.32.43/include/drm/drmP.h linux-2.6.32.43/include/drm/drmP.h
54896--- linux-2.6.32.43/include/drm/drmP.h 2011-03-27 14:31:47.000000000 -0400
54897+++ linux-2.6.32.43/include/drm/drmP.h 2011-04-17 15:56:46.000000000 -0400
54898@@ -71,6 +71,7 @@
54899 #include <linux/workqueue.h>
54900 #include <linux/poll.h>
54901 #include <asm/pgalloc.h>
54902+#include <asm/local.h>
54903 #include "drm.h"
54904
54905 #include <linux/idr.h>
54906@@ -814,7 +815,7 @@ struct drm_driver {
54907 void (*vgaarb_irq)(struct drm_device *dev, bool state);
54908
54909 /* Driver private ops for this object */
54910- struct vm_operations_struct *gem_vm_ops;
54911+ const struct vm_operations_struct *gem_vm_ops;
54912
54913 int major;
54914 int minor;
54915@@ -917,7 +918,7 @@ struct drm_device {
54916
54917 /** \name Usage Counters */
54918 /*@{ */
54919- int open_count; /**< Outstanding files open */
54920+ local_t open_count; /**< Outstanding files open */
54921 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
54922 atomic_t vma_count; /**< Outstanding vma areas open */
54923 int buf_use; /**< Buffers in use -- cannot alloc */
54924@@ -928,7 +929,7 @@ struct drm_device {
54925 /*@{ */
54926 unsigned long counters;
54927 enum drm_stat_type types[15];
54928- atomic_t counts[15];
54929+ atomic_unchecked_t counts[15];
54930 /*@} */
54931
54932 struct list_head filelist;
54933@@ -1016,7 +1017,7 @@ struct drm_device {
54934 struct pci_controller *hose;
54935 #endif
54936 struct drm_sg_mem *sg; /**< Scatter gather memory */
54937- unsigned int num_crtcs; /**< Number of CRTCs on this device */
54938+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
54939 void *dev_private; /**< device private data */
54940 void *mm_private;
54941 struct address_space *dev_mapping;
54942@@ -1042,11 +1043,11 @@ struct drm_device {
54943 spinlock_t object_name_lock;
54944 struct idr object_name_idr;
54945 atomic_t object_count;
54946- atomic_t object_memory;
54947+ atomic_unchecked_t object_memory;
54948 atomic_t pin_count;
54949- atomic_t pin_memory;
54950+ atomic_unchecked_t pin_memory;
54951 atomic_t gtt_count;
54952- atomic_t gtt_memory;
54953+ atomic_unchecked_t gtt_memory;
54954 uint32_t gtt_total;
54955 uint32_t invalidate_domains; /* domains pending invalidation */
54956 uint32_t flush_domains; /* domains pending flush */
54957diff -urNp linux-2.6.32.43/include/linux/a.out.h linux-2.6.32.43/include/linux/a.out.h
54958--- linux-2.6.32.43/include/linux/a.out.h 2011-03-27 14:31:47.000000000 -0400
54959+++ linux-2.6.32.43/include/linux/a.out.h 2011-04-17 15:56:46.000000000 -0400
54960@@ -39,6 +39,14 @@ enum machine_type {
54961 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
54962 };
54963
54964+/* Constants for the N_FLAGS field */
54965+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
54966+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
54967+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
54968+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
54969+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
54970+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
54971+
54972 #if !defined (N_MAGIC)
54973 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
54974 #endif
54975diff -urNp linux-2.6.32.43/include/linux/atmdev.h linux-2.6.32.43/include/linux/atmdev.h
54976--- linux-2.6.32.43/include/linux/atmdev.h 2011-03-27 14:31:47.000000000 -0400
54977+++ linux-2.6.32.43/include/linux/atmdev.h 2011-04-17 15:56:46.000000000 -0400
54978@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
54979 #endif
54980
54981 struct k_atm_aal_stats {
54982-#define __HANDLE_ITEM(i) atomic_t i
54983+#define __HANDLE_ITEM(i) atomic_unchecked_t i
54984 __AAL_STAT_ITEMS
54985 #undef __HANDLE_ITEM
54986 };
54987diff -urNp linux-2.6.32.43/include/linux/backlight.h linux-2.6.32.43/include/linux/backlight.h
54988--- linux-2.6.32.43/include/linux/backlight.h 2011-03-27 14:31:47.000000000 -0400
54989+++ linux-2.6.32.43/include/linux/backlight.h 2011-04-17 15:56:46.000000000 -0400
54990@@ -36,18 +36,18 @@ struct backlight_device;
54991 struct fb_info;
54992
54993 struct backlight_ops {
54994- unsigned int options;
54995+ const unsigned int options;
54996
54997 #define BL_CORE_SUSPENDRESUME (1 << 0)
54998
54999 /* Notify the backlight driver some property has changed */
55000- int (*update_status)(struct backlight_device *);
55001+ int (* const update_status)(struct backlight_device *);
55002 /* Return the current backlight brightness (accounting for power,
55003 fb_blank etc.) */
55004- int (*get_brightness)(struct backlight_device *);
55005+ int (* const get_brightness)(struct backlight_device *);
55006 /* Check if given framebuffer device is the one bound to this backlight;
55007 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
55008- int (*check_fb)(struct fb_info *);
55009+ int (* const check_fb)(struct fb_info *);
55010 };
55011
55012 /* This structure defines all the properties of a backlight */
55013@@ -86,7 +86,7 @@ struct backlight_device {
55014 registered this device has been unloaded, and if class_get_devdata()
55015 points to something in the body of that driver, it is also invalid. */
55016 struct mutex ops_lock;
55017- struct backlight_ops *ops;
55018+ const struct backlight_ops *ops;
55019
55020 /* The framebuffer notifier block */
55021 struct notifier_block fb_notif;
55022@@ -103,7 +103,7 @@ static inline void backlight_update_stat
55023 }
55024
55025 extern struct backlight_device *backlight_device_register(const char *name,
55026- struct device *dev, void *devdata, struct backlight_ops *ops);
55027+ struct device *dev, void *devdata, const struct backlight_ops *ops);
55028 extern void backlight_device_unregister(struct backlight_device *bd);
55029 extern void backlight_force_update(struct backlight_device *bd,
55030 enum backlight_update_reason reason);
55031diff -urNp linux-2.6.32.43/include/linux/binfmts.h linux-2.6.32.43/include/linux/binfmts.h
55032--- linux-2.6.32.43/include/linux/binfmts.h 2011-04-17 17:00:52.000000000 -0400
55033+++ linux-2.6.32.43/include/linux/binfmts.h 2011-04-17 15:56:46.000000000 -0400
55034@@ -83,6 +83,7 @@ struct linux_binfmt {
55035 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
55036 int (*load_shlib)(struct file *);
55037 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
55038+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
55039 unsigned long min_coredump; /* minimal dump size */
55040 int hasvdso;
55041 };
55042diff -urNp linux-2.6.32.43/include/linux/blkdev.h linux-2.6.32.43/include/linux/blkdev.h
55043--- linux-2.6.32.43/include/linux/blkdev.h 2011-03-27 14:31:47.000000000 -0400
55044+++ linux-2.6.32.43/include/linux/blkdev.h 2011-04-17 15:56:46.000000000 -0400
55045@@ -1265,19 +1265,19 @@ static inline int blk_integrity_rq(struc
55046 #endif /* CONFIG_BLK_DEV_INTEGRITY */
55047
55048 struct block_device_operations {
55049- int (*open) (struct block_device *, fmode_t);
55050- int (*release) (struct gendisk *, fmode_t);
55051- int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
55052- int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
55053- int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
55054- int (*direct_access) (struct block_device *, sector_t,
55055+ int (* const open) (struct block_device *, fmode_t);
55056+ int (* const release) (struct gendisk *, fmode_t);
55057+ int (* const locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
55058+ int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
55059+ int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
55060+ int (* const direct_access) (struct block_device *, sector_t,
55061 void **, unsigned long *);
55062- int (*media_changed) (struct gendisk *);
55063- unsigned long long (*set_capacity) (struct gendisk *,
55064+ int (* const media_changed) (struct gendisk *);
55065+ unsigned long long (* const set_capacity) (struct gendisk *,
55066 unsigned long long);
55067- int (*revalidate_disk) (struct gendisk *);
55068- int (*getgeo)(struct block_device *, struct hd_geometry *);
55069- struct module *owner;
55070+ int (* const revalidate_disk) (struct gendisk *);
55071+	int (* const getgeo)(struct block_device *, struct hd_geometry *);
55072+ struct module * const owner;
55073 };
55074
55075 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
55076diff -urNp linux-2.6.32.43/include/linux/blktrace_api.h linux-2.6.32.43/include/linux/blktrace_api.h
55077--- linux-2.6.32.43/include/linux/blktrace_api.h 2011-03-27 14:31:47.000000000 -0400
55078+++ linux-2.6.32.43/include/linux/blktrace_api.h 2011-05-04 17:56:28.000000000 -0400
55079@@ -160,7 +160,7 @@ struct blk_trace {
55080 struct dentry *dir;
55081 struct dentry *dropped_file;
55082 struct dentry *msg_file;
55083- atomic_t dropped;
55084+ atomic_unchecked_t dropped;
55085 };
55086
55087 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
55088diff -urNp linux-2.6.32.43/include/linux/byteorder/little_endian.h linux-2.6.32.43/include/linux/byteorder/little_endian.h
55089--- linux-2.6.32.43/include/linux/byteorder/little_endian.h 2011-03-27 14:31:47.000000000 -0400
55090+++ linux-2.6.32.43/include/linux/byteorder/little_endian.h 2011-04-17 15:56:46.000000000 -0400
55091@@ -42,51 +42,51 @@
55092
55093 static inline __le64 __cpu_to_le64p(const __u64 *p)
55094 {
55095- return (__force __le64)*p;
55096+ return (__force const __le64)*p;
55097 }
55098 static inline __u64 __le64_to_cpup(const __le64 *p)
55099 {
55100- return (__force __u64)*p;
55101+ return (__force const __u64)*p;
55102 }
55103 static inline __le32 __cpu_to_le32p(const __u32 *p)
55104 {
55105- return (__force __le32)*p;
55106+ return (__force const __le32)*p;
55107 }
55108 static inline __u32 __le32_to_cpup(const __le32 *p)
55109 {
55110- return (__force __u32)*p;
55111+ return (__force const __u32)*p;
55112 }
55113 static inline __le16 __cpu_to_le16p(const __u16 *p)
55114 {
55115- return (__force __le16)*p;
55116+ return (__force const __le16)*p;
55117 }
55118 static inline __u16 __le16_to_cpup(const __le16 *p)
55119 {
55120- return (__force __u16)*p;
55121+ return (__force const __u16)*p;
55122 }
55123 static inline __be64 __cpu_to_be64p(const __u64 *p)
55124 {
55125- return (__force __be64)__swab64p(p);
55126+ return (__force const __be64)__swab64p(p);
55127 }
55128 static inline __u64 __be64_to_cpup(const __be64 *p)
55129 {
55130- return __swab64p((__u64 *)p);
55131+ return __swab64p((const __u64 *)p);
55132 }
55133 static inline __be32 __cpu_to_be32p(const __u32 *p)
55134 {
55135- return (__force __be32)__swab32p(p);
55136+ return (__force const __be32)__swab32p(p);
55137 }
55138 static inline __u32 __be32_to_cpup(const __be32 *p)
55139 {
55140- return __swab32p((__u32 *)p);
55141+ return __swab32p((const __u32 *)p);
55142 }
55143 static inline __be16 __cpu_to_be16p(const __u16 *p)
55144 {
55145- return (__force __be16)__swab16p(p);
55146+ return (__force const __be16)__swab16p(p);
55147 }
55148 static inline __u16 __be16_to_cpup(const __be16 *p)
55149 {
55150- return __swab16p((__u16 *)p);
55151+ return __swab16p((const __u16 *)p);
55152 }
55153 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
55154 #define __le64_to_cpus(x) do { (void)(x); } while (0)
55155diff -urNp linux-2.6.32.43/include/linux/cache.h linux-2.6.32.43/include/linux/cache.h
55156--- linux-2.6.32.43/include/linux/cache.h 2011-03-27 14:31:47.000000000 -0400
55157+++ linux-2.6.32.43/include/linux/cache.h 2011-04-17 15:56:46.000000000 -0400
55158@@ -16,6 +16,10 @@
55159 #define __read_mostly
55160 #endif
55161
55162+#ifndef __read_only
55163+#define __read_only __read_mostly
55164+#endif
55165+
55166 #ifndef ____cacheline_aligned
55167 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
55168 #endif
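
__read_only added above currently just falls back to __read_mostly, but the name signals intent: variables marked this way are meant to land in the .data.read_only section that the vmlinux.lds.h hunk earlier in this patch folds into .rodata, on configurations that define __read_only accordingly (later writes would then need the pax_open_kernel()/pax_close_kernel() bracketing shown earlier). A one-line usage sketch with an illustrative name:

/* Illustrative: configured once at boot, never written again at runtime. */
static int example_feature_enabled __read_only = 1;
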
55169diff -urNp linux-2.6.32.43/include/linux/capability.h linux-2.6.32.43/include/linux/capability.h
55170--- linux-2.6.32.43/include/linux/capability.h 2011-03-27 14:31:47.000000000 -0400
55171+++ linux-2.6.32.43/include/linux/capability.h 2011-04-17 15:56:46.000000000 -0400
55172@@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
55173 (security_real_capable_noaudit((t), (cap)) == 0)
55174
55175 extern int capable(int cap);
55176+int capable_nolog(int cap);
55177
55178 /* audit system wants to get cap info from files as well */
55179 struct dentry;
55180diff -urNp linux-2.6.32.43/include/linux/compiler-gcc4.h linux-2.6.32.43/include/linux/compiler-gcc4.h
55181--- linux-2.6.32.43/include/linux/compiler-gcc4.h 2011-03-27 14:31:47.000000000 -0400
55182+++ linux-2.6.32.43/include/linux/compiler-gcc4.h 2011-04-17 15:56:46.000000000 -0400
55183@@ -36,4 +36,8 @@
55184 the kernel context */
55185 #define __cold __attribute__((__cold__))
55186
55187+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
55188+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
55189+#define __bos0(ptr) __bos((ptr), 0)
55190+#define __bos1(ptr) __bos((ptr), 1)
55191 #endif
55192diff -urNp linux-2.6.32.43/include/linux/compiler.h linux-2.6.32.43/include/linux/compiler.h
55193--- linux-2.6.32.43/include/linux/compiler.h 2011-03-27 14:31:47.000000000 -0400
55194+++ linux-2.6.32.43/include/linux/compiler.h 2011-04-17 15:56:46.000000000 -0400
55195@@ -256,6 +256,22 @@ void ftrace_likely_update(struct ftrace_
55196 #define __cold
55197 #endif
55198
55199+#ifndef __alloc_size
55200+#define __alloc_size
55201+#endif
55202+
55203+#ifndef __bos
55204+#define __bos
55205+#endif
55206+
55207+#ifndef __bos0
55208+#define __bos0
55209+#endif
55210+
55211+#ifndef __bos1
55212+#define __bos1
55213+#endif
55214+
55215 /* Simple shorthand for a section definition */
55216 #ifndef __section
55217 # define __section(S) __attribute__ ((__section__(#S)))
55218@@ -278,6 +294,7 @@ void ftrace_likely_update(struct ftrace_
55219 * use is to mediate communication between process-level code and irq/NMI
55220 * handlers, all running on the same CPU.
55221 */
55222-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
55223+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
55224+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
55225
55226 #endif /* __LINUX_COMPILER_H */
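
compiler-gcc4.h above maps __alloc_size() onto gcc's alloc_size attribute and __bos()/__bos0()/__bos1() onto __builtin_object_size(), with empty fallbacks defined here for other compilers. Together they let the compiler track how large an allocated or declared object is, which can back fortify-style checks such as the illustrative one below; this sketch assumes a gcc 4.x build (where __bos1() really expands to __builtin_object_size()) and the function name is made up.

#include <linux/compiler.h>
#include <linux/kernel.h>	/* BUG() */
#include <linux/string.h>

/* Illustrative only: refuse a copy that is provably larger than the
 * destination object.  __bos1() yields (size_t)-1 when the size is
 * not known at compile time. */
static inline void example_checked_copy(void *dst, const void *src, size_t len)
{
	size_t objsz = __bos1(dst);

	if (objsz != (size_t)-1 && len > objsz)
		BUG();
	memcpy(dst, src, len);
}
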
55227diff -urNp linux-2.6.32.43/include/linux/dcache.h linux-2.6.32.43/include/linux/dcache.h
55228--- linux-2.6.32.43/include/linux/dcache.h 2011-03-27 14:31:47.000000000 -0400
55229+++ linux-2.6.32.43/include/linux/dcache.h 2011-04-23 13:34:46.000000000 -0400
55230@@ -119,6 +119,8 @@ struct dentry {
55231 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
55232 };
55233
55234+#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
55235+
55236 /*
55237 * dentry->d_lock spinlock nesting subclasses:
55238 *
55239diff -urNp linux-2.6.32.43/include/linux/decompress/mm.h linux-2.6.32.43/include/linux/decompress/mm.h
55240--- linux-2.6.32.43/include/linux/decompress/mm.h 2011-03-27 14:31:47.000000000 -0400
55241+++ linux-2.6.32.43/include/linux/decompress/mm.h 2011-04-17 15:56:46.000000000 -0400
55242@@ -78,7 +78,7 @@ static void free(void *where)
55243 * warnings when not needed (indeed large_malloc / large_free are not
55244 * needed by inflate */
55245
55246-#define malloc(a) kmalloc(a, GFP_KERNEL)
55247+#define malloc(a) kmalloc((a), GFP_KERNEL)
55248 #define free(a) kfree(a)
55249
55250 #define large_malloc(a) vmalloc(a)
55251diff -urNp linux-2.6.32.43/include/linux/dma-mapping.h linux-2.6.32.43/include/linux/dma-mapping.h
55252--- linux-2.6.32.43/include/linux/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
55253+++ linux-2.6.32.43/include/linux/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
55254@@ -16,50 +16,50 @@ enum dma_data_direction {
55255 };
55256
55257 struct dma_map_ops {
55258- void* (*alloc_coherent)(struct device *dev, size_t size,
55259+ void* (* const alloc_coherent)(struct device *dev, size_t size,
55260 dma_addr_t *dma_handle, gfp_t gfp);
55261- void (*free_coherent)(struct device *dev, size_t size,
55262+ void (* const free_coherent)(struct device *dev, size_t size,
55263 void *vaddr, dma_addr_t dma_handle);
55264- dma_addr_t (*map_page)(struct device *dev, struct page *page,
55265+ dma_addr_t (* const map_page)(struct device *dev, struct page *page,
55266 unsigned long offset, size_t size,
55267 enum dma_data_direction dir,
55268 struct dma_attrs *attrs);
55269- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
55270+ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
55271 size_t size, enum dma_data_direction dir,
55272 struct dma_attrs *attrs);
55273- int (*map_sg)(struct device *dev, struct scatterlist *sg,
55274+ int (* const map_sg)(struct device *dev, struct scatterlist *sg,
55275 int nents, enum dma_data_direction dir,
55276 struct dma_attrs *attrs);
55277- void (*unmap_sg)(struct device *dev,
55278+ void (* const unmap_sg)(struct device *dev,
55279 struct scatterlist *sg, int nents,
55280 enum dma_data_direction dir,
55281 struct dma_attrs *attrs);
55282- void (*sync_single_for_cpu)(struct device *dev,
55283+ void (* const sync_single_for_cpu)(struct device *dev,
55284 dma_addr_t dma_handle, size_t size,
55285 enum dma_data_direction dir);
55286- void (*sync_single_for_device)(struct device *dev,
55287+ void (* const sync_single_for_device)(struct device *dev,
55288 dma_addr_t dma_handle, size_t size,
55289 enum dma_data_direction dir);
55290- void (*sync_single_range_for_cpu)(struct device *dev,
55291+ void (* const sync_single_range_for_cpu)(struct device *dev,
55292 dma_addr_t dma_handle,
55293 unsigned long offset,
55294 size_t size,
55295 enum dma_data_direction dir);
55296- void (*sync_single_range_for_device)(struct device *dev,
55297+ void (* const sync_single_range_for_device)(struct device *dev,
55298 dma_addr_t dma_handle,
55299 unsigned long offset,
55300 size_t size,
55301 enum dma_data_direction dir);
55302- void (*sync_sg_for_cpu)(struct device *dev,
55303+ void (* const sync_sg_for_cpu)(struct device *dev,
55304 struct scatterlist *sg, int nents,
55305 enum dma_data_direction dir);
55306- void (*sync_sg_for_device)(struct device *dev,
55307+ void (* const sync_sg_for_device)(struct device *dev,
55308 struct scatterlist *sg, int nents,
55309 enum dma_data_direction dir);
55310- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
55311- int (*dma_supported)(struct device *dev, u64 mask);
55312+ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
55313+ int (* const dma_supported)(struct device *dev, u64 mask);
55314 int (*set_dma_mask)(struct device *dev, u64 mask);
55315- int is_phys;
55316+ const int is_phys;
55317 };
55318
55319 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
55320diff -urNp linux-2.6.32.43/include/linux/dst.h linux-2.6.32.43/include/linux/dst.h
55321--- linux-2.6.32.43/include/linux/dst.h 2011-03-27 14:31:47.000000000 -0400
55322+++ linux-2.6.32.43/include/linux/dst.h 2011-04-17 15:56:46.000000000 -0400
55323@@ -380,7 +380,7 @@ struct dst_node
55324 struct thread_pool *pool;
55325
55326 /* Transaction IDs live here */
55327- atomic_long_t gen;
55328+ atomic_long_unchecked_t gen;
55329
55330 /*
55331 * How frequently and how many times transaction
55332diff -urNp linux-2.6.32.43/include/linux/elf.h linux-2.6.32.43/include/linux/elf.h
55333--- linux-2.6.32.43/include/linux/elf.h 2011-03-27 14:31:47.000000000 -0400
55334+++ linux-2.6.32.43/include/linux/elf.h 2011-04-17 15:56:46.000000000 -0400
55335@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
55336 #define PT_GNU_EH_FRAME 0x6474e550
55337
55338 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
55339+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
55340+
55341+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
55342+
55343+/* Constants for the e_flags field */
55344+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
55345+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
55346+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
55347+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
55348+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
55349+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
55350
55351 /* These constants define the different elf file types */
55352 #define ET_NONE 0
55353@@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
55354 #define DT_DEBUG 21
55355 #define DT_TEXTREL 22
55356 #define DT_JMPREL 23
55357+#define DT_FLAGS 30
55358+ #define DF_TEXTREL 0x00000004
55359 #define DT_ENCODING 32
55360 #define OLD_DT_LOOS 0x60000000
55361 #define DT_LOOS 0x6000000d
55362@@ -230,6 +243,19 @@ typedef struct elf64_hdr {
55363 #define PF_W 0x2
55364 #define PF_X 0x1
55365
55366+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
55367+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
55368+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
55369+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
55370+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
55371+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
55372+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
55373+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
55374+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
55375+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
55376+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
55377+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
55378+
55379 typedef struct elf32_phdr{
55380 Elf32_Word p_type;
55381 Elf32_Off p_offset;
55382@@ -322,6 +348,8 @@ typedef struct elf64_shdr {
55383 #define EI_OSABI 7
55384 #define EI_PAD 8
55385
55386+#define EI_PAX 14
55387+
55388 #define ELFMAG0 0x7f /* EI_MAG */
55389 #define ELFMAG1 'E'
55390 #define ELFMAG2 'L'
55391@@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
55392 #define elf_phdr elf32_phdr
55393 #define elf_note elf32_note
55394 #define elf_addr_t Elf32_Off
55395+#define elf_dyn Elf32_Dyn
55396
55397 #else
55398
55399@@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
55400 #define elf_phdr elf64_phdr
55401 #define elf_note elf64_note
55402 #define elf_addr_t Elf64_Off
55403+#define elf_dyn Elf64_Dyn
55404
55405 #endif
55406
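
The elf.h additions above give the loader two ways to carry per-binary PaX markings: the legacy EI_PAX byte in e_ident and the PT_PAX_FLAGS program header whose p_flags holds the PF_*/PF_NO* enable/disable pairs. The userspace sketch below shows how such a marking could be inspected; it assumes a 64-bit ELF file, takes its constant values from the hunk above, and is only an illustration, not the in-kernel parser.

#include <elf.h>
#include <stdio.h>
#include <string.h>

#ifndef PT_PAX_FLAGS
#define PT_PAX_FLAGS  (PT_LOOS + 0x5041580)     /* value from the hunk above */
#endif
#define PF_PAGEEXEC   (1U << 4)
#define PF_NOPAGEEXEC (1U << 5)
#define PF_MPROTECT   (1U << 8)
#define PF_NOMPROTECT (1U << 9)
#define PF_RANDMMAP   (1U << 14)
#define PF_NORANDMMAP (1U << 15)

static const char *state(unsigned int flags, unsigned int on, unsigned int off)
{
        return (flags & off) ? "off" : (flags & on) ? "on" : "default";
}

int main(int argc, char **argv)
{
        FILE *f;
        Elf64_Ehdr eh;
        int i;

        if (argc != 2) {
                fprintf(stderr, "usage: %s <elf64-file>\n", argv[0]);
                return 1;
        }
        f = fopen(argv[1], "rb");
        if (!f || fread(&eh, sizeof(eh), 1, f) != 1 ||
            memcmp(eh.e_ident, ELFMAG, SELFMAG) != 0 ||
            eh.e_ident[EI_CLASS] != ELFCLASS64) {
                fprintf(stderr, "not a readable 64-bit ELF file\n");
                return 1;
        }

        for (i = 0; i < eh.e_phnum; i++) {
                Elf64_Phdr ph;

                fseek(f, (long)(eh.e_phoff + (unsigned long)i * eh.e_phentsize), SEEK_SET);
                if (fread(&ph, sizeof(ph), 1, f) != 1)
                        break;
                if (ph.p_type != PT_PAX_FLAGS)
                        continue;
                printf("PT_PAX_FLAGS: 0x%x\n", (unsigned int)ph.p_flags);
                printf("  PAGEEXEC: %s\n", state(ph.p_flags, PF_PAGEEXEC, PF_NOPAGEEXEC));
                printf("  MPROTECT: %s\n", state(ph.p_flags, PF_MPROTECT, PF_NOMPROTECT));
                printf("  RANDMMAP: %s\n", state(ph.p_flags, PF_RANDMMAP, PF_NORANDMMAP));
        }
        fclose(f);
        return 0;
}
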
55407diff -urNp linux-2.6.32.43/include/linux/fscache-cache.h linux-2.6.32.43/include/linux/fscache-cache.h
55408--- linux-2.6.32.43/include/linux/fscache-cache.h 2011-03-27 14:31:47.000000000 -0400
55409+++ linux-2.6.32.43/include/linux/fscache-cache.h 2011-05-04 17:56:28.000000000 -0400
55410@@ -116,7 +116,7 @@ struct fscache_operation {
55411 #endif
55412 };
55413
55414-extern atomic_t fscache_op_debug_id;
55415+extern atomic_unchecked_t fscache_op_debug_id;
55416 extern const struct slow_work_ops fscache_op_slow_work_ops;
55417
55418 extern void fscache_enqueue_operation(struct fscache_operation *);
55419@@ -134,7 +134,7 @@ static inline void fscache_operation_ini
55420 fscache_operation_release_t release)
55421 {
55422 atomic_set(&op->usage, 1);
55423- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
55424+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
55425 op->release = release;
55426 INIT_LIST_HEAD(&op->pend_link);
55427 fscache_set_op_state(op, "Init");
55428diff -urNp linux-2.6.32.43/include/linux/fs.h linux-2.6.32.43/include/linux/fs.h
55429--- linux-2.6.32.43/include/linux/fs.h 2011-07-13 17:23:04.000000000 -0400
55430+++ linux-2.6.32.43/include/linux/fs.h 2011-07-13 17:23:19.000000000 -0400
55431@@ -90,6 +90,11 @@ struct inodes_stat_t {
55432 /* Expect random access pattern */
55433 #define FMODE_RANDOM ((__force fmode_t)4096)
55434
55435+/* Hack for grsec so as not to require read permission simply to execute
55436+ * a binary
55437+ */
55438+#define FMODE_GREXEC ((__force fmode_t)0x2000000)
55439+
55440 /*
55441 * The below are the various read and write types that we support. Some of
55442 * them include behavioral modifiers that send information down to the
55443@@ -568,41 +573,41 @@ typedef int (*read_actor_t)(read_descrip
55444 unsigned long, unsigned long);
55445
55446 struct address_space_operations {
55447- int (*writepage)(struct page *page, struct writeback_control *wbc);
55448- int (*readpage)(struct file *, struct page *);
55449- void (*sync_page)(struct page *);
55450+ int (* const writepage)(struct page *page, struct writeback_control *wbc);
55451+ int (* const readpage)(struct file *, struct page *);
55452+ void (* const sync_page)(struct page *);
55453
55454 /* Write back some dirty pages from this mapping. */
55455- int (*writepages)(struct address_space *, struct writeback_control *);
55456+ int (* const writepages)(struct address_space *, struct writeback_control *);
55457
55458 /* Set a page dirty. Return true if this dirtied it */
55459- int (*set_page_dirty)(struct page *page);
55460+ int (* const set_page_dirty)(struct page *page);
55461
55462- int (*readpages)(struct file *filp, struct address_space *mapping,
55463+ int (* const readpages)(struct file *filp, struct address_space *mapping,
55464 struct list_head *pages, unsigned nr_pages);
55465
55466- int (*write_begin)(struct file *, struct address_space *mapping,
55467+ int (* const write_begin)(struct file *, struct address_space *mapping,
55468 loff_t pos, unsigned len, unsigned flags,
55469 struct page **pagep, void **fsdata);
55470- int (*write_end)(struct file *, struct address_space *mapping,
55471+ int (* const write_end)(struct file *, struct address_space *mapping,
55472 loff_t pos, unsigned len, unsigned copied,
55473 struct page *page, void *fsdata);
55474
55475 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
55476- sector_t (*bmap)(struct address_space *, sector_t);
55477- void (*invalidatepage) (struct page *, unsigned long);
55478- int (*releasepage) (struct page *, gfp_t);
55479- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
55480+ sector_t (* const bmap)(struct address_space *, sector_t);
55481+ void (* const invalidatepage) (struct page *, unsigned long);
55482+ int (* const releasepage) (struct page *, gfp_t);
55483+ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
55484 loff_t offset, unsigned long nr_segs);
55485- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
55486+ int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
55487 void **, unsigned long *);
55488 /* migrate the contents of a page to the specified target */
55489- int (*migratepage) (struct address_space *,
55490+ int (* const migratepage) (struct address_space *,
55491 struct page *, struct page *);
55492- int (*launder_page) (struct page *);
55493- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
55494+ int (* const launder_page) (struct page *);
55495+ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
55496 unsigned long);
55497- int (*error_remove_page)(struct address_space *, struct page *);
55498+ int (* const error_remove_page)(struct address_space *, struct page *);
55499 };
55500
55501 /*
55502@@ -1031,19 +1036,19 @@ static inline int file_check_writeable(s
55503 typedef struct files_struct *fl_owner_t;
55504
55505 struct file_lock_operations {
55506- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
55507- void (*fl_release_private)(struct file_lock *);
55508+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
55509+ void (* const fl_release_private)(struct file_lock *);
55510 };
55511
55512 struct lock_manager_operations {
55513- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
55514- void (*fl_notify)(struct file_lock *); /* unblock callback */
55515- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
55516- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
55517- void (*fl_release_private)(struct file_lock *);
55518- void (*fl_break)(struct file_lock *);
55519- int (*fl_mylease)(struct file_lock *, struct file_lock *);
55520- int (*fl_change)(struct file_lock **, int);
55521+ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
55522+ void (* const fl_notify)(struct file_lock *); /* unblock callback */
55523+ int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
55524+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
55525+ void (* const fl_release_private)(struct file_lock *);
55526+ void (* const fl_break)(struct file_lock *);
55527+ int (* const fl_mylease)(struct file_lock *, struct file_lock *);
55528+ int (* const fl_change)(struct file_lock **, int);
55529 };
55530
55531 struct lock_manager {
55532@@ -1442,7 +1447,7 @@ struct fiemap_extent_info {
55533 unsigned int fi_flags; /* Flags as passed from user */
55534 unsigned int fi_extents_mapped; /* Number of mapped extents */
55535 unsigned int fi_extents_max; /* Size of fiemap_extent array */
55536- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
55537+ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
55538 * array */
55539 };
55540 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
55541@@ -1559,30 +1564,30 @@ extern ssize_t vfs_writev(struct file *,
55542 unsigned long, loff_t *);
55543
55544 struct super_operations {
55545- struct inode *(*alloc_inode)(struct super_block *sb);
55546- void (*destroy_inode)(struct inode *);
55547+ struct inode *(* const alloc_inode)(struct super_block *sb);
55548+ void (* const destroy_inode)(struct inode *);
55549
55550- void (*dirty_inode) (struct inode *);
55551- int (*write_inode) (struct inode *, int);
55552- void (*drop_inode) (struct inode *);
55553- void (*delete_inode) (struct inode *);
55554- void (*put_super) (struct super_block *);
55555- void (*write_super) (struct super_block *);
55556- int (*sync_fs)(struct super_block *sb, int wait);
55557- int (*freeze_fs) (struct super_block *);
55558- int (*unfreeze_fs) (struct super_block *);
55559- int (*statfs) (struct dentry *, struct kstatfs *);
55560- int (*remount_fs) (struct super_block *, int *, char *);
55561- void (*clear_inode) (struct inode *);
55562- void (*umount_begin) (struct super_block *);
55563+ void (* const dirty_inode) (struct inode *);
55564+ int (* const write_inode) (struct inode *, int);
55565+ void (* const drop_inode) (struct inode *);
55566+ void (* const delete_inode) (struct inode *);
55567+ void (* const put_super) (struct super_block *);
55568+ void (* const write_super) (struct super_block *);
55569+ int (* const sync_fs)(struct super_block *sb, int wait);
55570+ int (* const freeze_fs) (struct super_block *);
55571+ int (* const unfreeze_fs) (struct super_block *);
55572+ int (* const statfs) (struct dentry *, struct kstatfs *);
55573+ int (* const remount_fs) (struct super_block *, int *, char *);
55574+ void (* const clear_inode) (struct inode *);
55575+ void (* const umount_begin) (struct super_block *);
55576
55577- int (*show_options)(struct seq_file *, struct vfsmount *);
55578- int (*show_stats)(struct seq_file *, struct vfsmount *);
55579+ int (* const show_options)(struct seq_file *, struct vfsmount *);
55580+ int (* const show_stats)(struct seq_file *, struct vfsmount *);
55581 #ifdef CONFIG_QUOTA
55582- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
55583- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
55584+ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
55585+ ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
55586 #endif
55587- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
55588+ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
55589 };
55590
55591 /*
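
Throughout fs.h the patch adds const to every function pointer member of the big operations structures (address_space_operations, file_lock_operations, lock_manager_operations, super_operations). Combined with const instance definitions elsewhere, the intent is that these vtable-like tables can end up in read-only data, so a stray or attacker-controlled kernel write cannot silently retarget the hooks. A minimal userspace illustration of the pattern, with invented demo_ops names:

#include <stdio.h>

/* An operations table whose members are const function pointers. */
struct demo_ops {
        int (* const open)(const char *name);
        int (* const release)(void);
};

static int demo_open(const char *name)
{
        printf("open %s\n", name);
        return 0;
}

static int demo_release(void)
{
        printf("release\n");
        return 0;
}

/* A const instance is eligible for .rodata placement by the toolchain. */
static const struct demo_ops ops = {
        .open    = demo_open,
        .release = demo_release,
};

int main(void)
{
        ops.open("example");
        /* ops.open = NULL; */  /* would not compile: member and object are const */
        ops.release();
        return 0;
}
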
55592diff -urNp linux-2.6.32.43/include/linux/fs_struct.h linux-2.6.32.43/include/linux/fs_struct.h
55593--- linux-2.6.32.43/include/linux/fs_struct.h 2011-03-27 14:31:47.000000000 -0400
55594+++ linux-2.6.32.43/include/linux/fs_struct.h 2011-04-17 15:56:46.000000000 -0400
55595@@ -4,7 +4,7 @@
55596 #include <linux/path.h>
55597
55598 struct fs_struct {
55599- int users;
55600+ atomic_t users;
55601 rwlock_t lock;
55602 int umask;
55603 int in_exec;
55604diff -urNp linux-2.6.32.43/include/linux/ftrace_event.h linux-2.6.32.43/include/linux/ftrace_event.h
55605--- linux-2.6.32.43/include/linux/ftrace_event.h 2011-03-27 14:31:47.000000000 -0400
55606+++ linux-2.6.32.43/include/linux/ftrace_event.h 2011-05-04 17:56:28.000000000 -0400
55607@@ -163,7 +163,7 @@ extern int trace_define_field(struct ftr
55608 int filter_type);
55609 extern int trace_define_common_fields(struct ftrace_event_call *call);
55610
55611-#define is_signed_type(type) (((type)(-1)) < 0)
55612+#define is_signed_type(type) (((type)(-1)) < (type)1)
55613
55614 int trace_set_clr_event(const char *system, const char *event, int set);
55615
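
The small change to is_signed_type() preserves its result for every integer type: a signed type still sees (type)(-1), i.e. -1, compare below (type)1, while an unsigned type sees its maximum value and does not. Comparing against (type)1 instead of the literal 0 presumably sidesteps "comparison of unsigned expression < 0 is always false" style warnings when the macro is expanded for unsigned types. A quick standalone check:

#include <stdio.h>

#define is_signed_type(type) (((type)(-1)) < (type)1)

int main(void)
{
        printf("int           : %d\n", is_signed_type(int));           /* 1 */
        printf("long          : %d\n", is_signed_type(long));          /* 1 */
        printf("unsigned int  : %d\n", is_signed_type(unsigned int));  /* 0 */
        printf("unsigned char : %d\n", is_signed_type(unsigned char)); /* 0 */
        return 0;
}
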
55616diff -urNp linux-2.6.32.43/include/linux/genhd.h linux-2.6.32.43/include/linux/genhd.h
55617--- linux-2.6.32.43/include/linux/genhd.h 2011-03-27 14:31:47.000000000 -0400
55618+++ linux-2.6.32.43/include/linux/genhd.h 2011-04-17 15:56:46.000000000 -0400
55619@@ -161,7 +161,7 @@ struct gendisk {
55620
55621 struct timer_rand_state *random;
55622
55623- atomic_t sync_io; /* RAID */
55624+ atomic_unchecked_t sync_io; /* RAID */
55625 struct work_struct async_notify;
55626 #ifdef CONFIG_BLK_DEV_INTEGRITY
55627 struct blk_integrity *integrity;
55628diff -urNp linux-2.6.32.43/include/linux/gracl.h linux-2.6.32.43/include/linux/gracl.h
55629--- linux-2.6.32.43/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
55630+++ linux-2.6.32.43/include/linux/gracl.h 2011-04-17 15:56:46.000000000 -0400
55631@@ -0,0 +1,317 @@
55632+#ifndef GR_ACL_H
55633+#define GR_ACL_H
55634+
55635+#include <linux/grdefs.h>
55636+#include <linux/resource.h>
55637+#include <linux/capability.h>
55638+#include <linux/dcache.h>
55639+#include <asm/resource.h>
55640+
55641+/* Major status information */
55642+
55643+#define GR_VERSION "grsecurity 2.2.2"
55644+#define GRSECURITY_VERSION 0x2202
55645+
55646+enum {
55647+ GR_SHUTDOWN = 0,
55648+ GR_ENABLE = 1,
55649+ GR_SPROLE = 2,
55650+ GR_RELOAD = 3,
55651+ GR_SEGVMOD = 4,
55652+ GR_STATUS = 5,
55653+ GR_UNSPROLE = 6,
55654+ GR_PASSSET = 7,
55655+ GR_SPROLEPAM = 8,
55656+};
55657+
55658+/* Password setup definitions
55659+ * kernel/grhash.c */
55660+enum {
55661+ GR_PW_LEN = 128,
55662+ GR_SALT_LEN = 16,
55663+ GR_SHA_LEN = 32,
55664+};
55665+
55666+enum {
55667+ GR_SPROLE_LEN = 64,
55668+};
55669+
55670+enum {
55671+ GR_NO_GLOB = 0,
55672+ GR_REG_GLOB,
55673+ GR_CREATE_GLOB
55674+};
55675+
55676+#define GR_NLIMITS 32
55677+
55678+/* Begin Data Structures */
55679+
55680+struct sprole_pw {
55681+ unsigned char *rolename;
55682+ unsigned char salt[GR_SALT_LEN];
55683+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
55684+};
55685+
55686+struct name_entry {
55687+ __u32 key;
55688+ ino_t inode;
55689+ dev_t device;
55690+ char *name;
55691+ __u16 len;
55692+ __u8 deleted;
55693+ struct name_entry *prev;
55694+ struct name_entry *next;
55695+};
55696+
55697+struct inodev_entry {
55698+ struct name_entry *nentry;
55699+ struct inodev_entry *prev;
55700+ struct inodev_entry *next;
55701+};
55702+
55703+struct acl_role_db {
55704+ struct acl_role_label **r_hash;
55705+ __u32 r_size;
55706+};
55707+
55708+struct inodev_db {
55709+ struct inodev_entry **i_hash;
55710+ __u32 i_size;
55711+};
55712+
55713+struct name_db {
55714+ struct name_entry **n_hash;
55715+ __u32 n_size;
55716+};
55717+
55718+struct crash_uid {
55719+ uid_t uid;
55720+ unsigned long expires;
55721+};
55722+
55723+struct gr_hash_struct {
55724+ void **table;
55725+ void **nametable;
55726+ void *first;
55727+ __u32 table_size;
55728+ __u32 used_size;
55729+ int type;
55730+};
55731+
55732+/* Userspace Grsecurity ACL data structures */
55733+
55734+struct acl_subject_label {
55735+ char *filename;
55736+ ino_t inode;
55737+ dev_t device;
55738+ __u32 mode;
55739+ kernel_cap_t cap_mask;
55740+ kernel_cap_t cap_lower;
55741+ kernel_cap_t cap_invert_audit;
55742+
55743+ struct rlimit res[GR_NLIMITS];
55744+ __u32 resmask;
55745+
55746+ __u8 user_trans_type;
55747+ __u8 group_trans_type;
55748+ uid_t *user_transitions;
55749+ gid_t *group_transitions;
55750+ __u16 user_trans_num;
55751+ __u16 group_trans_num;
55752+
55753+ __u32 sock_families[2];
55754+ __u32 ip_proto[8];
55755+ __u32 ip_type;
55756+ struct acl_ip_label **ips;
55757+ __u32 ip_num;
55758+ __u32 inaddr_any_override;
55759+
55760+ __u32 crashes;
55761+ unsigned long expires;
55762+
55763+ struct acl_subject_label *parent_subject;
55764+ struct gr_hash_struct *hash;
55765+ struct acl_subject_label *prev;
55766+ struct acl_subject_label *next;
55767+
55768+ struct acl_object_label **obj_hash;
55769+ __u32 obj_hash_size;
55770+ __u16 pax_flags;
55771+};
55772+
55773+struct role_allowed_ip {
55774+ __u32 addr;
55775+ __u32 netmask;
55776+
55777+ struct role_allowed_ip *prev;
55778+ struct role_allowed_ip *next;
55779+};
55780+
55781+struct role_transition {
55782+ char *rolename;
55783+
55784+ struct role_transition *prev;
55785+ struct role_transition *next;
55786+};
55787+
55788+struct acl_role_label {
55789+ char *rolename;
55790+ uid_t uidgid;
55791+ __u16 roletype;
55792+
55793+ __u16 auth_attempts;
55794+ unsigned long expires;
55795+
55796+ struct acl_subject_label *root_label;
55797+ struct gr_hash_struct *hash;
55798+
55799+ struct acl_role_label *prev;
55800+ struct acl_role_label *next;
55801+
55802+ struct role_transition *transitions;
55803+ struct role_allowed_ip *allowed_ips;
55804+ uid_t *domain_children;
55805+ __u16 domain_child_num;
55806+
55807+ struct acl_subject_label **subj_hash;
55808+ __u32 subj_hash_size;
55809+};
55810+
55811+struct user_acl_role_db {
55812+ struct acl_role_label **r_table;
55813+ __u32 num_pointers; /* Number of allocations to track */
55814+ __u32 num_roles; /* Number of roles */
55815+ __u32 num_domain_children; /* Number of domain children */
55816+ __u32 num_subjects; /* Number of subjects */
55817+ __u32 num_objects; /* Number of objects */
55818+};
55819+
55820+struct acl_object_label {
55821+ char *filename;
55822+ ino_t inode;
55823+ dev_t device;
55824+ __u32 mode;
55825+
55826+ struct acl_subject_label *nested;
55827+ struct acl_object_label *globbed;
55828+
55829+ /* next two structures not used */
55830+
55831+ struct acl_object_label *prev;
55832+ struct acl_object_label *next;
55833+};
55834+
55835+struct acl_ip_label {
55836+ char *iface;
55837+ __u32 addr;
55838+ __u32 netmask;
55839+ __u16 low, high;
55840+ __u8 mode;
55841+ __u32 type;
55842+ __u32 proto[8];
55843+
55844+ /* next two structures not used */
55845+
55846+ struct acl_ip_label *prev;
55847+ struct acl_ip_label *next;
55848+};
55849+
55850+struct gr_arg {
55851+ struct user_acl_role_db role_db;
55852+ unsigned char pw[GR_PW_LEN];
55853+ unsigned char salt[GR_SALT_LEN];
55854+ unsigned char sum[GR_SHA_LEN];
55855+ unsigned char sp_role[GR_SPROLE_LEN];
55856+ struct sprole_pw *sprole_pws;
55857+ dev_t segv_device;
55858+ ino_t segv_inode;
55859+ uid_t segv_uid;
55860+ __u16 num_sprole_pws;
55861+ __u16 mode;
55862+};
55863+
55864+struct gr_arg_wrapper {
55865+ struct gr_arg *arg;
55866+ __u32 version;
55867+ __u32 size;
55868+};
55869+
55870+struct subject_map {
55871+ struct acl_subject_label *user;
55872+ struct acl_subject_label *kernel;
55873+ struct subject_map *prev;
55874+ struct subject_map *next;
55875+};
55876+
55877+struct acl_subj_map_db {
55878+ struct subject_map **s_hash;
55879+ __u32 s_size;
55880+};
55881+
55882+/* End Data Structures Section */
55883+
55884+/* Hash functions generated by empirical testing by Brad Spengler
55885+ Makes good use of the low bits of the inode. Generally 0-1 times
55886+ in loop for successful match. 0-3 for unsuccessful match.
55887+ Shift/add algorithm with modulus of table size and an XOR*/
55888+
55889+static __inline__ unsigned int
55890+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
55891+{
55892+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
55893+}
55894+
55895+ static __inline__ unsigned int
55896+shash(const struct acl_subject_label *userp, const unsigned int sz)
55897+{
55898+ return ((const unsigned long)userp % sz);
55899+}
55900+
55901+static __inline__ unsigned int
55902+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
55903+{
55904+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
55905+}
55906+
55907+static __inline__ unsigned int
55908+nhash(const char *name, const __u16 len, const unsigned int sz)
55909+{
55910+ return full_name_hash((const unsigned char *)name, len) % sz;
55911+}
55912+
55913+#define FOR_EACH_ROLE_START(role) \
55914+ role = role_list; \
55915+ while (role) {
55916+
55917+#define FOR_EACH_ROLE_END(role) \
55918+ role = role->prev; \
55919+ }
55920+
55921+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
55922+ subj = NULL; \
55923+ iter = 0; \
55924+ while (iter < role->subj_hash_size) { \
55925+ if (subj == NULL) \
55926+ subj = role->subj_hash[iter]; \
55927+ if (subj == NULL) { \
55928+ iter++; \
55929+ continue; \
55930+ }
55931+
55932+#define FOR_EACH_SUBJECT_END(subj,iter) \
55933+ subj = subj->next; \
55934+ if (subj == NULL) \
55935+ iter++; \
55936+ }
55937+
55938+
55939+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
55940+ subj = role->hash->first; \
55941+ while (subj != NULL) {
55942+
55943+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
55944+ subj = subj->next; \
55945+ }
55946+
55947+#endif
55948+
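
The gracl.h header above ends with the hash helpers used for the RBAC lookup tables; per the in-file comment they are shift/add hashes (with an XOR) reduced modulo the table size, tuned empirically against inode numbers. The standalone program below lifts the fhash() arithmetic out of the header to show how (inode, device) pairs spread over a small table; the table size and device number are made-up test values and the demo_ typedefs merely avoid pulling in kernel types.

#include <stdio.h>

typedef unsigned long demo_ino_t;
typedef unsigned long demo_dev_t;

/* Same arithmetic as fhash() in the hunk above, with plain typedefs. */
static unsigned int demo_fhash(demo_ino_t ino, demo_dev_t dev, unsigned int sz)
{
        return (unsigned int)(((ino + dev) ^
                ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
}

int main(void)
{
        unsigned int buckets[8] = { 0 };        /* toy table size */
        demo_ino_t ino;
        unsigned int i;

        for (ino = 1; ino <= 64; ino++)
                buckets[demo_fhash(ino, 0x801 /* made-up dev */, 8)]++;

        for (i = 0; i < 8; i++)
                printf("bucket %u: %u entries\n", i, buckets[i]);
        return 0;
}
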
55949diff -urNp linux-2.6.32.43/include/linux/gralloc.h linux-2.6.32.43/include/linux/gralloc.h
55950--- linux-2.6.32.43/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
55951+++ linux-2.6.32.43/include/linux/gralloc.h 2011-04-17 15:56:46.000000000 -0400
55952@@ -0,0 +1,9 @@
55953+#ifndef __GRALLOC_H
55954+#define __GRALLOC_H
55955+
55956+void acl_free_all(void);
55957+int acl_alloc_stack_init(unsigned long size);
55958+void *acl_alloc(unsigned long len);
55959+void *acl_alloc_num(unsigned long num, unsigned long len);
55960+
55961+#endif
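
gralloc.h declares the small allocation interface used while the RBAC policy is loaded: a region is sized once with acl_alloc_stack_init(), objects are carved out of it with acl_alloc() and acl_alloc_num(), and everything is torn down in a single acl_free_all() call. The real implementation lives elsewhere in this patch; the userspace model below only mirrors the declared signatures, and its bump-pointer arena is an assumption made for illustration, not the grsecurity allocator.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned char *arena;
static unsigned long arena_size, arena_used;

int acl_alloc_stack_init(unsigned long size)
{
        arena = calloc(1, size);
        arena_size = arena ? size : 0;
        arena_used = 0;
        return arena != NULL;
}

void *acl_alloc(unsigned long len)
{
        void *p;

        len = (len + 7UL) & ~7UL;               /* keep allocations aligned */
        if (!arena || len > arena_size - arena_used)
                return NULL;
        p = arena + arena_used;
        arena_used += len;
        return p;
}

void *acl_alloc_num(unsigned long num, unsigned long len)
{
        if (len && num > (unsigned long)-1 / len)       /* reject overflow */
                return NULL;
        return acl_alloc(num * len);
}

void acl_free_all(void)
{
        free(arena);
        arena = NULL;
        arena_size = arena_used = 0;
}

int main(void)
{
        char *name;
        unsigned int *table;

        if (!acl_alloc_stack_init(4096))
                return 1;
        name = acl_alloc(32);
        table = acl_alloc_num(8, sizeof(*table));
        if (name && table) {
                strcpy(name, "/bin/true");
                printf("name=%s table=%p\n", name, (void *)table);
        }
        acl_free_all();         /* one call releases the whole policy */
        return 0;
}
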
55962diff -urNp linux-2.6.32.43/include/linux/grdefs.h linux-2.6.32.43/include/linux/grdefs.h
55963--- linux-2.6.32.43/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
55964+++ linux-2.6.32.43/include/linux/grdefs.h 2011-06-11 16:20:26.000000000 -0400
55965@@ -0,0 +1,140 @@
55966+#ifndef GRDEFS_H
55967+#define GRDEFS_H
55968+
55969+/* Begin grsecurity status declarations */
55970+
55971+enum {
55972+ GR_READY = 0x01,
55973+ GR_STATUS_INIT = 0x00 // disabled state
55974+};
55975+
55976+/* Begin ACL declarations */
55977+
55978+/* Role flags */
55979+
55980+enum {
55981+ GR_ROLE_USER = 0x0001,
55982+ GR_ROLE_GROUP = 0x0002,
55983+ GR_ROLE_DEFAULT = 0x0004,
55984+ GR_ROLE_SPECIAL = 0x0008,
55985+ GR_ROLE_AUTH = 0x0010,
55986+ GR_ROLE_NOPW = 0x0020,
55987+ GR_ROLE_GOD = 0x0040,
55988+ GR_ROLE_LEARN = 0x0080,
55989+ GR_ROLE_TPE = 0x0100,
55990+ GR_ROLE_DOMAIN = 0x0200,
55991+ GR_ROLE_PAM = 0x0400,
55992+ GR_ROLE_PERSIST = 0x800
55993+};
55994+
55995+/* ACL Subject and Object mode flags */
55996+enum {
55997+ GR_DELETED = 0x80000000
55998+};
55999+
56000+/* ACL Object-only mode flags */
56001+enum {
56002+ GR_READ = 0x00000001,
56003+ GR_APPEND = 0x00000002,
56004+ GR_WRITE = 0x00000004,
56005+ GR_EXEC = 0x00000008,
56006+ GR_FIND = 0x00000010,
56007+ GR_INHERIT = 0x00000020,
56008+ GR_SETID = 0x00000040,
56009+ GR_CREATE = 0x00000080,
56010+ GR_DELETE = 0x00000100,
56011+ GR_LINK = 0x00000200,
56012+ GR_AUDIT_READ = 0x00000400,
56013+ GR_AUDIT_APPEND = 0x00000800,
56014+ GR_AUDIT_WRITE = 0x00001000,
56015+ GR_AUDIT_EXEC = 0x00002000,
56016+ GR_AUDIT_FIND = 0x00004000,
56017+ GR_AUDIT_INHERIT= 0x00008000,
56018+ GR_AUDIT_SETID = 0x00010000,
56019+ GR_AUDIT_CREATE = 0x00020000,
56020+ GR_AUDIT_DELETE = 0x00040000,
56021+ GR_AUDIT_LINK = 0x00080000,
56022+ GR_PTRACERD = 0x00100000,
56023+ GR_NOPTRACE = 0x00200000,
56024+ GR_SUPPRESS = 0x00400000,
56025+ GR_NOLEARN = 0x00800000,
56026+ GR_INIT_TRANSFER= 0x01000000
56027+};
56028+
56029+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
56030+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
56031+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
56032+
56033+/* ACL subject-only mode flags */
56034+enum {
56035+ GR_KILL = 0x00000001,
56036+ GR_VIEW = 0x00000002,
56037+ GR_PROTECTED = 0x00000004,
56038+ GR_LEARN = 0x00000008,
56039+ GR_OVERRIDE = 0x00000010,
56040+ /* just a placeholder, this mode is only used in userspace */
56041+ GR_DUMMY = 0x00000020,
56042+ GR_PROTSHM = 0x00000040,
56043+ GR_KILLPROC = 0x00000080,
56044+ GR_KILLIPPROC = 0x00000100,
56045+ /* just a placeholder, this mode is only used in userspace */
56046+ GR_NOTROJAN = 0x00000200,
56047+ GR_PROTPROCFD = 0x00000400,
56048+ GR_PROCACCT = 0x00000800,
56049+ GR_RELAXPTRACE = 0x00001000,
56050+ GR_NESTED = 0x00002000,
56051+ GR_INHERITLEARN = 0x00004000,
56052+ GR_PROCFIND = 0x00008000,
56053+ GR_POVERRIDE = 0x00010000,
56054+ GR_KERNELAUTH = 0x00020000,
56055+ GR_ATSECURE = 0x00040000,
56056+ GR_SHMEXEC = 0x00080000
56057+};
56058+
56059+enum {
56060+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
56061+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
56062+ GR_PAX_ENABLE_MPROTECT = 0x0004,
56063+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
56064+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
56065+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
56066+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
56067+ GR_PAX_DISABLE_MPROTECT = 0x0400,
56068+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
56069+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
56070+};
56071+
56072+enum {
56073+ GR_ID_USER = 0x01,
56074+ GR_ID_GROUP = 0x02,
56075+};
56076+
56077+enum {
56078+ GR_ID_ALLOW = 0x01,
56079+ GR_ID_DENY = 0x02,
56080+};
56081+
56082+#define GR_CRASH_RES 31
56083+#define GR_UIDTABLE_MAX 500
56084+
56085+/* begin resource learning section */
56086+enum {
56087+ GR_RLIM_CPU_BUMP = 60,
56088+ GR_RLIM_FSIZE_BUMP = 50000,
56089+ GR_RLIM_DATA_BUMP = 10000,
56090+ GR_RLIM_STACK_BUMP = 1000,
56091+ GR_RLIM_CORE_BUMP = 10000,
56092+ GR_RLIM_RSS_BUMP = 500000,
56093+ GR_RLIM_NPROC_BUMP = 1,
56094+ GR_RLIM_NOFILE_BUMP = 5,
56095+ GR_RLIM_MEMLOCK_BUMP = 50000,
56096+ GR_RLIM_AS_BUMP = 500000,
56097+ GR_RLIM_LOCKS_BUMP = 2,
56098+ GR_RLIM_SIGPENDING_BUMP = 5,
56099+ GR_RLIM_MSGQUEUE_BUMP = 10000,
56100+ GR_RLIM_NICE_BUMP = 1,
56101+ GR_RLIM_RTPRIO_BUMP = 1,
56102+ GR_RLIM_RTTIME_BUMP = 1000000
56103+};
56104+
56105+#endif
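
grdefs.h lays out the RBAC bit vocabulary: role types, object access bits with a parallel set of audit bits (collected in GR_AUDITS), subject-only bits, and the paired GR_PAX_ENABLE_*/GR_PAX_DISABLE_* flags. One detail visible in the values above is that every audit bit sits exactly ten positions above the access bit it mirrors (GR_READ 0x1 vs GR_AUDIT_READ 0x400, GR_WRITE 0x4 vs GR_AUDIT_WRITE 0x1000, and so on), which is presumably what to_gr_audit() exploits. The snippet below only demonstrates that bit arithmetic with a few values copied from the hunk; the actual grsecurity decision logic is more involved.

#include <stdio.h>

/* A few object-mode bits, values copied from the grdefs.h hunk above. */
#define GR_READ         0x00000001u
#define GR_WRITE        0x00000004u
#define GR_AUDIT_READ   0x00000400u
#define GR_AUDIT_WRITE  0x00001000u

int main(void)
{
        unsigned int obj_mode = GR_READ | GR_WRITE | GR_AUDIT_WRITE;
        unsigned int reqmode  = GR_READ | GR_WRITE;

        /* All requested access bits present in the object's mode? */
        if ((obj_mode & reqmode) == reqmode)
                printf("access bits are all granted\n");

        /* Shifting the request left by ten lands on the matching audit bits. */
        if (obj_mode & (reqmode << 10))
                printf("at least one matching audit bit is set\n");
        return 0;
}
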
56106diff -urNp linux-2.6.32.43/include/linux/grinternal.h linux-2.6.32.43/include/linux/grinternal.h
56107--- linux-2.6.32.43/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
56108+++ linux-2.6.32.43/include/linux/grinternal.h 2011-07-14 20:35:29.000000000 -0400
56109@@ -0,0 +1,218 @@
56110+#ifndef __GRINTERNAL_H
56111+#define __GRINTERNAL_H
56112+
56113+#ifdef CONFIG_GRKERNSEC
56114+
56115+#include <linux/fs.h>
56116+#include <linux/mnt_namespace.h>
56117+#include <linux/nsproxy.h>
56118+#include <linux/gracl.h>
56119+#include <linux/grdefs.h>
56120+#include <linux/grmsg.h>
56121+
56122+void gr_add_learn_entry(const char *fmt, ...)
56123+ __attribute__ ((format (printf, 1, 2)));
56124+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
56125+ const struct vfsmount *mnt);
56126+__u32 gr_check_create(const struct dentry *new_dentry,
56127+ const struct dentry *parent,
56128+ const struct vfsmount *mnt, const __u32 mode);
56129+int gr_check_protected_task(const struct task_struct *task);
56130+__u32 to_gr_audit(const __u32 reqmode);
56131+int gr_set_acls(const int type);
56132+int gr_apply_subject_to_task(struct task_struct *task);
56133+int gr_acl_is_enabled(void);
56134+char gr_roletype_to_char(void);
56135+
56136+void gr_handle_alertkill(struct task_struct *task);
56137+char *gr_to_filename(const struct dentry *dentry,
56138+ const struct vfsmount *mnt);
56139+char *gr_to_filename1(const struct dentry *dentry,
56140+ const struct vfsmount *mnt);
56141+char *gr_to_filename2(const struct dentry *dentry,
56142+ const struct vfsmount *mnt);
56143+char *gr_to_filename3(const struct dentry *dentry,
56144+ const struct vfsmount *mnt);
56145+
56146+extern int grsec_enable_harden_ptrace;
56147+extern int grsec_enable_link;
56148+extern int grsec_enable_fifo;
56149+extern int grsec_enable_execve;
56150+extern int grsec_enable_shm;
56151+extern int grsec_enable_execlog;
56152+extern int grsec_enable_signal;
56153+extern int grsec_enable_audit_ptrace;
56154+extern int grsec_enable_forkfail;
56155+extern int grsec_enable_time;
56156+extern int grsec_enable_rofs;
56157+extern int grsec_enable_chroot_shmat;
56158+extern int grsec_enable_chroot_mount;
56159+extern int grsec_enable_chroot_double;
56160+extern int grsec_enable_chroot_pivot;
56161+extern int grsec_enable_chroot_chdir;
56162+extern int grsec_enable_chroot_chmod;
56163+extern int grsec_enable_chroot_mknod;
56164+extern int grsec_enable_chroot_fchdir;
56165+extern int grsec_enable_chroot_nice;
56166+extern int grsec_enable_chroot_execlog;
56167+extern int grsec_enable_chroot_caps;
56168+extern int grsec_enable_chroot_sysctl;
56169+extern int grsec_enable_chroot_unix;
56170+extern int grsec_enable_tpe;
56171+extern int grsec_tpe_gid;
56172+extern int grsec_enable_tpe_all;
56173+extern int grsec_enable_tpe_invert;
56174+extern int grsec_enable_socket_all;
56175+extern int grsec_socket_all_gid;
56176+extern int grsec_enable_socket_client;
56177+extern int grsec_socket_client_gid;
56178+extern int grsec_enable_socket_server;
56179+extern int grsec_socket_server_gid;
56180+extern int grsec_audit_gid;
56181+extern int grsec_enable_group;
56182+extern int grsec_enable_audit_textrel;
56183+extern int grsec_enable_log_rwxmaps;
56184+extern int grsec_enable_mount;
56185+extern int grsec_enable_chdir;
56186+extern int grsec_resource_logging;
56187+extern int grsec_enable_blackhole;
56188+extern int grsec_lastack_retries;
56189+extern int grsec_enable_brute;
56190+extern int grsec_lock;
56191+
56192+extern spinlock_t grsec_alert_lock;
56193+extern unsigned long grsec_alert_wtime;
56194+extern unsigned long grsec_alert_fyet;
56195+
56196+extern spinlock_t grsec_audit_lock;
56197+
56198+extern rwlock_t grsec_exec_file_lock;
56199+
56200+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
56201+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
56202+ (tsk)->exec_file->f_vfsmnt) : "/")
56203+
56204+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
56205+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
56206+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
56207+
56208+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
56209+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
56210+ (tsk)->exec_file->f_vfsmnt) : "/")
56211+
56212+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
56213+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
56214+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
56215+
56216+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
56217+
56218+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
56219+
56220+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
56221+ (task)->pid, (cred)->uid, \
56222+ (cred)->euid, (cred)->gid, (cred)->egid, \
56223+ gr_parent_task_fullpath(task), \
56224+ (task)->real_parent->comm, (task)->real_parent->pid, \
56225+ (pcred)->uid, (pcred)->euid, \
56226+ (pcred)->gid, (pcred)->egid
56227+
56228+#define GR_CHROOT_CAPS {{ \
56229+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
56230+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
56231+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
56232+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
56233+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
56234+ CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
56235+
56236+#define security_learn(normal_msg,args...) \
56237+({ \
56238+ read_lock(&grsec_exec_file_lock); \
56239+ gr_add_learn_entry(normal_msg "\n", ## args); \
56240+ read_unlock(&grsec_exec_file_lock); \
56241+})
56242+
56243+enum {
56244+ GR_DO_AUDIT,
56245+ GR_DONT_AUDIT,
56246+ GR_DONT_AUDIT_GOOD
56247+};
56248+
56249+enum {
56250+ GR_TTYSNIFF,
56251+ GR_RBAC,
56252+ GR_RBAC_STR,
56253+ GR_STR_RBAC,
56254+ GR_RBAC_MODE2,
56255+ GR_RBAC_MODE3,
56256+ GR_FILENAME,
56257+ GR_SYSCTL_HIDDEN,
56258+ GR_NOARGS,
56259+ GR_ONE_INT,
56260+ GR_ONE_INT_TWO_STR,
56261+ GR_ONE_STR,
56262+ GR_STR_INT,
56263+ GR_TWO_STR_INT,
56264+ GR_TWO_INT,
56265+ GR_TWO_U64,
56266+ GR_THREE_INT,
56267+ GR_FIVE_INT_TWO_STR,
56268+ GR_TWO_STR,
56269+ GR_THREE_STR,
56270+ GR_FOUR_STR,
56271+ GR_STR_FILENAME,
56272+ GR_FILENAME_STR,
56273+ GR_FILENAME_TWO_INT,
56274+ GR_FILENAME_TWO_INT_STR,
56275+ GR_TEXTREL,
56276+ GR_PTRACE,
56277+ GR_RESOURCE,
56278+ GR_CAP,
56279+ GR_SIG,
56280+ GR_SIG2,
56281+ GR_CRASH1,
56282+ GR_CRASH2,
56283+ GR_PSACCT,
56284+ GR_RWXMAP
56285+};
56286+
56287+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
56288+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
56289+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
56290+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
56291+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
56292+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
56293+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
56294+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
56295+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
56296+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
56297+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
56298+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
56299+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
56300+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
56301+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
56302+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
56303+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
56304+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
56305+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
56306+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
56307+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
56308+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
56309+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
56310+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
56311+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
56312+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
56313+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
56314+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
56315+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
56316+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
56317+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
56318+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
56319+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
56320+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
56321+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
56322+
56323+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
56324+
56325+#endif
56326+
56327+#endif
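
Every gr_log_* macro above expands to a call to gr_log_varargs() with an argtypes enumerator that tells the single varargs sink how to pull the remaining arguments off the list. The toy dispatcher below shows the shape of that pattern in userspace; the DEMO_* names and formats are invented for the example and are not the grsecurity logger.

#include <stdarg.h>
#include <stdio.h>

enum { DEMO_NOARGS, DEMO_ONE_INT, DEMO_ONE_STR, DEMO_STR_INT };

static void demo_log_varargs(const char *msg, int argtypes, ...)
{
        va_list ap;

        va_start(ap, argtypes);
        fputs("demo: ", stdout);
        switch (argtypes) {
        case DEMO_NOARGS:
                fputs(msg, stdout);
                break;
        case DEMO_ONE_INT:
                printf(msg, va_arg(ap, int));   /* format comes from our own macros */
                break;
        case DEMO_ONE_STR:
                printf(msg, va_arg(ap, const char *));
                break;
        case DEMO_STR_INT: {
                const char *s = va_arg(ap, const char *);
                int n = va_arg(ap, int);

                printf(msg, s, n);
                break;
        }
        }
        fputc('\n', stdout);
        va_end(ap);
}

#define demo_log_int(msg, num)        demo_log_varargs(msg, DEMO_ONE_INT, num)
#define demo_log_str_int(msg, s, n)   demo_log_varargs(msg, DEMO_STR_INT, s, n)

int main(void)
{
        demo_log_int("denied use of fd %d by", 7);
        demo_log_str_int("denied write to %s (flags %d) by", "/etc/shadow", 2);
        return 0;
}
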
56328diff -urNp linux-2.6.32.43/include/linux/grmsg.h linux-2.6.32.43/include/linux/grmsg.h
56329--- linux-2.6.32.43/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
56330+++ linux-2.6.32.43/include/linux/grmsg.h 2011-04-17 15:56:46.000000000 -0400
56331@@ -0,0 +1,108 @@
56332+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
56333+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
56334+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
56335+#define GR_STOPMOD_MSG "denied modification of module state by "
56336+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
56337+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
56338+#define GR_IOPERM_MSG "denied use of ioperm() by "
56339+#define GR_IOPL_MSG "denied use of iopl() by "
56340+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
56341+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
56342+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
56343+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
56344+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
56345+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
56346+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
56347+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
56348+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
56349+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
56350+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
56351+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
56352+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
56353+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
56354+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
56355+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
56356+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
56357+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
56358+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
56359+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
56360+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
56361+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
56362+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
56363+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
56364+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
56365+#define GR_NPROC_MSG "denied overstep of process limit by "
56366+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
56367+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
56368+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
56369+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
56370+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
56371+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
56372+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
56373+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
56374+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
56375+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
56376+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
56377+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
56378+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
56379+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
56380+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
56381+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
56382+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
56383+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
56384+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
56385+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
56386+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
56387+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
56388+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
56389+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
56390+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
56391+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
56392+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
56393+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
56394+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
56395+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
56396+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
56397+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
56398+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
56399+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
56400+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
56401+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
56402+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
56403+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
56404+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
56405+#define GR_FAILFORK_MSG "failed fork with errno %s by "
56406+#define GR_NICE_CHROOT_MSG "denied priority change by "
56407+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
56408+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
56409+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
56410+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
56411+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
56412+#define GR_TIME_MSG "time set by "
56413+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
56414+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
56415+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
56416+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
56417+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
56418+#define GR_BIND_MSG "denied bind() by "
56419+#define GR_CONNECT_MSG "denied connect() by "
56420+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
56421+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
56422+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
56423+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
56424+#define GR_CAP_ACL_MSG "use of %s denied for "
56425+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
56426+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
56427+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
56428+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
56429+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
56430+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
56431+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
56432+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
56433+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
56434+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
56435+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
56436+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
56437+#define GR_VM86_MSG "denied use of vm86 by "
56438+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
56439+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
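
Nearly every format string in grmsg.h ends in "by " because the logger is expected to complete it with DEFAULTSECMSG (defined at the top of this header) and the matching field list produced by DEFAULTSECARGS() from grinternal.h, describing the acting task and its parent. The C mechanism is ordinary adjacent string literal concatenation; below is a reduced userspace illustration with simplified stand-in macros (the DEMO_* names and fields are invented).

#include <stdio.h>

#define DEMO_SECMSG "%.256s[%.16s:%d] uid/euid:%u/%u"
#define DEMO_SECARGS(path, comm, pid, uid, euid) \
        (path), (comm), (pid), (uid), (euid)
#define DEMO_CHDIR_AUDIT_MSG "chdir to %.980s by "

int main(void)
{
        /* The two literals are concatenated at compile time, so the trailing
         * "by " is completed by the task description that follows it. */
        printf(DEMO_CHDIR_AUDIT_MSG DEMO_SECMSG "\n",
               "/tmp", DEMO_SECARGS("/bin/bash", "bash", 1234, 1000u, 1000u));
        return 0;
}
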
56440diff -urNp linux-2.6.32.43/include/linux/grsecurity.h linux-2.6.32.43/include/linux/grsecurity.h
56441--- linux-2.6.32.43/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
56442+++ linux-2.6.32.43/include/linux/grsecurity.h 2011-07-14 20:35:17.000000000 -0400
56443@@ -0,0 +1,215 @@
56444+#ifndef GR_SECURITY_H
56445+#define GR_SECURITY_H
56446+#include <linux/fs.h>
56447+#include <linux/fs_struct.h>
56448+#include <linux/binfmts.h>
56449+#include <linux/gracl.h>
56450+#include <linux/compat.h>
56451+
56452+/* notify of brain-dead configs */
56453+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
56454+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
56455+#endif
56456+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
56457+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
56458+#endif
56459+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
56460+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
56461+#endif
56462+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
56463+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
56464+#endif
56465+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
56466+#error "CONFIG_PAX enabled, but no PaX options are enabled."
56467+#endif
56468+
56469+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
56470+void gr_handle_brute_check(void);
56471+void gr_handle_kernel_exploit(void);
56472+int gr_process_user_ban(void);
56473+
56474+char gr_roletype_to_char(void);
56475+
56476+int gr_acl_enable_at_secure(void);
56477+
56478+int gr_check_user_change(int real, int effective, int fs);
56479+int gr_check_group_change(int real, int effective, int fs);
56480+
56481+void gr_del_task_from_ip_table(struct task_struct *p);
56482+
56483+int gr_pid_is_chrooted(struct task_struct *p);
56484+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
56485+int gr_handle_chroot_nice(void);
56486+int gr_handle_chroot_sysctl(const int op);
56487+int gr_handle_chroot_setpriority(struct task_struct *p,
56488+ const int niceval);
56489+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
56490+int gr_handle_chroot_chroot(const struct dentry *dentry,
56491+ const struct vfsmount *mnt);
56492+int gr_handle_chroot_caps(struct path *path);
56493+void gr_handle_chroot_chdir(struct path *path);
56494+int gr_handle_chroot_chmod(const struct dentry *dentry,
56495+ const struct vfsmount *mnt, const int mode);
56496+int gr_handle_chroot_mknod(const struct dentry *dentry,
56497+ const struct vfsmount *mnt, const int mode);
56498+int gr_handle_chroot_mount(const struct dentry *dentry,
56499+ const struct vfsmount *mnt,
56500+ const char *dev_name);
56501+int gr_handle_chroot_pivot(void);
56502+int gr_handle_chroot_unix(const pid_t pid);
56503+
56504+int gr_handle_rawio(const struct inode *inode);
56505+int gr_handle_nproc(void);
56506+
56507+void gr_handle_ioperm(void);
56508+void gr_handle_iopl(void);
56509+
56510+int gr_tpe_allow(const struct file *file);
56511+
56512+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
56513+void gr_clear_chroot_entries(struct task_struct *task);
56514+
56515+void gr_log_forkfail(const int retval);
56516+void gr_log_timechange(void);
56517+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
56518+void gr_log_chdir(const struct dentry *dentry,
56519+ const struct vfsmount *mnt);
56520+void gr_log_chroot_exec(const struct dentry *dentry,
56521+ const struct vfsmount *mnt);
56522+void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
56523+#ifdef CONFIG_COMPAT
56524+void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
56525+#endif
56526+void gr_log_remount(const char *devname, const int retval);
56527+void gr_log_unmount(const char *devname, const int retval);
56528+void gr_log_mount(const char *from, const char *to, const int retval);
56529+void gr_log_textrel(struct vm_area_struct *vma);
56530+void gr_log_rwxmmap(struct file *file);
56531+void gr_log_rwxmprotect(struct file *file);
56532+
56533+int gr_handle_follow_link(const struct inode *parent,
56534+ const struct inode *inode,
56535+ const struct dentry *dentry,
56536+ const struct vfsmount *mnt);
56537+int gr_handle_fifo(const struct dentry *dentry,
56538+ const struct vfsmount *mnt,
56539+ const struct dentry *dir, const int flag,
56540+ const int acc_mode);
56541+int gr_handle_hardlink(const struct dentry *dentry,
56542+ const struct vfsmount *mnt,
56543+ struct inode *inode,
56544+ const int mode, const char *to);
56545+
56546+int gr_is_capable(const int cap);
56547+int gr_is_capable_nolog(const int cap);
56548+void gr_learn_resource(const struct task_struct *task, const int limit,
56549+ const unsigned long wanted, const int gt);
56550+void gr_copy_label(struct task_struct *tsk);
56551+void gr_handle_crash(struct task_struct *task, const int sig);
56552+int gr_handle_signal(const struct task_struct *p, const int sig);
56553+int gr_check_crash_uid(const uid_t uid);
56554+int gr_check_protected_task(const struct task_struct *task);
56555+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
56556+int gr_acl_handle_mmap(const struct file *file,
56557+ const unsigned long prot);
56558+int gr_acl_handle_mprotect(const struct file *file,
56559+ const unsigned long prot);
56560+int gr_check_hidden_task(const struct task_struct *tsk);
56561+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
56562+ const struct vfsmount *mnt);
56563+__u32 gr_acl_handle_utime(const struct dentry *dentry,
56564+ const struct vfsmount *mnt);
56565+__u32 gr_acl_handle_access(const struct dentry *dentry,
56566+ const struct vfsmount *mnt, const int fmode);
56567+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
56568+ const struct vfsmount *mnt, mode_t mode);
56569+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
56570+ const struct vfsmount *mnt, mode_t mode);
56571+__u32 gr_acl_handle_chown(const struct dentry *dentry,
56572+ const struct vfsmount *mnt);
56573+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
56574+ const struct vfsmount *mnt);
56575+int gr_handle_ptrace(struct task_struct *task, const long request);
56576+int gr_handle_proc_ptrace(struct task_struct *task);
56577+__u32 gr_acl_handle_execve(const struct dentry *dentry,
56578+ const struct vfsmount *mnt);
56579+int gr_check_crash_exec(const struct file *filp);
56580+int gr_acl_is_enabled(void);
56581+void gr_set_kernel_label(struct task_struct *task);
56582+void gr_set_role_label(struct task_struct *task, const uid_t uid,
56583+ const gid_t gid);
56584+int gr_set_proc_label(const struct dentry *dentry,
56585+ const struct vfsmount *mnt,
56586+ const int unsafe_share);
56587+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
56588+ const struct vfsmount *mnt);
56589+__u32 gr_acl_handle_open(const struct dentry *dentry,
56590+ const struct vfsmount *mnt, const int fmode);
56591+__u32 gr_acl_handle_creat(const struct dentry *dentry,
56592+ const struct dentry *p_dentry,
56593+ const struct vfsmount *p_mnt, const int fmode,
56594+ const int imode);
56595+void gr_handle_create(const struct dentry *dentry,
56596+ const struct vfsmount *mnt);
56597+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
56598+ const struct dentry *parent_dentry,
56599+ const struct vfsmount *parent_mnt,
56600+ const int mode);
56601+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
56602+ const struct dentry *parent_dentry,
56603+ const struct vfsmount *parent_mnt);
56604+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
56605+ const struct vfsmount *mnt);
56606+void gr_handle_delete(const ino_t ino, const dev_t dev);
56607+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
56608+ const struct vfsmount *mnt);
56609+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
56610+ const struct dentry *parent_dentry,
56611+ const struct vfsmount *parent_mnt,
56612+ const char *from);
56613+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
56614+ const struct dentry *parent_dentry,
56615+ const struct vfsmount *parent_mnt,
56616+ const struct dentry *old_dentry,
56617+ const struct vfsmount *old_mnt, const char *to);
56618+int gr_acl_handle_rename(struct dentry *new_dentry,
56619+ struct dentry *parent_dentry,
56620+ const struct vfsmount *parent_mnt,
56621+ struct dentry *old_dentry,
56622+ struct inode *old_parent_inode,
56623+ struct vfsmount *old_mnt, const char *newname);
56624+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
56625+ struct dentry *old_dentry,
56626+ struct dentry *new_dentry,
56627+ struct vfsmount *mnt, const __u8 replace);
56628+__u32 gr_check_link(const struct dentry *new_dentry,
56629+ const struct dentry *parent_dentry,
56630+ const struct vfsmount *parent_mnt,
56631+ const struct dentry *old_dentry,
56632+ const struct vfsmount *old_mnt);
56633+int gr_acl_handle_filldir(const struct file *file, const char *name,
56634+ const unsigned int namelen, const ino_t ino);
56635+
56636+__u32 gr_acl_handle_unix(const struct dentry *dentry,
56637+ const struct vfsmount *mnt);
56638+void gr_acl_handle_exit(void);
56639+void gr_acl_handle_psacct(struct task_struct *task, const long code);
56640+int gr_acl_handle_procpidmem(const struct task_struct *task);
56641+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
56642+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
56643+void gr_audit_ptrace(struct task_struct *task);
56644+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
56645+
56646+#ifdef CONFIG_GRKERNSEC
56647+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
56648+void gr_handle_vm86(void);
56649+void gr_handle_mem_readwrite(u64 from, u64 to);
56650+
56651+extern int grsec_enable_dmesg;
56652+extern int grsec_disable_privio;
56653+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
56654+extern int grsec_enable_chroot_findtask;
56655+#endif
56656+#endif
56657+
56658+#endif
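
Near the top of grsecurity.h, the "notify of brain-dead configs" block turns contradictory option combinations into hard build failures, for example selecting CONFIG_PAX_NOEXEC without any of its backends. The pattern is ordinary preprocessor validation and can be reproduced in miniature; the DEMO_* option names below are invented.

#include <stdio.h>

/* Toggle these to watch the build-time check fire. */
#define DEMO_FEATURE_NOEXEC   1
#define DEMO_BACKEND_PAGEEXEC 1
/* #define DEMO_BACKEND_SEGMEXEC 1 */

#if defined(DEMO_FEATURE_NOEXEC) && \
    !defined(DEMO_BACKEND_PAGEEXEC) && !defined(DEMO_BACKEND_SEGMEXEC)
#error "DEMO_FEATURE_NOEXEC enabled, but no backend is selected."
#endif

int main(void)
{
        puts("configuration is consistent");
        return 0;
}
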
56659diff -urNp linux-2.6.32.43/include/linux/hdpu_features.h linux-2.6.32.43/include/linux/hdpu_features.h
56660--- linux-2.6.32.43/include/linux/hdpu_features.h 2011-03-27 14:31:47.000000000 -0400
56661+++ linux-2.6.32.43/include/linux/hdpu_features.h 2011-04-17 15:56:46.000000000 -0400
56662@@ -3,7 +3,7 @@
56663 struct cpustate_t {
56664 spinlock_t lock;
56665 int excl;
56666- int open_count;
56667+ atomic_t open_count;
56668 unsigned char cached_val;
56669 int inited;
56670 unsigned long *set_addr;
56671diff -urNp linux-2.6.32.43/include/linux/highmem.h linux-2.6.32.43/include/linux/highmem.h
56672--- linux-2.6.32.43/include/linux/highmem.h 2011-03-27 14:31:47.000000000 -0400
56673+++ linux-2.6.32.43/include/linux/highmem.h 2011-04-17 15:56:46.000000000 -0400
56674@@ -137,6 +137,18 @@ static inline void clear_highpage(struct
56675 kunmap_atomic(kaddr, KM_USER0);
56676 }
56677
56678+static inline void sanitize_highpage(struct page *page)
56679+{
56680+ void *kaddr;
56681+ unsigned long flags;
56682+
56683+ local_irq_save(flags);
56684+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
56685+ clear_page(kaddr);
56686+ kunmap_atomic(kaddr, KM_CLEARPAGE);
56687+ local_irq_restore(flags);
56688+}
56689+
56690 static inline void zero_user_segments(struct page *page,
56691 unsigned start1, unsigned end1,
56692 unsigned start2, unsigned end2)
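
sanitize_highpage(), added above, zeroes a page through the dedicated KM_CLEARPAGE atomic kmap slot with interrupts disabled; elsewhere in the patch it is presumably wired into the page freeing paths so that freed memory does not keep its old contents (PaX's memory sanitization). The userspace analogue below only conveys the "scrub before free" idea; plain free() gives no such guarantee and the function name is invented.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Zero a buffer before releasing it so its old contents cannot be read
 * back out of the allocator's free lists.  The volatile pointer makes it
 * harder for the compiler to drop the clearing loop as dead stores. */
static void sanitize_free(void *buf, size_t len)
{
        volatile unsigned char *p = buf;
        size_t i;

        for (i = 0; i < len; i++)
                p[i] = 0;
        free(buf);
}

int main(void)
{
        char *secret = malloc(64);

        if (!secret)
                return 1;
        strcpy(secret, "ephemeral key material");
        printf("using: %s\n", secret);
        sanitize_free(secret, 64);
        return 0;
}
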
56693diff -urNp linux-2.6.32.43/include/linux/i2o.h linux-2.6.32.43/include/linux/i2o.h
56694--- linux-2.6.32.43/include/linux/i2o.h 2011-03-27 14:31:47.000000000 -0400
56695+++ linux-2.6.32.43/include/linux/i2o.h 2011-05-04 17:56:28.000000000 -0400
56696@@ -564,7 +564,7 @@ struct i2o_controller {
56697 struct i2o_device *exec; /* Executive */
56698 #if BITS_PER_LONG == 64
56699 spinlock_t context_list_lock; /* lock for context_list */
56700- atomic_t context_list_counter; /* needed for unique contexts */
56701+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
56702 struct list_head context_list; /* list of context id's
56703 and pointers */
56704 #endif
56705diff -urNp linux-2.6.32.43/include/linux/init_task.h linux-2.6.32.43/include/linux/init_task.h
56706--- linux-2.6.32.43/include/linux/init_task.h 2011-03-27 14:31:47.000000000 -0400
56707+++ linux-2.6.32.43/include/linux/init_task.h 2011-05-18 20:44:59.000000000 -0400
56708@@ -83,6 +83,12 @@ extern struct group_info init_groups;
56709 #define INIT_IDS
56710 #endif
56711
56712+#ifdef CONFIG_X86
56713+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
56714+#else
56715+#define INIT_TASK_THREAD_INFO
56716+#endif
56717+
56718 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
56719 /*
56720 * Because of the reduced scope of CAP_SETPCAP when filesystem
56721@@ -156,6 +162,7 @@ extern struct cred init_cred;
56722 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
56723 .comm = "swapper", \
56724 .thread = INIT_THREAD, \
56725+ INIT_TASK_THREAD_INFO \
56726 .fs = &init_fs, \
56727 .files = &init_files, \
56728 .signal = &init_signals, \
56729diff -urNp linux-2.6.32.43/include/linux/interrupt.h linux-2.6.32.43/include/linux/interrupt.h
56730--- linux-2.6.32.43/include/linux/interrupt.h 2011-06-25 12:55:35.000000000 -0400
56731+++ linux-2.6.32.43/include/linux/interrupt.h 2011-06-25 12:56:37.000000000 -0400
56732@@ -363,7 +363,7 @@ enum
56733 /* map softirq index to softirq name. update 'softirq_to_name' in
56734 * kernel/softirq.c when adding a new softirq.
56735 */
56736-extern char *softirq_to_name[NR_SOFTIRQS];
56737+extern const char * const softirq_to_name[NR_SOFTIRQS];
56738
56739 /* softirq mask and active fields moved to irq_cpustat_t in
56740 * asm/hardirq.h to get better cache usage. KAO
56741@@ -371,12 +371,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
56742
56743 struct softirq_action
56744 {
56745- void (*action)(struct softirq_action *);
56746+ void (*action)(void);
56747 };
56748
56749 asmlinkage void do_softirq(void);
56750 asmlinkage void __do_softirq(void);
56751-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
56752+extern void open_softirq(int nr, void (*action)(void));
56753 extern void softirq_init(void);
56754 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
56755 extern void raise_softirq_irqoff(unsigned int nr);
56756diff -urNp linux-2.6.32.43/include/linux/irq.h linux-2.6.32.43/include/linux/irq.h
56757--- linux-2.6.32.43/include/linux/irq.h 2011-03-27 14:31:47.000000000 -0400
56758+++ linux-2.6.32.43/include/linux/irq.h 2011-04-17 15:56:46.000000000 -0400
56759@@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq,
56760 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
56761 bool boot)
56762 {
56763+#ifdef CONFIG_CPUMASK_OFFSTACK
56764 gfp_t gfp = GFP_ATOMIC;
56765
56766 if (boot)
56767 gfp = GFP_NOWAIT;
56768
56769-#ifdef CONFIG_CPUMASK_OFFSTACK
56770 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
56771 return false;
56772
56773diff -urNp linux-2.6.32.43/include/linux/kallsyms.h linux-2.6.32.43/include/linux/kallsyms.h
56774--- linux-2.6.32.43/include/linux/kallsyms.h 2011-03-27 14:31:47.000000000 -0400
56775+++ linux-2.6.32.43/include/linux/kallsyms.h 2011-04-17 15:56:46.000000000 -0400
56776@@ -15,7 +15,8 @@
56777
56778 struct module;
56779
56780-#ifdef CONFIG_KALLSYMS
56781+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
56782+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
56783 /* Lookup the address for a symbol. Returns 0 if not found. */
56784 unsigned long kallsyms_lookup_name(const char *name);
56785
56786@@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(un
56787 /* Stupid that this does nothing, but I didn't create this mess. */
56788 #define __print_symbol(fmt, addr)
56789 #endif /*CONFIG_KALLSYMS*/
56790+#else /* when included by kallsyms.c, vsnprintf.c, or
56791+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
56792+extern void __print_symbol(const char *fmt, unsigned long address);
56793+extern int sprint_symbol(char *buffer, unsigned long address);
56794+const char *kallsyms_lookup(unsigned long addr,
56795+ unsigned long *symbolsize,
56796+ unsigned long *offset,
56797+ char **modname, char *namebuf);
56798+#endif
56799
56800 /* This macro allows us to keep printk typechecking */
56801 static void __check_printsym_format(const char *fmt, ...)
56802diff -urNp linux-2.6.32.43/include/linux/kgdb.h linux-2.6.32.43/include/linux/kgdb.h
56803--- linux-2.6.32.43/include/linux/kgdb.h 2011-03-27 14:31:47.000000000 -0400
56804+++ linux-2.6.32.43/include/linux/kgdb.h 2011-05-04 17:56:20.000000000 -0400
56805@@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
56806
56807 extern int kgdb_connected;
56808
56809-extern atomic_t kgdb_setting_breakpoint;
56810-extern atomic_t kgdb_cpu_doing_single_step;
56811+extern atomic_unchecked_t kgdb_setting_breakpoint;
56812+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
56813
56814 extern struct task_struct *kgdb_usethread;
56815 extern struct task_struct *kgdb_contthread;
56816@@ -251,20 +251,20 @@ struct kgdb_arch {
56817 */
56818 struct kgdb_io {
56819 const char *name;
56820- int (*read_char) (void);
56821- void (*write_char) (u8);
56822- void (*flush) (void);
56823- int (*init) (void);
56824- void (*pre_exception) (void);
56825- void (*post_exception) (void);
56826+ int (* const read_char) (void);
56827+ void (* const write_char) (u8);
56828+ void (* const flush) (void);
56829+ int (* const init) (void);
56830+ void (* const pre_exception) (void);
56831+ void (* const post_exception) (void);
56832 };
56833
56834-extern struct kgdb_arch arch_kgdb_ops;
56835+extern const struct kgdb_arch arch_kgdb_ops;
56836
56837 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
56838
56839-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
56840-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
56841+extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
56842+extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
56843
56844 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
56845 extern int kgdb_mem2hex(char *mem, char *buf, int count);
56846diff -urNp linux-2.6.32.43/include/linux/kmod.h linux-2.6.32.43/include/linux/kmod.h
56847--- linux-2.6.32.43/include/linux/kmod.h 2011-03-27 14:31:47.000000000 -0400
56848+++ linux-2.6.32.43/include/linux/kmod.h 2011-04-17 15:56:46.000000000 -0400
56849@@ -31,6 +31,8 @@
56850 * usually useless though. */
56851 extern int __request_module(bool wait, const char *name, ...) \
56852 __attribute__((format(printf, 2, 3)));
56853+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
56854+ __attribute__((format(printf, 3, 4)));
56855 #define request_module(mod...) __request_module(true, mod)
56856 #define request_module_nowait(mod...) __request_module(false, mod)
56857 #define try_then_request_module(x, mod...) \
56858diff -urNp linux-2.6.32.43/include/linux/kobject.h linux-2.6.32.43/include/linux/kobject.h
56859--- linux-2.6.32.43/include/linux/kobject.h 2011-03-27 14:31:47.000000000 -0400
56860+++ linux-2.6.32.43/include/linux/kobject.h 2011-04-17 15:56:46.000000000 -0400
56861@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
56862
56863 struct kobj_type {
56864 void (*release)(struct kobject *kobj);
56865- struct sysfs_ops *sysfs_ops;
56866+ const struct sysfs_ops *sysfs_ops;
56867 struct attribute **default_attrs;
56868 };
56869
56870@@ -118,9 +118,9 @@ struct kobj_uevent_env {
56871 };
56872
56873 struct kset_uevent_ops {
56874- int (*filter)(struct kset *kset, struct kobject *kobj);
56875- const char *(*name)(struct kset *kset, struct kobject *kobj);
56876- int (*uevent)(struct kset *kset, struct kobject *kobj,
56877+ int (* const filter)(struct kset *kset, struct kobject *kobj);
56878+ const char *(* const name)(struct kset *kset, struct kobject *kobj);
56879+ int (* const uevent)(struct kset *kset, struct kobject *kobj,
56880 struct kobj_uevent_env *env);
56881 };
56882
56883@@ -132,7 +132,7 @@ struct kobj_attribute {
56884 const char *buf, size_t count);
56885 };
56886
56887-extern struct sysfs_ops kobj_sysfs_ops;
56888+extern const struct sysfs_ops kobj_sysfs_ops;
56889
56890 /**
56891 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
56892@@ -155,14 +155,14 @@ struct kset {
56893 struct list_head list;
56894 spinlock_t list_lock;
56895 struct kobject kobj;
56896- struct kset_uevent_ops *uevent_ops;
56897+ const struct kset_uevent_ops *uevent_ops;
56898 };
56899
56900 extern void kset_init(struct kset *kset);
56901 extern int __must_check kset_register(struct kset *kset);
56902 extern void kset_unregister(struct kset *kset);
56903 extern struct kset * __must_check kset_create_and_add(const char *name,
56904- struct kset_uevent_ops *u,
56905+ const struct kset_uevent_ops *u,
56906 struct kobject *parent_kobj);
56907
56908 static inline struct kset *to_kset(struct kobject *kobj)
56909diff -urNp linux-2.6.32.43/include/linux/kvm_host.h linux-2.6.32.43/include/linux/kvm_host.h
56910--- linux-2.6.32.43/include/linux/kvm_host.h 2011-03-27 14:31:47.000000000 -0400
56911+++ linux-2.6.32.43/include/linux/kvm_host.h 2011-04-17 15:56:46.000000000 -0400
56912@@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
56913 void vcpu_load(struct kvm_vcpu *vcpu);
56914 void vcpu_put(struct kvm_vcpu *vcpu);
56915
56916-int kvm_init(void *opaque, unsigned int vcpu_size,
56917+int kvm_init(const void *opaque, unsigned int vcpu_size,
56918 struct module *module);
56919 void kvm_exit(void);
56920
56921@@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
56922 struct kvm_guest_debug *dbg);
56923 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
56924
56925-int kvm_arch_init(void *opaque);
56926+int kvm_arch_init(const void *opaque);
56927 void kvm_arch_exit(void);
56928
56929 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
56930diff -urNp linux-2.6.32.43/include/linux/libata.h linux-2.6.32.43/include/linux/libata.h
56931--- linux-2.6.32.43/include/linux/libata.h 2011-03-27 14:31:47.000000000 -0400
56932+++ linux-2.6.32.43/include/linux/libata.h 2011-04-23 12:56:11.000000000 -0400
56933@@ -525,11 +525,11 @@ struct ata_ioports {
56934
56935 struct ata_host {
56936 spinlock_t lock;
56937- struct device *dev;
56938+ struct device *dev;
56939 void __iomem * const *iomap;
56940 unsigned int n_ports;
56941 void *private_data;
56942- struct ata_port_operations *ops;
56943+ const struct ata_port_operations *ops;
56944 unsigned long flags;
56945 #ifdef CONFIG_ATA_ACPI
56946 acpi_handle acpi_handle;
56947@@ -710,7 +710,7 @@ struct ata_link {
56948
56949 struct ata_port {
56950 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
56951- struct ata_port_operations *ops;
56952+ const struct ata_port_operations *ops;
56953 spinlock_t *lock;
56954 /* Flags owned by the EH context. Only EH should touch these once the
56955 port is active */
56956@@ -892,7 +892,7 @@ struct ata_port_info {
56957 unsigned long pio_mask;
56958 unsigned long mwdma_mask;
56959 unsigned long udma_mask;
56960- struct ata_port_operations *port_ops;
56961+ const struct ata_port_operations *port_ops;
56962 void *private_data;
56963 };
56964
56965@@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timi
56966 extern const unsigned long sata_deb_timing_hotplug[];
56967 extern const unsigned long sata_deb_timing_long[];
56968
56969-extern struct ata_port_operations ata_dummy_port_ops;
56970+extern const struct ata_port_operations ata_dummy_port_ops;
56971 extern const struct ata_port_info ata_dummy_port_info;
56972
56973 static inline const unsigned long *
56974@@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
56975 struct scsi_host_template *sht);
56976 extern void ata_host_detach(struct ata_host *host);
56977 extern void ata_host_init(struct ata_host *, struct device *,
56978- unsigned long, struct ata_port_operations *);
56979+ unsigned long, const struct ata_port_operations *);
56980 extern int ata_scsi_detect(struct scsi_host_template *sht);
56981 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
56982 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
56983diff -urNp linux-2.6.32.43/include/linux/lockd/bind.h linux-2.6.32.43/include/linux/lockd/bind.h
56984--- linux-2.6.32.43/include/linux/lockd/bind.h 2011-03-27 14:31:47.000000000 -0400
56985+++ linux-2.6.32.43/include/linux/lockd/bind.h 2011-04-17 15:56:46.000000000 -0400
56986@@ -23,13 +23,13 @@ struct svc_rqst;
56987 * This is the set of functions for lockd->nfsd communication
56988 */
56989 struct nlmsvc_binding {
56990- __be32 (*fopen)(struct svc_rqst *,
56991+ __be32 (* const fopen)(struct svc_rqst *,
56992 struct nfs_fh *,
56993 struct file **);
56994- void (*fclose)(struct file *);
56995+ void (* const fclose)(struct file *);
56996 };
56997
56998-extern struct nlmsvc_binding * nlmsvc_ops;
56999+extern const struct nlmsvc_binding * nlmsvc_ops;
57000
57001 /*
57002 * Similar to nfs_client_initdata, but without the NFS-specific
57003diff -urNp linux-2.6.32.43/include/linux/mm.h linux-2.6.32.43/include/linux/mm.h
57004--- linux-2.6.32.43/include/linux/mm.h 2011-03-27 14:31:47.000000000 -0400
57005+++ linux-2.6.32.43/include/linux/mm.h 2011-04-17 15:56:46.000000000 -0400
57006@@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void
57007
57008 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
57009 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
57010+
57011+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
57012+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
57013+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
57014+#else
57015 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
57016+#endif
57017+
57018 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
57019 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
57020
57021@@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
57022 int set_page_dirty_lock(struct page *page);
57023 int clear_page_dirty_for_io(struct page *page);
57024
57025-/* Is the vma a continuation of the stack vma above it? */
57026-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
57027-{
57028- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
57029-}
57030-
57031 extern unsigned long move_page_tables(struct vm_area_struct *vma,
57032 unsigned long old_addr, struct vm_area_struct *new_vma,
57033 unsigned long new_addr, unsigned long len);
57034@@ -890,6 +891,8 @@ struct shrinker {
57035 extern void register_shrinker(struct shrinker *);
57036 extern void unregister_shrinker(struct shrinker *);
57037
57038+pgprot_t vm_get_page_prot(unsigned long vm_flags);
57039+
57040 int vma_wants_writenotify(struct vm_area_struct *vma);
57041
57042 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
57043@@ -1162,6 +1165,7 @@ out:
57044 }
57045
57046 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
57047+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
57048
57049 extern unsigned long do_brk(unsigned long, unsigned long);
57050
57051@@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(
57052 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
57053 struct vm_area_struct **pprev);
57054
57055+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
57056+extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
57057+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
57058+
57059 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
57060 NULL if none. Assume start_addr < end_addr. */
57061 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
57062@@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(st
57063 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
57064 }
57065
57066-pgprot_t vm_get_page_prot(unsigned long vm_flags);
57067 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
57068 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
57069 unsigned long pfn, unsigned long size, pgprot_t);
57070@@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long
57071 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
57072 extern int sysctl_memory_failure_early_kill;
57073 extern int sysctl_memory_failure_recovery;
57074-extern atomic_long_t mce_bad_pages;
57075+extern atomic_long_unchecked_t mce_bad_pages;
57076+
57077+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
57078+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
57079+#else
57080+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
57081+#endif
57082
57083 #endif /* __KERNEL__ */
57084 #endif /* _LINUX_MM_H */
57085diff -urNp linux-2.6.32.43/include/linux/mm_types.h linux-2.6.32.43/include/linux/mm_types.h
57086--- linux-2.6.32.43/include/linux/mm_types.h 2011-03-27 14:31:47.000000000 -0400
57087+++ linux-2.6.32.43/include/linux/mm_types.h 2011-04-17 15:56:46.000000000 -0400
57088@@ -186,6 +186,8 @@ struct vm_area_struct {
57089 #ifdef CONFIG_NUMA
57090 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
57091 #endif
57092+
57093+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
57094 };
57095
57096 struct core_thread {
57097@@ -287,6 +289,24 @@ struct mm_struct {
57098 #ifdef CONFIG_MMU_NOTIFIER
57099 struct mmu_notifier_mm *mmu_notifier_mm;
57100 #endif
57101+
57102+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
57103+ unsigned long pax_flags;
57104+#endif
57105+
57106+#ifdef CONFIG_PAX_DLRESOLVE
57107+ unsigned long call_dl_resolve;
57108+#endif
57109+
57110+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
57111+ unsigned long call_syscall;
57112+#endif
57113+
57114+#ifdef CONFIG_PAX_ASLR
57115+ unsigned long delta_mmap; /* randomized offset */
57116+ unsigned long delta_stack; /* randomized offset */
57117+#endif
57118+
57119 };
57120
57121 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
57122diff -urNp linux-2.6.32.43/include/linux/mmu_notifier.h linux-2.6.32.43/include/linux/mmu_notifier.h
57123--- linux-2.6.32.43/include/linux/mmu_notifier.h 2011-03-27 14:31:47.000000000 -0400
57124+++ linux-2.6.32.43/include/linux/mmu_notifier.h 2011-04-17 15:56:46.000000000 -0400
57125@@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
57126 */
57127 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
57128 ({ \
57129- pte_t __pte; \
57130+ pte_t ___pte; \
57131 struct vm_area_struct *___vma = __vma; \
57132 unsigned long ___address = __address; \
57133- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
57134+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
57135 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
57136- __pte; \
57137+ ___pte; \
57138 })
57139
57140 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
57141diff -urNp linux-2.6.32.43/include/linux/mmzone.h linux-2.6.32.43/include/linux/mmzone.h
57142--- linux-2.6.32.43/include/linux/mmzone.h 2011-03-27 14:31:47.000000000 -0400
57143+++ linux-2.6.32.43/include/linux/mmzone.h 2011-04-17 15:56:46.000000000 -0400
57144@@ -350,7 +350,7 @@ struct zone {
57145 unsigned long flags; /* zone flags, see below */
57146
57147 /* Zone statistics */
57148- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57149+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
57150
57151 /*
57152 * prev_priority holds the scanning priority for this zone. It is
57153diff -urNp linux-2.6.32.43/include/linux/mod_devicetable.h linux-2.6.32.43/include/linux/mod_devicetable.h
57154--- linux-2.6.32.43/include/linux/mod_devicetable.h 2011-03-27 14:31:47.000000000 -0400
57155+++ linux-2.6.32.43/include/linux/mod_devicetable.h 2011-04-17 15:56:46.000000000 -0400
57156@@ -12,7 +12,7 @@
57157 typedef unsigned long kernel_ulong_t;
57158 #endif
57159
57160-#define PCI_ANY_ID (~0)
57161+#define PCI_ANY_ID ((__u16)~0)
57162
57163 struct pci_device_id {
57164 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
57165@@ -131,7 +131,7 @@ struct usb_device_id {
57166 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
57167 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
57168
57169-#define HID_ANY_ID (~0)
57170+#define HID_ANY_ID (~0U)
57171
57172 struct hid_device_id {
57173 __u16 bus;
57174diff -urNp linux-2.6.32.43/include/linux/module.h linux-2.6.32.43/include/linux/module.h
57175--- linux-2.6.32.43/include/linux/module.h 2011-03-27 14:31:47.000000000 -0400
57176+++ linux-2.6.32.43/include/linux/module.h 2011-04-17 15:56:46.000000000 -0400
57177@@ -287,16 +287,16 @@ struct module
57178 int (*init)(void);
57179
57180 /* If this is non-NULL, vfree after init() returns */
57181- void *module_init;
57182+ void *module_init_rx, *module_init_rw;
57183
57184 /* Here is the actual code + data, vfree'd on unload. */
57185- void *module_core;
57186+ void *module_core_rx, *module_core_rw;
57187
57188 /* Here are the sizes of the init and core sections */
57189- unsigned int init_size, core_size;
57190+ unsigned int init_size_rw, core_size_rw;
57191
57192 /* The size of the executable code in each section. */
57193- unsigned int init_text_size, core_text_size;
57194+ unsigned int init_size_rx, core_size_rx;
57195
57196 /* Arch-specific module values */
57197 struct mod_arch_specific arch;
57198@@ -393,16 +393,46 @@ struct module *__module_address(unsigned
57199 bool is_module_address(unsigned long addr);
57200 bool is_module_text_address(unsigned long addr);
57201
57202+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
57203+{
57204+
57205+#ifdef CONFIG_PAX_KERNEXEC
57206+ if (ktla_ktva(addr) >= (unsigned long)start &&
57207+ ktla_ktva(addr) < (unsigned long)start + size)
57208+ return 1;
57209+#endif
57210+
57211+ return ((void *)addr >= start && (void *)addr < start + size);
57212+}
57213+
57214+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
57215+{
57216+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
57217+}
57218+
57219+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
57220+{
57221+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
57222+}
57223+
57224+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
57225+{
57226+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
57227+}
57228+
57229+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
57230+{
57231+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
57232+}
57233+
57234 static inline int within_module_core(unsigned long addr, struct module *mod)
57235 {
57236- return (unsigned long)mod->module_core <= addr &&
57237- addr < (unsigned long)mod->module_core + mod->core_size;
57238+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
57239 }
57240
57241 static inline int within_module_init(unsigned long addr, struct module *mod)
57242 {
57243- return (unsigned long)mod->module_init <= addr &&
57244- addr < (unsigned long)mod->module_init + mod->init_size;
57245+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
57246 }
57247
57248 /* Search for module by name: must hold module_mutex. */
57249diff -urNp linux-2.6.32.43/include/linux/moduleloader.h linux-2.6.32.43/include/linux/moduleloader.h
57250--- linux-2.6.32.43/include/linux/moduleloader.h 2011-03-27 14:31:47.000000000 -0400
57251+++ linux-2.6.32.43/include/linux/moduleloader.h 2011-04-17 15:56:46.000000000 -0400
57252@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
57253 sections. Returns NULL on failure. */
57254 void *module_alloc(unsigned long size);
57255
57256+#ifdef CONFIG_PAX_KERNEXEC
57257+void *module_alloc_exec(unsigned long size);
57258+#else
57259+#define module_alloc_exec(x) module_alloc(x)
57260+#endif
57261+
57262 /* Free memory returned from module_alloc. */
57263 void module_free(struct module *mod, void *module_region);
57264
57265+#ifdef CONFIG_PAX_KERNEXEC
57266+void module_free_exec(struct module *mod, void *module_region);
57267+#else
57268+#define module_free_exec(x, y) module_free((x), (y))
57269+#endif
57270+
57271 /* Apply the given relocation to the (simplified) ELF. Return -error
57272 or 0. */
57273 int apply_relocate(Elf_Shdr *sechdrs,
57274diff -urNp linux-2.6.32.43/include/linux/moduleparam.h linux-2.6.32.43/include/linux/moduleparam.h
57275--- linux-2.6.32.43/include/linux/moduleparam.h 2011-03-27 14:31:47.000000000 -0400
57276+++ linux-2.6.32.43/include/linux/moduleparam.h 2011-04-17 15:56:46.000000000 -0400
57277@@ -132,7 +132,7 @@ struct kparam_array
57278
57279 /* Actually copy string: maxlen param is usually sizeof(string). */
57280 #define module_param_string(name, string, len, perm) \
57281- static const struct kparam_string __param_string_##name \
57282+ static const struct kparam_string __param_string_##name __used \
57283 = { len, string }; \
57284 __module_param_call(MODULE_PARAM_PREFIX, name, \
57285 param_set_copystring, param_get_string, \
57286@@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffe
57287
57288 /* Comma-separated array: *nump is set to number they actually specified. */
57289 #define module_param_array_named(name, array, type, nump, perm) \
57290- static const struct kparam_array __param_arr_##name \
57291+ static const struct kparam_array __param_arr_##name __used \
57292 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
57293 sizeof(array[0]), array }; \
57294 __module_param_call(MODULE_PARAM_PREFIX, name, \
57295diff -urNp linux-2.6.32.43/include/linux/mutex.h linux-2.6.32.43/include/linux/mutex.h
57296--- linux-2.6.32.43/include/linux/mutex.h 2011-03-27 14:31:47.000000000 -0400
57297+++ linux-2.6.32.43/include/linux/mutex.h 2011-04-17 15:56:46.000000000 -0400
57298@@ -51,7 +51,7 @@ struct mutex {
57299 spinlock_t wait_lock;
57300 struct list_head wait_list;
57301 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
57302- struct thread_info *owner;
57303+ struct task_struct *owner;
57304 #endif
57305 #ifdef CONFIG_DEBUG_MUTEXES
57306 const char *name;
57307diff -urNp linux-2.6.32.43/include/linux/namei.h linux-2.6.32.43/include/linux/namei.h
57308--- linux-2.6.32.43/include/linux/namei.h 2011-03-27 14:31:47.000000000 -0400
57309+++ linux-2.6.32.43/include/linux/namei.h 2011-04-17 15:56:46.000000000 -0400
57310@@ -22,7 +22,7 @@ struct nameidata {
57311 unsigned int flags;
57312 int last_type;
57313 unsigned depth;
57314- char *saved_names[MAX_NESTED_LINKS + 1];
57315+ const char *saved_names[MAX_NESTED_LINKS + 1];
57316
57317 /* Intent data */
57318 union {
57319@@ -84,12 +84,12 @@ extern int follow_up(struct path *);
57320 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
57321 extern void unlock_rename(struct dentry *, struct dentry *);
57322
57323-static inline void nd_set_link(struct nameidata *nd, char *path)
57324+static inline void nd_set_link(struct nameidata *nd, const char *path)
57325 {
57326 nd->saved_names[nd->depth] = path;
57327 }
57328
57329-static inline char *nd_get_link(struct nameidata *nd)
57330+static inline const char *nd_get_link(const struct nameidata *nd)
57331 {
57332 return nd->saved_names[nd->depth];
57333 }
57334diff -urNp linux-2.6.32.43/include/linux/netfilter/xt_gradm.h linux-2.6.32.43/include/linux/netfilter/xt_gradm.h
57335--- linux-2.6.32.43/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
57336+++ linux-2.6.32.43/include/linux/netfilter/xt_gradm.h 2011-04-17 15:56:46.000000000 -0400
57337@@ -0,0 +1,9 @@
57338+#ifndef _LINUX_NETFILTER_XT_GRADM_H
57339+#define _LINUX_NETFILTER_XT_GRADM_H 1
57340+
57341+struct xt_gradm_mtinfo {
57342+ __u16 flags;
57343+ __u16 invflags;
57344+};
57345+
57346+#endif
57347diff -urNp linux-2.6.32.43/include/linux/nodemask.h linux-2.6.32.43/include/linux/nodemask.h
57348--- linux-2.6.32.43/include/linux/nodemask.h 2011-03-27 14:31:47.000000000 -0400
57349+++ linux-2.6.32.43/include/linux/nodemask.h 2011-04-17 15:56:46.000000000 -0400
57350@@ -464,11 +464,11 @@ static inline int num_node_state(enum no
57351
57352 #define any_online_node(mask) \
57353 ({ \
57354- int node; \
57355- for_each_node_mask(node, (mask)) \
57356- if (node_online(node)) \
57357+ int __node; \
57358+ for_each_node_mask(__node, (mask)) \
57359+ if (node_online(__node)) \
57360 break; \
57361- node; \
57362+ __node; \
57363 })
57364
57365 #define num_online_nodes() num_node_state(N_ONLINE)
57366diff -urNp linux-2.6.32.43/include/linux/oprofile.h linux-2.6.32.43/include/linux/oprofile.h
57367--- linux-2.6.32.43/include/linux/oprofile.h 2011-03-27 14:31:47.000000000 -0400
57368+++ linux-2.6.32.43/include/linux/oprofile.h 2011-04-17 15:56:46.000000000 -0400
57369@@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
57370 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
57371 char const * name, ulong * val);
57372
57373-/** Create a file for read-only access to an atomic_t. */
57374+/** Create a file for read-only access to an atomic_unchecked_t. */
57375 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
57376- char const * name, atomic_t * val);
57377+ char const * name, atomic_unchecked_t * val);
57378
57379 /** create a directory */
57380 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
57381diff -urNp linux-2.6.32.43/include/linux/perf_event.h linux-2.6.32.43/include/linux/perf_event.h
57382--- linux-2.6.32.43/include/linux/perf_event.h 2011-03-27 14:31:47.000000000 -0400
57383+++ linux-2.6.32.43/include/linux/perf_event.h 2011-05-04 17:56:28.000000000 -0400
57384@@ -476,7 +476,7 @@ struct hw_perf_event {
57385 struct hrtimer hrtimer;
57386 };
57387 };
57388- atomic64_t prev_count;
57389+ atomic64_unchecked_t prev_count;
57390 u64 sample_period;
57391 u64 last_period;
57392 atomic64_t period_left;
57393@@ -557,7 +557,7 @@ struct perf_event {
57394 const struct pmu *pmu;
57395
57396 enum perf_event_active_state state;
57397- atomic64_t count;
57398+ atomic64_unchecked_t count;
57399
57400 /*
57401 * These are the total time in nanoseconds that the event
57402@@ -595,8 +595,8 @@ struct perf_event {
57403 * These accumulate total time (in nanoseconds) that children
57404 * events have been enabled and running, respectively.
57405 */
57406- atomic64_t child_total_time_enabled;
57407- atomic64_t child_total_time_running;
57408+ atomic64_unchecked_t child_total_time_enabled;
57409+ atomic64_unchecked_t child_total_time_running;
57410
57411 /*
57412 * Protect attach/detach and child_list:
57413diff -urNp linux-2.6.32.43/include/linux/pipe_fs_i.h linux-2.6.32.43/include/linux/pipe_fs_i.h
57414--- linux-2.6.32.43/include/linux/pipe_fs_i.h 2011-03-27 14:31:47.000000000 -0400
57415+++ linux-2.6.32.43/include/linux/pipe_fs_i.h 2011-04-17 15:56:46.000000000 -0400
57416@@ -46,9 +46,9 @@ struct pipe_inode_info {
57417 wait_queue_head_t wait;
57418 unsigned int nrbufs, curbuf;
57419 struct page *tmp_page;
57420- unsigned int readers;
57421- unsigned int writers;
57422- unsigned int waiting_writers;
57423+ atomic_t readers;
57424+ atomic_t writers;
57425+ atomic_t waiting_writers;
57426 unsigned int r_counter;
57427 unsigned int w_counter;
57428 struct fasync_struct *fasync_readers;
57429diff -urNp linux-2.6.32.43/include/linux/poison.h linux-2.6.32.43/include/linux/poison.h
57430--- linux-2.6.32.43/include/linux/poison.h 2011-03-27 14:31:47.000000000 -0400
57431+++ linux-2.6.32.43/include/linux/poison.h 2011-04-17 15:56:46.000000000 -0400
57432@@ -19,8 +19,8 @@
57433 * under normal circumstances, used to verify that nobody uses
57434 * non-initialized list entries.
57435 */
57436-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
57437-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
57438+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
57439+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
57440
57441 /********** include/linux/timer.h **********/
57442 /*
57443diff -urNp linux-2.6.32.43/include/linux/proc_fs.h linux-2.6.32.43/include/linux/proc_fs.h
57444--- linux-2.6.32.43/include/linux/proc_fs.h 2011-03-27 14:31:47.000000000 -0400
57445+++ linux-2.6.32.43/include/linux/proc_fs.h 2011-04-17 15:56:46.000000000 -0400
57446@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
57447 return proc_create_data(name, mode, parent, proc_fops, NULL);
57448 }
57449
57450+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
57451+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
57452+{
57453+#ifdef CONFIG_GRKERNSEC_PROC_USER
57454+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
57455+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
57456+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
57457+#else
57458+ return proc_create_data(name, mode, parent, proc_fops, NULL);
57459+#endif
57460+}
57461+
57462+
57463 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
57464 mode_t mode, struct proc_dir_entry *base,
57465 read_proc_t *read_proc, void * data)
57466diff -urNp linux-2.6.32.43/include/linux/ptrace.h linux-2.6.32.43/include/linux/ptrace.h
57467--- linux-2.6.32.43/include/linux/ptrace.h 2011-03-27 14:31:47.000000000 -0400
57468+++ linux-2.6.32.43/include/linux/ptrace.h 2011-04-17 15:56:46.000000000 -0400
57469@@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_
57470 extern void exit_ptrace(struct task_struct *tracer);
57471 #define PTRACE_MODE_READ 1
57472 #define PTRACE_MODE_ATTACH 2
57473-/* Returns 0 on success, -errno on denial. */
57474-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
57475 /* Returns true on success, false on denial. */
57476 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
57477+/* Returns true on success, false on denial. */
57478+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
57479
57480 static inline int ptrace_reparented(struct task_struct *child)
57481 {
57482diff -urNp linux-2.6.32.43/include/linux/random.h linux-2.6.32.43/include/linux/random.h
57483--- linux-2.6.32.43/include/linux/random.h 2011-03-27 14:31:47.000000000 -0400
57484+++ linux-2.6.32.43/include/linux/random.h 2011-04-17 15:56:46.000000000 -0400
57485@@ -74,6 +74,11 @@ unsigned long randomize_range(unsigned l
57486 u32 random32(void);
57487 void srandom32(u32 seed);
57488
57489+static inline unsigned long pax_get_random_long(void)
57490+{
57491+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
57492+}
57493+
57494 #endif /* __KERNEL___ */
57495
57496 #endif /* _LINUX_RANDOM_H */
57497diff -urNp linux-2.6.32.43/include/linux/reboot.h linux-2.6.32.43/include/linux/reboot.h
57498--- linux-2.6.32.43/include/linux/reboot.h 2011-03-27 14:31:47.000000000 -0400
57499+++ linux-2.6.32.43/include/linux/reboot.h 2011-05-22 23:02:06.000000000 -0400
57500@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
57501 * Architecture-specific implementations of sys_reboot commands.
57502 */
57503
57504-extern void machine_restart(char *cmd);
57505-extern void machine_halt(void);
57506-extern void machine_power_off(void);
57507+extern void machine_restart(char *cmd) __noreturn;
57508+extern void machine_halt(void) __noreturn;
57509+extern void machine_power_off(void) __noreturn;
57510
57511 extern void machine_shutdown(void);
57512 struct pt_regs;
57513@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
57514 */
57515
57516 extern void kernel_restart_prepare(char *cmd);
57517-extern void kernel_restart(char *cmd);
57518-extern void kernel_halt(void);
57519-extern void kernel_power_off(void);
57520+extern void kernel_restart(char *cmd) __noreturn;
57521+extern void kernel_halt(void) __noreturn;
57522+extern void kernel_power_off(void) __noreturn;
57523
57524 void ctrl_alt_del(void);
57525
57526@@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
57527 * Emergency restart, callable from an interrupt handler.
57528 */
57529
57530-extern void emergency_restart(void);
57531+extern void emergency_restart(void) __noreturn;
57532 #include <asm/emergency-restart.h>
57533
57534 #endif
57535diff -urNp linux-2.6.32.43/include/linux/reiserfs_fs.h linux-2.6.32.43/include/linux/reiserfs_fs.h
57536--- linux-2.6.32.43/include/linux/reiserfs_fs.h 2011-03-27 14:31:47.000000000 -0400
57537+++ linux-2.6.32.43/include/linux/reiserfs_fs.h 2011-04-17 15:56:46.000000000 -0400
57538@@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
57539 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
57540
57541 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
57542-#define get_generation(s) atomic_read (&fs_generation(s))
57543+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
57544 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
57545 #define __fs_changed(gen,s) (gen != get_generation (s))
57546 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
57547@@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
57548 */
57549
57550 struct item_operations {
57551- int (*bytes_number) (struct item_head * ih, int block_size);
57552- void (*decrement_key) (struct cpu_key *);
57553- int (*is_left_mergeable) (struct reiserfs_key * ih,
57554+ int (* const bytes_number) (struct item_head * ih, int block_size);
57555+ void (* const decrement_key) (struct cpu_key *);
57556+ int (* const is_left_mergeable) (struct reiserfs_key * ih,
57557 unsigned long bsize);
57558- void (*print_item) (struct item_head *, char *item);
57559- void (*check_item) (struct item_head *, char *item);
57560+ void (* const print_item) (struct item_head *, char *item);
57561+ void (* const check_item) (struct item_head *, char *item);
57562
57563- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
57564+ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
57565 int is_affected, int insert_size);
57566- int (*check_left) (struct virtual_item * vi, int free,
57567+ int (* const check_left) (struct virtual_item * vi, int free,
57568 int start_skip, int end_skip);
57569- int (*check_right) (struct virtual_item * vi, int free);
57570- int (*part_size) (struct virtual_item * vi, int from, int to);
57571- int (*unit_num) (struct virtual_item * vi);
57572- void (*print_vi) (struct virtual_item * vi);
57573+ int (* const check_right) (struct virtual_item * vi, int free);
57574+ int (* const part_size) (struct virtual_item * vi, int from, int to);
57575+ int (* const unit_num) (struct virtual_item * vi);
57576+ void (* const print_vi) (struct virtual_item * vi);
57577 };
57578
57579-extern struct item_operations *item_ops[TYPE_ANY + 1];
57580+extern const struct item_operations * const item_ops[TYPE_ANY + 1];
57581
57582 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
57583 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
57584diff -urNp linux-2.6.32.43/include/linux/reiserfs_fs_sb.h linux-2.6.32.43/include/linux/reiserfs_fs_sb.h
57585--- linux-2.6.32.43/include/linux/reiserfs_fs_sb.h 2011-03-27 14:31:47.000000000 -0400
57586+++ linux-2.6.32.43/include/linux/reiserfs_fs_sb.h 2011-04-17 15:56:46.000000000 -0400
57587@@ -377,7 +377,7 @@ struct reiserfs_sb_info {
57588 /* Comment? -Hans */
57589 wait_queue_head_t s_wait;
57590 /* To be obsoleted soon by per buffer seals.. -Hans */
57591- atomic_t s_generation_counter; // increased by one every time the
57592+ atomic_unchecked_t s_generation_counter; // increased by one every time the
57593 // tree gets re-balanced
57594 unsigned long s_properties; /* File system properties. Currently holds
57595 on-disk FS format */
57596diff -urNp linux-2.6.32.43/include/linux/sched.h linux-2.6.32.43/include/linux/sched.h
57597--- linux-2.6.32.43/include/linux/sched.h 2011-03-27 14:31:47.000000000 -0400
57598+++ linux-2.6.32.43/include/linux/sched.h 2011-07-14 19:16:12.000000000 -0400
57599@@ -101,6 +101,7 @@ struct bio;
57600 struct fs_struct;
57601 struct bts_context;
57602 struct perf_event_context;
57603+struct linux_binprm;
57604
57605 /*
57606 * List of flags we want to share for kernel threads,
57607@@ -350,7 +351,7 @@ extern signed long schedule_timeout_kill
57608 extern signed long schedule_timeout_uninterruptible(signed long timeout);
57609 asmlinkage void __schedule(void);
57610 asmlinkage void schedule(void);
57611-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
57612+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
57613
57614 struct nsproxy;
57615 struct user_namespace;
57616@@ -371,9 +372,12 @@ struct user_namespace;
57617 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
57618
57619 extern int sysctl_max_map_count;
57620+extern unsigned long sysctl_heap_stack_gap;
57621
57622 #include <linux/aio.h>
57623
57624+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
57625+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
57626 extern unsigned long
57627 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
57628 unsigned long, unsigned long);
57629@@ -666,6 +670,16 @@ struct signal_struct {
57630 struct tty_audit_buf *tty_audit_buf;
57631 #endif
57632
57633+#ifdef CONFIG_GRKERNSEC
57634+ u32 curr_ip;
57635+ u32 saved_ip;
57636+ u32 gr_saddr;
57637+ u32 gr_daddr;
57638+ u16 gr_sport;
57639+ u16 gr_dport;
57640+ u8 used_accept:1;
57641+#endif
57642+
57643 int oom_adj; /* OOM kill score adjustment (bit shift) */
57644 };
57645
57646@@ -723,6 +737,11 @@ struct user_struct {
57647 struct key *session_keyring; /* UID's default session keyring */
57648 #endif
57649
57650+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
57651+ unsigned int banned;
57652+ unsigned long ban_expires;
57653+#endif
57654+
57655 /* Hash table maintenance information */
57656 struct hlist_node uidhash_node;
57657 uid_t uid;
57658@@ -1328,8 +1347,8 @@ struct task_struct {
57659 struct list_head thread_group;
57660
57661 struct completion *vfork_done; /* for vfork() */
57662- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
57663- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
57664+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
57665+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
57666
57667 cputime_t utime, stime, utimescaled, stimescaled;
57668 cputime_t gtime;
57669@@ -1343,16 +1362,6 @@ struct task_struct {
57670 struct task_cputime cputime_expires;
57671 struct list_head cpu_timers[3];
57672
57673-/* process credentials */
57674- const struct cred *real_cred; /* objective and real subjective task
57675- * credentials (COW) */
57676- const struct cred *cred; /* effective (overridable) subjective task
57677- * credentials (COW) */
57678- struct mutex cred_guard_mutex; /* guard against foreign influences on
57679- * credential calculations
57680- * (notably. ptrace) */
57681- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
57682-
57683 char comm[TASK_COMM_LEN]; /* executable name excluding path
57684 - access with [gs]et_task_comm (which lock
57685 it with task_lock())
57686@@ -1369,6 +1378,10 @@ struct task_struct {
57687 #endif
57688 /* CPU-specific state of this task */
57689 struct thread_struct thread;
57690+/* thread_info moved to task_struct */
57691+#ifdef CONFIG_X86
57692+ struct thread_info tinfo;
57693+#endif
57694 /* filesystem information */
57695 struct fs_struct *fs;
57696 /* open file information */
57697@@ -1436,6 +1449,15 @@ struct task_struct {
57698 int hardirq_context;
57699 int softirq_context;
57700 #endif
57701+
57702+/* process credentials */
57703+ const struct cred *real_cred; /* objective and real subjective task
57704+ * credentials (COW) */
57705+ struct mutex cred_guard_mutex; /* guard against foreign influences on
57706+ * credential calculations
57707+ * (notably. ptrace) */
57708+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
57709+
57710 #ifdef CONFIG_LOCKDEP
57711 # define MAX_LOCK_DEPTH 48UL
57712 u64 curr_chain_key;
57713@@ -1456,6 +1478,9 @@ struct task_struct {
57714
57715 struct backing_dev_info *backing_dev_info;
57716
57717+ const struct cred *cred; /* effective (overridable) subjective task
57718+ * credentials (COW) */
57719+
57720 struct io_context *io_context;
57721
57722 unsigned long ptrace_message;
57723@@ -1519,6 +1544,21 @@ struct task_struct {
57724 unsigned long default_timer_slack_ns;
57725
57726 struct list_head *scm_work_list;
57727+
57728+#ifdef CONFIG_GRKERNSEC
57729+ /* grsecurity */
57730+ struct dentry *gr_chroot_dentry;
57731+ struct acl_subject_label *acl;
57732+ struct acl_role_label *role;
57733+ struct file *exec_file;
57734+ u16 acl_role_id;
57735+ /* is this the task that authenticated to the special role */
57736+ u8 acl_sp_role;
57737+ u8 is_writable;
57738+ u8 brute;
57739+ u8 gr_is_chrooted;
57740+#endif
57741+
57742 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
57743 /* Index of current stored adress in ret_stack */
57744 int curr_ret_stack;
57745@@ -1542,6 +1582,57 @@ struct task_struct {
57746 #endif /* CONFIG_TRACING */
57747 };
57748
57749+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
57750+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
57751+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
57752+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
57753+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
57754+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
57755+
57756+#ifdef CONFIG_PAX_SOFTMODE
57757+extern unsigned int pax_softmode;
57758+#endif
57759+
57760+extern int pax_check_flags(unsigned long *);
57761+
57762+/* if tsk != current then task_lock must be held on it */
57763+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
57764+static inline unsigned long pax_get_flags(struct task_struct *tsk)
57765+{
57766+ if (likely(tsk->mm))
57767+ return tsk->mm->pax_flags;
57768+ else
57769+ return 0UL;
57770+}
57771+
57772+/* if tsk != current then task_lock must be held on it */
57773+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
57774+{
57775+ if (likely(tsk->mm)) {
57776+ tsk->mm->pax_flags = flags;
57777+ return 0;
57778+ }
57779+ return -EINVAL;
57780+}
57781+#endif
57782+
57783+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
57784+extern void pax_set_initial_flags(struct linux_binprm *bprm);
57785+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
57786+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
57787+#endif
57788+
57789+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
57790+extern void pax_report_insns(void *pc, void *sp);
57791+extern void pax_report_refcount_overflow(struct pt_regs *regs);
57792+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
57793+
57794+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
57795+extern void pax_track_stack(void);
57796+#else
57797+static inline void pax_track_stack(void) {}
57798+#endif
57799+
57800 /* Future-safe accessor for struct task_struct's cpus_allowed. */
57801 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
57802
57803@@ -1978,7 +2069,9 @@ void yield(void);
57804 extern struct exec_domain default_exec_domain;
57805
57806 union thread_union {
57807+#ifndef CONFIG_X86
57808 struct thread_info thread_info;
57809+#endif
57810 unsigned long stack[THREAD_SIZE/sizeof(long)];
57811 };
57812
57813@@ -2011,6 +2104,7 @@ extern struct pid_namespace init_pid_ns;
57814 */
57815
57816 extern struct task_struct *find_task_by_vpid(pid_t nr);
57817+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
57818 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
57819 struct pid_namespace *ns);
57820
57821@@ -2155,7 +2249,7 @@ extern void __cleanup_sighand(struct sig
57822 extern void exit_itimers(struct signal_struct *);
57823 extern void flush_itimer_signals(void);
57824
57825-extern NORET_TYPE void do_group_exit(int);
57826+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
57827
57828 extern void daemonize(const char *, ...);
57829 extern int allow_signal(int);
57830@@ -2284,13 +2378,17 @@ static inline unsigned long *end_of_stac
57831
57832 #endif
57833
57834-static inline int object_is_on_stack(void *obj)
57835+static inline int object_starts_on_stack(void *obj)
57836 {
57837- void *stack = task_stack_page(current);
57838+ const void *stack = task_stack_page(current);
57839
57840 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
57841 }
57842
57843+#ifdef CONFIG_PAX_USERCOPY
57844+extern int object_is_on_stack(const void *obj, unsigned long len);
57845+#endif
57846+
57847 extern void thread_info_cache_init(void);
57848
57849 #ifdef CONFIG_DEBUG_STACK_USAGE
57850diff -urNp linux-2.6.32.43/include/linux/screen_info.h linux-2.6.32.43/include/linux/screen_info.h
57851--- linux-2.6.32.43/include/linux/screen_info.h 2011-03-27 14:31:47.000000000 -0400
57852+++ linux-2.6.32.43/include/linux/screen_info.h 2011-04-17 15:56:46.000000000 -0400
57853@@ -42,7 +42,8 @@ struct screen_info {
57854 __u16 pages; /* 0x32 */
57855 __u16 vesa_attributes; /* 0x34 */
57856 __u32 capabilities; /* 0x36 */
57857- __u8 _reserved[6]; /* 0x3a */
57858+ __u16 vesapm_size; /* 0x3a */
57859+ __u8 _reserved[4]; /* 0x3c */
57860 } __attribute__((packed));
57861
57862 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
57863diff -urNp linux-2.6.32.43/include/linux/security.h linux-2.6.32.43/include/linux/security.h
57864--- linux-2.6.32.43/include/linux/security.h 2011-03-27 14:31:47.000000000 -0400
57865+++ linux-2.6.32.43/include/linux/security.h 2011-04-17 15:56:46.000000000 -0400
57866@@ -34,6 +34,7 @@
57867 #include <linux/key.h>
57868 #include <linux/xfrm.h>
57869 #include <linux/gfp.h>
57870+#include <linux/grsecurity.h>
57871 #include <net/flow.h>
57872
57873 /* Maximum number of letters for an LSM name string */
57874diff -urNp linux-2.6.32.43/include/linux/shm.h linux-2.6.32.43/include/linux/shm.h
57875--- linux-2.6.32.43/include/linux/shm.h 2011-03-27 14:31:47.000000000 -0400
57876+++ linux-2.6.32.43/include/linux/shm.h 2011-04-17 15:56:46.000000000 -0400
57877@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
57878 pid_t shm_cprid;
57879 pid_t shm_lprid;
57880 struct user_struct *mlock_user;
57881+#ifdef CONFIG_GRKERNSEC
57882+ time_t shm_createtime;
57883+ pid_t shm_lapid;
57884+#endif
57885 };
57886
57887 /* shm_mode upper byte flags */
57888diff -urNp linux-2.6.32.43/include/linux/skbuff.h linux-2.6.32.43/include/linux/skbuff.h
57889--- linux-2.6.32.43/include/linux/skbuff.h 2011-03-27 14:31:47.000000000 -0400
57890+++ linux-2.6.32.43/include/linux/skbuff.h 2011-07-06 19:53:33.000000000 -0400
57891@@ -544,7 +544,7 @@ static inline union skb_shared_tx *skb_t
57892 */
57893 static inline int skb_queue_empty(const struct sk_buff_head *list)
57894 {
57895- return list->next == (struct sk_buff *)list;
57896+ return list->next == (const struct sk_buff *)list;
57897 }
57898
57899 /**
57900@@ -557,7 +557,7 @@ static inline int skb_queue_empty(const
57901 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
57902 const struct sk_buff *skb)
57903 {
57904- return (skb->next == (struct sk_buff *) list);
57905+ return (skb->next == (const struct sk_buff *) list);
57906 }
57907
57908 /**
57909@@ -570,7 +570,7 @@ static inline bool skb_queue_is_last(con
57910 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
57911 const struct sk_buff *skb)
57912 {
57913- return (skb->prev == (struct sk_buff *) list);
57914+ return (skb->prev == (const struct sk_buff *) list);
57915 }
57916
57917 /**
57918@@ -1367,7 +1367,7 @@ static inline int skb_network_offset(con
57919 * headroom, you should not reduce this.
57920 */
57921 #ifndef NET_SKB_PAD
57922-#define NET_SKB_PAD 32
57923+#define NET_SKB_PAD (_AC(32,UL))
57924 #endif
57925
57926 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
57927diff -urNp linux-2.6.32.43/include/linux/slab_def.h linux-2.6.32.43/include/linux/slab_def.h
57928--- linux-2.6.32.43/include/linux/slab_def.h 2011-03-27 14:31:47.000000000 -0400
57929+++ linux-2.6.32.43/include/linux/slab_def.h 2011-05-04 17:56:28.000000000 -0400
57930@@ -69,10 +69,10 @@ struct kmem_cache {
57931 unsigned long node_allocs;
57932 unsigned long node_frees;
57933 unsigned long node_overflow;
57934- atomic_t allochit;
57935- atomic_t allocmiss;
57936- atomic_t freehit;
57937- atomic_t freemiss;
57938+ atomic_unchecked_t allochit;
57939+ atomic_unchecked_t allocmiss;
57940+ atomic_unchecked_t freehit;
57941+ atomic_unchecked_t freemiss;
57942
57943 /*
57944 * If debugging is enabled, then the allocator can add additional
57945diff -urNp linux-2.6.32.43/include/linux/slab.h linux-2.6.32.43/include/linux/slab.h
57946--- linux-2.6.32.43/include/linux/slab.h 2011-03-27 14:31:47.000000000 -0400
57947+++ linux-2.6.32.43/include/linux/slab.h 2011-04-17 15:56:46.000000000 -0400
57948@@ -11,12 +11,20 @@
57949
57950 #include <linux/gfp.h>
57951 #include <linux/types.h>
57952+#include <linux/err.h>
57953
57954 /*
57955 * Flags to pass to kmem_cache_create().
57956 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
57957 */
57958 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
57959+
57960+#ifdef CONFIG_PAX_USERCOPY
57961+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
57962+#else
57963+#define SLAB_USERCOPY 0x00000000UL
57964+#endif
57965+
57966 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
57967 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
57968 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
57969@@ -82,10 +90,13 @@
57970 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
57971 * Both make kfree a no-op.
57972 */
57973-#define ZERO_SIZE_PTR ((void *)16)
57974+#define ZERO_SIZE_PTR \
57975+({ \
57976+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
57977+ (void *)(-MAX_ERRNO-1L); \
57978+})
57979
57980-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
57981- (unsigned long)ZERO_SIZE_PTR)
57982+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
57983
57984 /*
57985 * struct kmem_cache related prototypes
57986@@ -138,6 +149,7 @@ void * __must_check krealloc(const void
57987 void kfree(const void *);
57988 void kzfree(const void *);
57989 size_t ksize(const void *);
57990+void check_object_size(const void *ptr, unsigned long n, bool to);
57991
57992 /*
57993 * Allocator specific definitions. These are mainly used to establish optimized
57994@@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t
57995
57996 void __init kmem_cache_init_late(void);
57997
57998+#define kmalloc(x, y) \
57999+({ \
58000+ void *___retval; \
58001+ intoverflow_t ___x = (intoverflow_t)x; \
58002+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
58003+ ___retval = NULL; \
58004+ else \
58005+ ___retval = kmalloc((size_t)___x, (y)); \
58006+ ___retval; \
58007+})
58008+
58009+#define kmalloc_node(x, y, z) \
58010+({ \
58011+ void *___retval; \
58012+ intoverflow_t ___x = (intoverflow_t)x; \
58013+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
58014+ ___retval = NULL; \
58015+ else \
58016+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
58017+ ___retval; \
58018+})
58019+
58020+#define kzalloc(x, y) \
58021+({ \
58022+ void *___retval; \
58023+ intoverflow_t ___x = (intoverflow_t)x; \
58024+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
58025+ ___retval = NULL; \
58026+ else \
58027+ ___retval = kzalloc((size_t)___x, (y)); \
58028+ ___retval; \
58029+})
58030+
58031 #endif /* _LINUX_SLAB_H */
58032diff -urNp linux-2.6.32.43/include/linux/slub_def.h linux-2.6.32.43/include/linux/slub_def.h
58033--- linux-2.6.32.43/include/linux/slub_def.h 2011-03-27 14:31:47.000000000 -0400
58034+++ linux-2.6.32.43/include/linux/slub_def.h 2011-04-17 15:56:46.000000000 -0400
58035@@ -86,7 +86,7 @@ struct kmem_cache {
58036 struct kmem_cache_order_objects max;
58037 struct kmem_cache_order_objects min;
58038 gfp_t allocflags; /* gfp flags to use on each alloc */
58039- int refcount; /* Refcount for slab cache destroy */
58040+ atomic_t refcount; /* Refcount for slab cache destroy */
58041 void (*ctor)(void *);
58042 int inuse; /* Offset to metadata */
58043 int align; /* Alignment */
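
The refcount change above swaps a plain int for atomic_t so that concurrent cache get/put cannot lose updates. A userspace sketch of why that matters, with C11 <stdatomic.h> standing in for the kernel's atomic_t API (an assumption of this sketch, not something the patch uses); build with -pthread.

#include <stdio.h>
#include <pthread.h>
#include <stdatomic.h>

static int plain_refcount;		/* deliberately racy */
static atomic_int atomic_refcount;	/* safe under concurrency */

static void *worker(void *arg)
{
	(void)arg;
	for (int i = 0; i < 100000; i++) {
		plain_refcount++;			/* increments can be lost */
		atomic_fetch_add(&atomic_refcount, 1);	/* never loses one */
	}
	return NULL;
}

int main(void)
{
	pthread_t t1, t2;

	pthread_create(&t1, NULL, worker, NULL);
	pthread_create(&t2, NULL, worker, NULL);
	pthread_join(t1, NULL);
	pthread_join(t2, NULL);

	printf("plain=%d atomic=%d (expected 200000)\n",
	       plain_refcount, atomic_load(&atomic_refcount));
	return 0;
}
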
58044diff -urNp linux-2.6.32.43/include/linux/sonet.h linux-2.6.32.43/include/linux/sonet.h
58045--- linux-2.6.32.43/include/linux/sonet.h 2011-03-27 14:31:47.000000000 -0400
58046+++ linux-2.6.32.43/include/linux/sonet.h 2011-04-17 15:56:46.000000000 -0400
58047@@ -61,7 +61,7 @@ struct sonet_stats {
58048 #include <asm/atomic.h>
58049
58050 struct k_sonet_stats {
58051-#define __HANDLE_ITEM(i) atomic_t i
58052+#define __HANDLE_ITEM(i) atomic_unchecked_t i
58053 __SONET_ITEMS
58054 #undef __HANDLE_ITEM
58055 };
58056diff -urNp linux-2.6.32.43/include/linux/sunrpc/clnt.h linux-2.6.32.43/include/linux/sunrpc/clnt.h
58057--- linux-2.6.32.43/include/linux/sunrpc/clnt.h 2011-03-27 14:31:47.000000000 -0400
58058+++ linux-2.6.32.43/include/linux/sunrpc/clnt.h 2011-04-17 15:56:46.000000000 -0400
58059@@ -167,9 +167,9 @@ static inline unsigned short rpc_get_por
58060 {
58061 switch (sap->sa_family) {
58062 case AF_INET:
58063- return ntohs(((struct sockaddr_in *)sap)->sin_port);
58064+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
58065 case AF_INET6:
58066- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
58067+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
58068 }
58069 return 0;
58070 }
58071@@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const
58072 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
58073 const struct sockaddr *src)
58074 {
58075- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
58076+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
58077 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
58078
58079 dsin->sin_family = ssin->sin_family;
58080@@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const
58081 if (sa->sa_family != AF_INET6)
58082 return 0;
58083
58084- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
58085+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
58086 }
58087
58088 #endif /* __KERNEL__ */
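
The sunrpc changes above only add const to the sockaddr casts, so the inline helpers no longer discard the qualifier of their const argument. A userspace sketch of the same shape using the ordinary socket headers:

#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

/* const-correct port extraction, mirroring rpc_get_port() above */
static unsigned short get_port(const struct sockaddr *sap)
{
	switch (sap->sa_family) {
	case AF_INET:
		return ntohs(((const struct sockaddr_in *)sap)->sin_port);
	case AF_INET6:
		return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
	}
	return 0;
}

int main(void)
{
	struct sockaddr_in sin;

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(2049);

	printf("port = %u\n", (unsigned)get_port((const struct sockaddr *)&sin));
	return 0;
}
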
58089diff -urNp linux-2.6.32.43/include/linux/sunrpc/svc_rdma.h linux-2.6.32.43/include/linux/sunrpc/svc_rdma.h
58090--- linux-2.6.32.43/include/linux/sunrpc/svc_rdma.h 2011-03-27 14:31:47.000000000 -0400
58091+++ linux-2.6.32.43/include/linux/sunrpc/svc_rdma.h 2011-05-04 17:56:28.000000000 -0400
58092@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
58093 extern unsigned int svcrdma_max_requests;
58094 extern unsigned int svcrdma_max_req_size;
58095
58096-extern atomic_t rdma_stat_recv;
58097-extern atomic_t rdma_stat_read;
58098-extern atomic_t rdma_stat_write;
58099-extern atomic_t rdma_stat_sq_starve;
58100-extern atomic_t rdma_stat_rq_starve;
58101-extern atomic_t rdma_stat_rq_poll;
58102-extern atomic_t rdma_stat_rq_prod;
58103-extern atomic_t rdma_stat_sq_poll;
58104-extern atomic_t rdma_stat_sq_prod;
58105+extern atomic_unchecked_t rdma_stat_recv;
58106+extern atomic_unchecked_t rdma_stat_read;
58107+extern atomic_unchecked_t rdma_stat_write;
58108+extern atomic_unchecked_t rdma_stat_sq_starve;
58109+extern atomic_unchecked_t rdma_stat_rq_starve;
58110+extern atomic_unchecked_t rdma_stat_rq_poll;
58111+extern atomic_unchecked_t rdma_stat_rq_prod;
58112+extern atomic_unchecked_t rdma_stat_sq_poll;
58113+extern atomic_unchecked_t rdma_stat_sq_prod;
58114
58115 #define RPCRDMA_VERSION 1
58116
58117diff -urNp linux-2.6.32.43/include/linux/suspend.h linux-2.6.32.43/include/linux/suspend.h
58118--- linux-2.6.32.43/include/linux/suspend.h 2011-03-27 14:31:47.000000000 -0400
58119+++ linux-2.6.32.43/include/linux/suspend.h 2011-04-17 15:56:46.000000000 -0400
58120@@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
58121 * which require special recovery actions in that situation.
58122 */
58123 struct platform_suspend_ops {
58124- int (*valid)(suspend_state_t state);
58125- int (*begin)(suspend_state_t state);
58126- int (*prepare)(void);
58127- int (*prepare_late)(void);
58128- int (*enter)(suspend_state_t state);
58129- void (*wake)(void);
58130- void (*finish)(void);
58131- void (*end)(void);
58132- void (*recover)(void);
58133+ int (* const valid)(suspend_state_t state);
58134+ int (* const begin)(suspend_state_t state);
58135+ int (* const prepare)(void);
58136+ int (* const prepare_late)(void);
58137+ int (* const enter)(suspend_state_t state);
58138+ void (* const wake)(void);
58139+ void (* const finish)(void);
58140+ void (* const end)(void);
58141+ void (* const recover)(void);
58142 };
58143
58144 #ifdef CONFIG_SUSPEND
58145@@ -120,7 +120,7 @@ struct platform_suspend_ops {
58146 * suspend_set_ops - set platform dependent suspend operations
58147 * @ops: The new suspend operations to set.
58148 */
58149-extern void suspend_set_ops(struct platform_suspend_ops *ops);
58150+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
58151 extern int suspend_valid_only_mem(suspend_state_t state);
58152
58153 /**
58154@@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
58155 #else /* !CONFIG_SUSPEND */
58156 #define suspend_valid_only_mem NULL
58157
58158-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
58159+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
58160 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
58161 #endif /* !CONFIG_SUSPEND */
58162
58163@@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
58164 * platforms which require special recovery actions in that situation.
58165 */
58166 struct platform_hibernation_ops {
58167- int (*begin)(void);
58168- void (*end)(void);
58169- int (*pre_snapshot)(void);
58170- void (*finish)(void);
58171- int (*prepare)(void);
58172- int (*enter)(void);
58173- void (*leave)(void);
58174- int (*pre_restore)(void);
58175- void (*restore_cleanup)(void);
58176- void (*recover)(void);
58177+ int (* const begin)(void);
58178+ void (* const end)(void);
58179+ int (* const pre_snapshot)(void);
58180+ void (* const finish)(void);
58181+ int (* const prepare)(void);
58182+ int (* const enter)(void);
58183+ void (* const leave)(void);
58184+ int (* const pre_restore)(void);
58185+ void (* const restore_cleanup)(void);
58186+ void (* const recover)(void);
58187 };
58188
58189 #ifdef CONFIG_HIBERNATION
58190@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
58191 extern void swsusp_unset_page_free(struct page *);
58192 extern unsigned long get_safe_page(gfp_t gfp_mask);
58193
58194-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
58195+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
58196 extern int hibernate(void);
58197 extern bool system_entering_hibernation(void);
58198 #else /* CONFIG_HIBERNATION */
58199@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
58200 static inline void swsusp_set_page_free(struct page *p) {}
58201 static inline void swsusp_unset_page_free(struct page *p) {}
58202
58203-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
58204+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
58205 static inline int hibernate(void) { return -ENOSYS; }
58206 static inline bool system_entering_hibernation(void) { return false; }
58207 #endif /* CONFIG_HIBERNATION */
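
The suspend.h hunk turns every member of the ops structures into a "* const" function pointer, so a registered ops table can live in read-only memory and its pointers can only ever be set by an initializer. A minimal userspace sketch of that idiom, using made-up demo_ops names:

#include <stdio.h>

struct demo_ops {
	int (* const enter)(int state);
	void (* const finish)(void);
};

static int demo_enter(int state)
{
	printf("enter(%d)\n", state);
	return 0;
}

static void demo_finish(void)
{
	printf("finish\n");
}

/* the initializer is the only place the members can be assigned */
static const struct demo_ops ops = {
	.enter	= demo_enter,
	.finish	= demo_finish,
};

int main(void)
{
	ops.enter(3);
	ops.finish();
	/* ops.enter = demo_enter; would now be a compile-time error */
	return 0;
}
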
58208diff -urNp linux-2.6.32.43/include/linux/sysctl.h linux-2.6.32.43/include/linux/sysctl.h
58209--- linux-2.6.32.43/include/linux/sysctl.h 2011-03-27 14:31:47.000000000 -0400
58210+++ linux-2.6.32.43/include/linux/sysctl.h 2011-04-17 15:56:46.000000000 -0400
58211@@ -164,7 +164,11 @@ enum
58212 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
58213 };
58214
58215-
58216+#ifdef CONFIG_PAX_SOFTMODE
58217+enum {
58218+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
58219+};
58220+#endif
58221
58222 /* CTL_VM names: */
58223 enum
58224@@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_tab
58225
58226 extern int proc_dostring(struct ctl_table *, int,
58227 void __user *, size_t *, loff_t *);
58228+extern int proc_dostring_modpriv(struct ctl_table *, int,
58229+ void __user *, size_t *, loff_t *);
58230 extern int proc_dointvec(struct ctl_table *, int,
58231 void __user *, size_t *, loff_t *);
58232 extern int proc_dointvec_minmax(struct ctl_table *, int,
58233@@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name,
58234
58235 extern ctl_handler sysctl_data;
58236 extern ctl_handler sysctl_string;
58237+extern ctl_handler sysctl_string_modpriv;
58238 extern ctl_handler sysctl_intvec;
58239 extern ctl_handler sysctl_jiffies;
58240 extern ctl_handler sysctl_ms_jiffies;
58241diff -urNp linux-2.6.32.43/include/linux/sysfs.h linux-2.6.32.43/include/linux/sysfs.h
58242--- linux-2.6.32.43/include/linux/sysfs.h 2011-03-27 14:31:47.000000000 -0400
58243+++ linux-2.6.32.43/include/linux/sysfs.h 2011-04-17 15:56:46.000000000 -0400
58244@@ -75,8 +75,8 @@ struct bin_attribute {
58245 };
58246
58247 struct sysfs_ops {
58248- ssize_t (*show)(struct kobject *, struct attribute *,char *);
58249- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
58250+ ssize_t (* const show)(struct kobject *, struct attribute *,char *);
58251+ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
58252 };
58253
58254 struct sysfs_dirent;
58255diff -urNp linux-2.6.32.43/include/linux/thread_info.h linux-2.6.32.43/include/linux/thread_info.h
58256--- linux-2.6.32.43/include/linux/thread_info.h 2011-03-27 14:31:47.000000000 -0400
58257+++ linux-2.6.32.43/include/linux/thread_info.h 2011-04-17 15:56:46.000000000 -0400
58258@@ -23,7 +23,7 @@ struct restart_block {
58259 };
58260 /* For futex_wait and futex_wait_requeue_pi */
58261 struct {
58262- u32 *uaddr;
58263+ u32 __user *uaddr;
58264 u32 val;
58265 u32 flags;
58266 u32 bitset;
58267diff -urNp linux-2.6.32.43/include/linux/tty.h linux-2.6.32.43/include/linux/tty.h
58268--- linux-2.6.32.43/include/linux/tty.h 2011-03-27 14:31:47.000000000 -0400
58269+++ linux-2.6.32.43/include/linux/tty.h 2011-04-17 15:56:46.000000000 -0400
58270@@ -13,6 +13,7 @@
58271 #include <linux/tty_driver.h>
58272 #include <linux/tty_ldisc.h>
58273 #include <linux/mutex.h>
58274+#include <linux/poll.h>
58275
58276 #include <asm/system.h>
58277
58278@@ -443,7 +444,6 @@ extern int tty_perform_flush(struct tty_
58279 extern dev_t tty_devnum(struct tty_struct *tty);
58280 extern void proc_clear_tty(struct task_struct *p);
58281 extern struct tty_struct *get_current_tty(void);
58282-extern void tty_default_fops(struct file_operations *fops);
58283 extern struct tty_struct *alloc_tty_struct(void);
58284 extern void free_tty_struct(struct tty_struct *tty);
58285 extern void initialize_tty_struct(struct tty_struct *tty,
58286@@ -493,6 +493,18 @@ extern void tty_ldisc_begin(void);
58287 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
58288 extern void tty_ldisc_enable(struct tty_struct *tty);
58289
58290+/* tty_io.c */
58291+extern ssize_t tty_read(struct file *, char __user *, size_t, loff_t *);
58292+extern ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *);
58293+extern unsigned int tty_poll(struct file *, poll_table *);
58294+#ifdef CONFIG_COMPAT
58295+extern long tty_compat_ioctl(struct file *file, unsigned int cmd,
58296+ unsigned long arg);
58297+#else
58298+#define tty_compat_ioctl NULL
58299+#endif
58300+extern int tty_release(struct inode *, struct file *);
58301+extern int tty_fasync(int fd, struct file *filp, int on);
58302
58303 /* n_tty.c */
58304 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
58305diff -urNp linux-2.6.32.43/include/linux/tty_ldisc.h linux-2.6.32.43/include/linux/tty_ldisc.h
58306--- linux-2.6.32.43/include/linux/tty_ldisc.h 2011-03-27 14:31:47.000000000 -0400
58307+++ linux-2.6.32.43/include/linux/tty_ldisc.h 2011-04-17 15:56:46.000000000 -0400
58308@@ -139,7 +139,7 @@ struct tty_ldisc_ops {
58309
58310 struct module *owner;
58311
58312- int refcount;
58313+ atomic_t refcount;
58314 };
58315
58316 struct tty_ldisc {
58317diff -urNp linux-2.6.32.43/include/linux/types.h linux-2.6.32.43/include/linux/types.h
58318--- linux-2.6.32.43/include/linux/types.h 2011-03-27 14:31:47.000000000 -0400
58319+++ linux-2.6.32.43/include/linux/types.h 2011-04-17 15:56:46.000000000 -0400
58320@@ -191,10 +191,26 @@ typedef struct {
58321 volatile int counter;
58322 } atomic_t;
58323
58324+#ifdef CONFIG_PAX_REFCOUNT
58325+typedef struct {
58326+ volatile int counter;
58327+} atomic_unchecked_t;
58328+#else
58329+typedef atomic_t atomic_unchecked_t;
58330+#endif
58331+
58332 #ifdef CONFIG_64BIT
58333 typedef struct {
58334 volatile long counter;
58335 } atomic64_t;
58336+
58337+#ifdef CONFIG_PAX_REFCOUNT
58338+typedef struct {
58339+ volatile long counter;
58340+} atomic64_unchecked_t;
58341+#else
58342+typedef atomic64_t atomic64_unchecked_t;
58343+#endif
58344 #endif
58345
58346 struct ustat {
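
The atomic_unchecked_t type introduced above is the opt-out used throughout the patch: under PAX_REFCOUNT the regular atomic_t operations detect signed overflow, while counters that are allowed to wrap (statistics, generation numbers) keep plain wrapping semantics by switching to the unchecked type. A userspace sketch of the policy difference only; real atomicity and the arch-specific overflow trap are elided and replaced by an explicit pre-test (assumptions of this sketch):

#include <stdio.h>
#include <limits.h>

typedef struct { volatile int counter; } atomic_t;		/* checked   */
typedef struct { volatile int counter; } atomic_unchecked_t;	/* unchecked */

/* stand-in for the protected increment: refuse to overflow */
static int atomic_inc(atomic_t *v)
{
	if (v->counter == INT_MAX) {
		fprintf(stderr, "refcount overflow caught, counter saturated\n");
		return -1;
	}
	v->counter++;
	return 0;
}

/* statistics counter: wrapping is harmless, so no check is applied */
static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	v->counter = (int)((unsigned int)v->counter + 1u);	/* wraps */
}

int main(void)
{
	atomic_t ref = { INT_MAX };
	atomic_unchecked_t stat = { INT_MAX };

	atomic_inc(&ref);		/* detected and refused */
	atomic_inc_unchecked(&stat);	/* wraps to INT_MIN on two's complement */

	printf("ref=%d stat=%d\n", ref.counter, stat.counter);
	return 0;
}
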
58347diff -urNp linux-2.6.32.43/include/linux/uaccess.h linux-2.6.32.43/include/linux/uaccess.h
58348--- linux-2.6.32.43/include/linux/uaccess.h 2011-03-27 14:31:47.000000000 -0400
58349+++ linux-2.6.32.43/include/linux/uaccess.h 2011-04-17 15:56:46.000000000 -0400
58350@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
58351 long ret; \
58352 mm_segment_t old_fs = get_fs(); \
58353 \
58354- set_fs(KERNEL_DS); \
58355 pagefault_disable(); \
58356+ set_fs(KERNEL_DS); \
58357 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
58358- pagefault_enable(); \
58359 set_fs(old_fs); \
58360+ pagefault_enable(); \
58361 ret; \
58362 })
58363
58364@@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
58365 * Safely read from address @src to the buffer at @dst. If a kernel fault
58366 * happens, handle that and return -EFAULT.
58367 */
58368-extern long probe_kernel_read(void *dst, void *src, size_t size);
58369+extern long probe_kernel_read(void *dst, const void *src, size_t size);
58370
58371 /*
58372 * probe_kernel_write(): safely attempt to write to a location
58373@@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
58374 * Safely write to address @dst from the buffer at @src. If a kernel fault
58375 * happens, handle that and return -EFAULT.
58376 */
58377-extern long probe_kernel_write(void *dst, void *src, size_t size);
58378+extern long probe_kernel_write(void *dst, const void *src, size_t size);
58379
58380 #endif /* __LINUX_UACCESS_H__ */
58381diff -urNp linux-2.6.32.43/include/linux/unaligned/access_ok.h linux-2.6.32.43/include/linux/unaligned/access_ok.h
58382--- linux-2.6.32.43/include/linux/unaligned/access_ok.h 2011-03-27 14:31:47.000000000 -0400
58383+++ linux-2.6.32.43/include/linux/unaligned/access_ok.h 2011-04-17 15:56:46.000000000 -0400
58384@@ -6,32 +6,32 @@
58385
58386 static inline u16 get_unaligned_le16(const void *p)
58387 {
58388- return le16_to_cpup((__le16 *)p);
58389+ return le16_to_cpup((const __le16 *)p);
58390 }
58391
58392 static inline u32 get_unaligned_le32(const void *p)
58393 {
58394- return le32_to_cpup((__le32 *)p);
58395+ return le32_to_cpup((const __le32 *)p);
58396 }
58397
58398 static inline u64 get_unaligned_le64(const void *p)
58399 {
58400- return le64_to_cpup((__le64 *)p);
58401+ return le64_to_cpup((const __le64 *)p);
58402 }
58403
58404 static inline u16 get_unaligned_be16(const void *p)
58405 {
58406- return be16_to_cpup((__be16 *)p);
58407+ return be16_to_cpup((const __be16 *)p);
58408 }
58409
58410 static inline u32 get_unaligned_be32(const void *p)
58411 {
58412- return be32_to_cpup((__be32 *)p);
58413+ return be32_to_cpup((const __be32 *)p);
58414 }
58415
58416 static inline u64 get_unaligned_be64(const void *p)
58417 {
58418- return be64_to_cpup((__be64 *)p);
58419+ return be64_to_cpup((const __be64 *)p);
58420 }
58421
58422 static inline void put_unaligned_le16(u16 val, void *p)
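
The access_ok.h hunk only constifies the casts inside the get_unaligned_* helpers. For reference, a userspace sketch of what such a helper does: a byte-wise little-endian load that is safe on misaligned pointers (the kernel version goes through le16_to_cpup() instead):

#include <stdio.h>
#include <stdint.h>

static inline uint16_t get_unaligned_le16(const void *p)
{
	const uint8_t *b = (const uint8_t *)p;

	return (uint16_t)(b[0] | (b[1] << 8));
}

int main(void)
{
	const uint8_t buf[] = { 0x01, 0x34, 0x12 };

	/* &buf[1] is misaligned for a uint16_t, but the helper copes */
	printf("0x%04x\n", (unsigned)get_unaligned_le16(&buf[1]));	/* 0x1234 */
	return 0;
}
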
58423diff -urNp linux-2.6.32.43/include/linux/vmalloc.h linux-2.6.32.43/include/linux/vmalloc.h
58424--- linux-2.6.32.43/include/linux/vmalloc.h 2011-03-27 14:31:47.000000000 -0400
58425+++ linux-2.6.32.43/include/linux/vmalloc.h 2011-04-17 15:56:46.000000000 -0400
58426@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
58427 #define VM_MAP 0x00000004 /* vmap()ed pages */
58428 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
58429 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
58430+
58431+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
58432+#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
58433+#endif
58434+
58435 /* bits [20..32] reserved for arch specific ioremap internals */
58436
58437 /*
58438@@ -123,4 +128,81 @@ struct vm_struct **pcpu_get_vm_areas(con
58439
58440 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
58441
58442+#define vmalloc(x) \
58443+({ \
58444+ void *___retval; \
58445+ intoverflow_t ___x = (intoverflow_t)x; \
58446+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
58447+ ___retval = NULL; \
58448+ else \
58449+ ___retval = vmalloc((unsigned long)___x); \
58450+ ___retval; \
58451+})
58452+
58453+#define __vmalloc(x, y, z) \
58454+({ \
58455+ void *___retval; \
58456+ intoverflow_t ___x = (intoverflow_t)x; \
58457+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
58458+ ___retval = NULL; \
58459+ else \
58460+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
58461+ ___retval; \
58462+})
58463+
58464+#define vmalloc_user(x) \
58465+({ \
58466+ void *___retval; \
58467+ intoverflow_t ___x = (intoverflow_t)x; \
58468+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
58469+ ___retval = NULL; \
58470+ else \
58471+ ___retval = vmalloc_user((unsigned long)___x); \
58472+ ___retval; \
58473+})
58474+
58475+#define vmalloc_exec(x) \
58476+({ \
58477+ void *___retval; \
58478+ intoverflow_t ___x = (intoverflow_t)x; \
58479+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
58480+ ___retval = NULL; \
58481+ else \
58482+ ___retval = vmalloc_exec((unsigned long)___x); \
58483+ ___retval; \
58484+})
58485+
58486+#define vmalloc_node(x, y) \
58487+({ \
58488+ void *___retval; \
58489+ intoverflow_t ___x = (intoverflow_t)x; \
58490+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
58491+ ___retval = NULL; \
58492+ else \
58493+ ___retval = vmalloc_node((unsigned long)___x, (y));\
58494+ ___retval; \
58495+})
58496+
58497+#define vmalloc_32(x) \
58498+({ \
58499+ void *___retval; \
58500+ intoverflow_t ___x = (intoverflow_t)x; \
58501+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
58502+ ___retval = NULL; \
58503+ else \
58504+ ___retval = vmalloc_32((unsigned long)___x); \
58505+ ___retval; \
58506+})
58507+
58508+#define vmalloc_32_user(x) \
58509+({ \
58510+ void *___retval; \
58511+ intoverflow_t ___x = (intoverflow_t)x; \
58512+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
58513+ ___retval = NULL; \
58514+ else \
58515+ ___retval = vmalloc_32_user((unsigned long)___x);\
58516+ ___retval; \
58517+})
58518+
58519 #endif /* _LINUX_VMALLOC_H */
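
As in the slab.h hunk earlier, every vmalloc variant is wrapped above in a GCC statement-expression macro that evaluates the requested size in the wider intoverflow_t and refuses anything that cannot be represented in the real parameter type; because a function-like macro is never expanded recursively, the inner vmalloc() call still reaches the real function. A userspace sketch of the pattern around malloc(), with unsigned __int128 standing in for intoverflow_t (an assumption; it needs a 64-bit GCC or clang):

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

typedef unsigned __int128 intoverflow_t;	/* wider than size_t here */

#define checked_malloc(x)						\
({									\
	void *___retval;						\
	intoverflow_t ___x = (intoverflow_t)(x);			\
	if (___x > (intoverflow_t)SIZE_MAX) {				\
		fprintf(stderr, "checked_malloc: size overflow\n");	\
		___retval = NULL;					\
	} else								\
		___retval = malloc((size_t)___x);			\
	___retval;							\
})

int main(void)
{
	void *ok  = checked_malloc(64);					/* passes through */
	void *bad = checked_malloc((intoverflow_t)SIZE_MAX + 1);	/* rejected */

	printf("ok=%p bad=%p\n", ok, bad);
	free(ok);
	return 0;
}
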
58520diff -urNp linux-2.6.32.43/include/linux/vmstat.h linux-2.6.32.43/include/linux/vmstat.h
58521--- linux-2.6.32.43/include/linux/vmstat.h 2011-03-27 14:31:47.000000000 -0400
58522+++ linux-2.6.32.43/include/linux/vmstat.h 2011-04-17 15:56:46.000000000 -0400
58523@@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in
58524 /*
58525 * Zone based page accounting with per cpu differentials.
58526 */
58527-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
58528+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
58529
58530 static inline void zone_page_state_add(long x, struct zone *zone,
58531 enum zone_stat_item item)
58532 {
58533- atomic_long_add(x, &zone->vm_stat[item]);
58534- atomic_long_add(x, &vm_stat[item]);
58535+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
58536+ atomic_long_add_unchecked(x, &vm_stat[item]);
58537 }
58538
58539 static inline unsigned long global_page_state(enum zone_stat_item item)
58540 {
58541- long x = atomic_long_read(&vm_stat[item]);
58542+ long x = atomic_long_read_unchecked(&vm_stat[item]);
58543 #ifdef CONFIG_SMP
58544 if (x < 0)
58545 x = 0;
58546@@ -158,7 +158,7 @@ static inline unsigned long global_page_
58547 static inline unsigned long zone_page_state(struct zone *zone,
58548 enum zone_stat_item item)
58549 {
58550- long x = atomic_long_read(&zone->vm_stat[item]);
58551+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
58552 #ifdef CONFIG_SMP
58553 if (x < 0)
58554 x = 0;
58555@@ -175,7 +175,7 @@ static inline unsigned long zone_page_st
58556 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
58557 enum zone_stat_item item)
58558 {
58559- long x = atomic_long_read(&zone->vm_stat[item]);
58560+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
58561
58562 #ifdef CONFIG_SMP
58563 int cpu;
58564@@ -264,8 +264,8 @@ static inline void __mod_zone_page_state
58565
58566 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
58567 {
58568- atomic_long_inc(&zone->vm_stat[item]);
58569- atomic_long_inc(&vm_stat[item]);
58570+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
58571+ atomic_long_inc_unchecked(&vm_stat[item]);
58572 }
58573
58574 static inline void __inc_zone_page_state(struct page *page,
58575@@ -276,8 +276,8 @@ static inline void __inc_zone_page_state
58576
58577 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
58578 {
58579- atomic_long_dec(&zone->vm_stat[item]);
58580- atomic_long_dec(&vm_stat[item]);
58581+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
58582+ atomic_long_dec_unchecked(&vm_stat[item]);
58583 }
58584
58585 static inline void __dec_zone_page_state(struct page *page,
58586diff -urNp linux-2.6.32.43/include/media/v4l2-device.h linux-2.6.32.43/include/media/v4l2-device.h
58587--- linux-2.6.32.43/include/media/v4l2-device.h 2011-03-27 14:31:47.000000000 -0400
58588+++ linux-2.6.32.43/include/media/v4l2-device.h 2011-05-04 17:56:28.000000000 -0400
58589@@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st
58590 this function returns 0. If the name ends with a digit (e.g. cx18),
58591 then the name will be set to cx18-0 since cx180 looks really odd. */
58592 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
58593- atomic_t *instance);
58594+ atomic_unchecked_t *instance);
58595
58596 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
58597 Since the parent disappears this ensures that v4l2_dev doesn't have an
58598diff -urNp linux-2.6.32.43/include/net/flow.h linux-2.6.32.43/include/net/flow.h
58599--- linux-2.6.32.43/include/net/flow.h 2011-03-27 14:31:47.000000000 -0400
58600+++ linux-2.6.32.43/include/net/flow.h 2011-05-04 17:56:28.000000000 -0400
58601@@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net
58602 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
58603 u8 dir, flow_resolve_t resolver);
58604 extern void flow_cache_flush(void);
58605-extern atomic_t flow_cache_genid;
58606+extern atomic_unchecked_t flow_cache_genid;
58607
58608 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
58609 {
58610diff -urNp linux-2.6.32.43/include/net/inetpeer.h linux-2.6.32.43/include/net/inetpeer.h
58611--- linux-2.6.32.43/include/net/inetpeer.h 2011-03-27 14:31:47.000000000 -0400
58612+++ linux-2.6.32.43/include/net/inetpeer.h 2011-04-17 15:56:46.000000000 -0400
58613@@ -24,7 +24,7 @@ struct inet_peer
58614 __u32 dtime; /* the time of last use of not
58615 * referenced entries */
58616 atomic_t refcnt;
58617- atomic_t rid; /* Frag reception counter */
58618+ atomic_unchecked_t rid; /* Frag reception counter */
58619 __u32 tcp_ts;
58620 unsigned long tcp_ts_stamp;
58621 };
58622diff -urNp linux-2.6.32.43/include/net/ip_vs.h linux-2.6.32.43/include/net/ip_vs.h
58623--- linux-2.6.32.43/include/net/ip_vs.h 2011-03-27 14:31:47.000000000 -0400
58624+++ linux-2.6.32.43/include/net/ip_vs.h 2011-05-04 17:56:28.000000000 -0400
58625@@ -365,7 +365,7 @@ struct ip_vs_conn {
58626 struct ip_vs_conn *control; /* Master control connection */
58627 atomic_t n_control; /* Number of controlled ones */
58628 struct ip_vs_dest *dest; /* real server */
58629- atomic_t in_pkts; /* incoming packet counter */
58630+ atomic_unchecked_t in_pkts; /* incoming packet counter */
58631
58632 /* packet transmitter for different forwarding methods. If it
58633 mangles the packet, it must return NF_DROP or better NF_STOLEN,
58634@@ -466,7 +466,7 @@ struct ip_vs_dest {
58635 union nf_inet_addr addr; /* IP address of the server */
58636 __be16 port; /* port number of the server */
58637 volatile unsigned flags; /* dest status flags */
58638- atomic_t conn_flags; /* flags to copy to conn */
58639+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
58640 atomic_t weight; /* server weight */
58641
58642 atomic_t refcnt; /* reference counter */
58643diff -urNp linux-2.6.32.43/include/net/irda/ircomm_tty.h linux-2.6.32.43/include/net/irda/ircomm_tty.h
58644--- linux-2.6.32.43/include/net/irda/ircomm_tty.h 2011-03-27 14:31:47.000000000 -0400
58645+++ linux-2.6.32.43/include/net/irda/ircomm_tty.h 2011-04-17 15:56:46.000000000 -0400
58646@@ -35,6 +35,7 @@
58647 #include <linux/termios.h>
58648 #include <linux/timer.h>
58649 #include <linux/tty.h> /* struct tty_struct */
58650+#include <asm/local.h>
58651
58652 #include <net/irda/irias_object.h>
58653 #include <net/irda/ircomm_core.h>
58654@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
58655 unsigned short close_delay;
58656 unsigned short closing_wait; /* time to wait before closing */
58657
58658- int open_count;
58659- int blocked_open; /* # of blocked opens */
58660+ local_t open_count;
58661+ local_t blocked_open; /* # of blocked opens */
58662
58663 /* Protect concurent access to :
58664 * o self->open_count
58665diff -urNp linux-2.6.32.43/include/net/iucv/af_iucv.h linux-2.6.32.43/include/net/iucv/af_iucv.h
58666--- linux-2.6.32.43/include/net/iucv/af_iucv.h 2011-03-27 14:31:47.000000000 -0400
58667+++ linux-2.6.32.43/include/net/iucv/af_iucv.h 2011-05-04 17:56:28.000000000 -0400
58668@@ -87,7 +87,7 @@ struct iucv_sock {
58669 struct iucv_sock_list {
58670 struct hlist_head head;
58671 rwlock_t lock;
58672- atomic_t autobind_name;
58673+ atomic_unchecked_t autobind_name;
58674 };
58675
58676 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
58677diff -urNp linux-2.6.32.43/include/net/neighbour.h linux-2.6.32.43/include/net/neighbour.h
58678--- linux-2.6.32.43/include/net/neighbour.h 2011-03-27 14:31:47.000000000 -0400
58679+++ linux-2.6.32.43/include/net/neighbour.h 2011-04-17 15:56:46.000000000 -0400
58680@@ -125,12 +125,12 @@ struct neighbour
58681 struct neigh_ops
58682 {
58683 int family;
58684- void (*solicit)(struct neighbour *, struct sk_buff*);
58685- void (*error_report)(struct neighbour *, struct sk_buff*);
58686- int (*output)(struct sk_buff*);
58687- int (*connected_output)(struct sk_buff*);
58688- int (*hh_output)(struct sk_buff*);
58689- int (*queue_xmit)(struct sk_buff*);
58690+ void (* const solicit)(struct neighbour *, struct sk_buff*);
58691+ void (* const error_report)(struct neighbour *, struct sk_buff*);
58692+ int (* const output)(struct sk_buff*);
58693+ int (* const connected_output)(struct sk_buff*);
58694+ int (* const hh_output)(struct sk_buff*);
58695+ int (* const queue_xmit)(struct sk_buff*);
58696 };
58697
58698 struct pneigh_entry
58699diff -urNp linux-2.6.32.43/include/net/netlink.h linux-2.6.32.43/include/net/netlink.h
58700--- linux-2.6.32.43/include/net/netlink.h 2011-07-13 17:23:04.000000000 -0400
58701+++ linux-2.6.32.43/include/net/netlink.h 2011-07-13 17:23:19.000000000 -0400
58702@@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct
58703 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
58704 {
58705 if (mark)
58706- skb_trim(skb, (unsigned char *) mark - skb->data);
58707+ skb_trim(skb, (const unsigned char *) mark - skb->data);
58708 }
58709
58710 /**
58711diff -urNp linux-2.6.32.43/include/net/netns/ipv4.h linux-2.6.32.43/include/net/netns/ipv4.h
58712--- linux-2.6.32.43/include/net/netns/ipv4.h 2011-03-27 14:31:47.000000000 -0400
58713+++ linux-2.6.32.43/include/net/netns/ipv4.h 2011-05-04 17:56:28.000000000 -0400
58714@@ -54,7 +54,7 @@ struct netns_ipv4 {
58715 int current_rt_cache_rebuild_count;
58716
58717 struct timer_list rt_secret_timer;
58718- atomic_t rt_genid;
58719+ atomic_unchecked_t rt_genid;
58720
58721 #ifdef CONFIG_IP_MROUTE
58722 struct sock *mroute_sk;
58723diff -urNp linux-2.6.32.43/include/net/sctp/sctp.h linux-2.6.32.43/include/net/sctp/sctp.h
58724--- linux-2.6.32.43/include/net/sctp/sctp.h 2011-03-27 14:31:47.000000000 -0400
58725+++ linux-2.6.32.43/include/net/sctp/sctp.h 2011-04-17 15:56:46.000000000 -0400
58726@@ -305,8 +305,8 @@ extern int sctp_debug_flag;
58727
58728 #else /* SCTP_DEBUG */
58729
58730-#define SCTP_DEBUG_PRINTK(whatever...)
58731-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
58732+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
58733+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
58734 #define SCTP_ENABLE_DEBUG
58735 #define SCTP_DISABLE_DEBUG
58736 #define SCTP_ASSERT(expr, str, func)
58737diff -urNp linux-2.6.32.43/include/net/sock.h linux-2.6.32.43/include/net/sock.h
58738--- linux-2.6.32.43/include/net/sock.h 2011-03-27 14:31:47.000000000 -0400
58739+++ linux-2.6.32.43/include/net/sock.h 2011-05-04 17:56:28.000000000 -0400
58740@@ -272,7 +272,7 @@ struct sock {
58741 rwlock_t sk_callback_lock;
58742 int sk_err,
58743 sk_err_soft;
58744- atomic_t sk_drops;
58745+ atomic_unchecked_t sk_drops;
58746 unsigned short sk_ack_backlog;
58747 unsigned short sk_max_ack_backlog;
58748 __u32 sk_priority;
58749diff -urNp linux-2.6.32.43/include/net/tcp.h linux-2.6.32.43/include/net/tcp.h
58750--- linux-2.6.32.43/include/net/tcp.h 2011-03-27 14:31:47.000000000 -0400
58751+++ linux-2.6.32.43/include/net/tcp.h 2011-04-17 15:56:46.000000000 -0400
58752@@ -1444,6 +1444,7 @@ enum tcp_seq_states {
58753 struct tcp_seq_afinfo {
58754 char *name;
58755 sa_family_t family;
58756+ /* cannot be const */
58757 struct file_operations seq_fops;
58758 struct seq_operations seq_ops;
58759 };
58760diff -urNp linux-2.6.32.43/include/net/udp.h linux-2.6.32.43/include/net/udp.h
58761--- linux-2.6.32.43/include/net/udp.h 2011-03-27 14:31:47.000000000 -0400
58762+++ linux-2.6.32.43/include/net/udp.h 2011-04-17 15:56:46.000000000 -0400
58763@@ -187,6 +187,7 @@ struct udp_seq_afinfo {
58764 char *name;
58765 sa_family_t family;
58766 struct udp_table *udp_table;
58767+ /* cannot be const */
58768 struct file_operations seq_fops;
58769 struct seq_operations seq_ops;
58770 };
58771diff -urNp linux-2.6.32.43/include/scsi/scsi_device.h linux-2.6.32.43/include/scsi/scsi_device.h
58772--- linux-2.6.32.43/include/scsi/scsi_device.h 2011-04-17 17:00:52.000000000 -0400
58773+++ linux-2.6.32.43/include/scsi/scsi_device.h 2011-05-04 17:56:28.000000000 -0400
58774@@ -156,9 +156,9 @@ struct scsi_device {
58775 unsigned int max_device_blocked; /* what device_blocked counts down from */
58776 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
58777
58778- atomic_t iorequest_cnt;
58779- atomic_t iodone_cnt;
58780- atomic_t ioerr_cnt;
58781+ atomic_unchecked_t iorequest_cnt;
58782+ atomic_unchecked_t iodone_cnt;
58783+ atomic_unchecked_t ioerr_cnt;
58784
58785 struct device sdev_gendev,
58786 sdev_dev;
58787diff -urNp linux-2.6.32.43/include/sound/ac97_codec.h linux-2.6.32.43/include/sound/ac97_codec.h
58788--- linux-2.6.32.43/include/sound/ac97_codec.h 2011-03-27 14:31:47.000000000 -0400
58789+++ linux-2.6.32.43/include/sound/ac97_codec.h 2011-04-17 15:56:46.000000000 -0400
58790@@ -419,15 +419,15 @@
58791 struct snd_ac97;
58792
58793 struct snd_ac97_build_ops {
58794- int (*build_3d) (struct snd_ac97 *ac97);
58795- int (*build_specific) (struct snd_ac97 *ac97);
58796- int (*build_spdif) (struct snd_ac97 *ac97);
58797- int (*build_post_spdif) (struct snd_ac97 *ac97);
58798+ int (* const build_3d) (struct snd_ac97 *ac97);
58799+ int (* const build_specific) (struct snd_ac97 *ac97);
58800+ int (* const build_spdif) (struct snd_ac97 *ac97);
58801+ int (* const build_post_spdif) (struct snd_ac97 *ac97);
58802 #ifdef CONFIG_PM
58803- void (*suspend) (struct snd_ac97 *ac97);
58804- void (*resume) (struct snd_ac97 *ac97);
58805+ void (* const suspend) (struct snd_ac97 *ac97);
58806+ void (* const resume) (struct snd_ac97 *ac97);
58807 #endif
58808- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
58809+ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
58810 };
58811
58812 struct snd_ac97_bus_ops {
58813@@ -477,7 +477,7 @@ struct snd_ac97_template {
58814
58815 struct snd_ac97 {
58816 /* -- lowlevel (hardware) driver specific -- */
58817- struct snd_ac97_build_ops * build_ops;
58818+ const struct snd_ac97_build_ops * build_ops;
58819 void *private_data;
58820 void (*private_free) (struct snd_ac97 *ac97);
58821 /* --- */
58822diff -urNp linux-2.6.32.43/include/sound/ymfpci.h linux-2.6.32.43/include/sound/ymfpci.h
58823--- linux-2.6.32.43/include/sound/ymfpci.h 2011-03-27 14:31:47.000000000 -0400
58824+++ linux-2.6.32.43/include/sound/ymfpci.h 2011-05-04 17:56:28.000000000 -0400
58825@@ -358,7 +358,7 @@ struct snd_ymfpci {
58826 spinlock_t reg_lock;
58827 spinlock_t voice_lock;
58828 wait_queue_head_t interrupt_sleep;
58829- atomic_t interrupt_sleep_count;
58830+ atomic_unchecked_t interrupt_sleep_count;
58831 struct snd_info_entry *proc_entry;
58832 const struct firmware *dsp_microcode;
58833 const struct firmware *controller_microcode;
58834diff -urNp linux-2.6.32.43/include/trace/events/irq.h linux-2.6.32.43/include/trace/events/irq.h
58835--- linux-2.6.32.43/include/trace/events/irq.h 2011-03-27 14:31:47.000000000 -0400
58836+++ linux-2.6.32.43/include/trace/events/irq.h 2011-04-17 15:56:46.000000000 -0400
58837@@ -34,7 +34,7 @@
58838 */
58839 TRACE_EVENT(irq_handler_entry,
58840
58841- TP_PROTO(int irq, struct irqaction *action),
58842+ TP_PROTO(int irq, const struct irqaction *action),
58843
58844 TP_ARGS(irq, action),
58845
58846@@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
58847 */
58848 TRACE_EVENT(irq_handler_exit,
58849
58850- TP_PROTO(int irq, struct irqaction *action, int ret),
58851+ TP_PROTO(int irq, const struct irqaction *action, int ret),
58852
58853 TP_ARGS(irq, action, ret),
58854
58855@@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
58856 */
58857 TRACE_EVENT(softirq_entry,
58858
58859- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
58860+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
58861
58862 TP_ARGS(h, vec),
58863
58864@@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
58865 */
58866 TRACE_EVENT(softirq_exit,
58867
58868- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
58869+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
58870
58871 TP_ARGS(h, vec),
58872
58873diff -urNp linux-2.6.32.43/include/video/uvesafb.h linux-2.6.32.43/include/video/uvesafb.h
58874--- linux-2.6.32.43/include/video/uvesafb.h 2011-03-27 14:31:47.000000000 -0400
58875+++ linux-2.6.32.43/include/video/uvesafb.h 2011-04-17 15:56:46.000000000 -0400
58876@@ -177,6 +177,7 @@ struct uvesafb_par {
58877 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
58878 u8 pmi_setpal; /* PMI for palette changes */
58879 u16 *pmi_base; /* protected mode interface location */
58880+ u8 *pmi_code; /* protected mode code location */
58881 void *pmi_start;
58882 void *pmi_pal;
58883 u8 *vbe_state_orig; /*
58884diff -urNp linux-2.6.32.43/init/do_mounts.c linux-2.6.32.43/init/do_mounts.c
58885--- linux-2.6.32.43/init/do_mounts.c 2011-03-27 14:31:47.000000000 -0400
58886+++ linux-2.6.32.43/init/do_mounts.c 2011-04-17 15:56:46.000000000 -0400
58887@@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
58888
58889 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
58890 {
58891- int err = sys_mount(name, "/root", fs, flags, data);
58892+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
58893 if (err)
58894 return err;
58895
58896- sys_chdir("/root");
58897+ sys_chdir((__force const char __user *)"/root");
58898 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
58899 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
58900 current->fs->pwd.mnt->mnt_sb->s_type->name,
58901@@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
58902 va_start(args, fmt);
58903 vsprintf(buf, fmt, args);
58904 va_end(args);
58905- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
58906+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
58907 if (fd >= 0) {
58908 sys_ioctl(fd, FDEJECT, 0);
58909 sys_close(fd);
58910 }
58911 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
58912- fd = sys_open("/dev/console", O_RDWR, 0);
58913+ fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
58914 if (fd >= 0) {
58915 sys_ioctl(fd, TCGETS, (long)&termios);
58916 termios.c_lflag &= ~ICANON;
58917 sys_ioctl(fd, TCSETSF, (long)&termios);
58918- sys_read(fd, &c, 1);
58919+ sys_read(fd, (char __user *)&c, 1);
58920 termios.c_lflag |= ICANON;
58921 sys_ioctl(fd, TCSETSF, (long)&termios);
58922 sys_close(fd);
58923@@ -416,6 +416,6 @@ void __init prepare_namespace(void)
58924 mount_root();
58925 out:
58926 devtmpfs_mount("dev");
58927- sys_mount(".", "/", NULL, MS_MOVE, NULL);
58928- sys_chroot(".");
58929+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58930+ sys_chroot((__force char __user *)".");
58931 }
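
The __force/__user casts added to these early-boot files do not change the generated code; they document that kernel pointers are being handed to syscall entry points whose prototypes expect __user pointers, which silences sparse's address-space checker. A compile-anywhere sketch of the annotations involved, using the attribute definitions the kernel's compiler.h provides under __CHECKER__ (reproduced here as an assumption; with an ordinary compiler they expand to nothing):

#ifdef __CHECKER__
# define __user		__attribute__((noderef, address_space(1)))
# define __force	__attribute__((force))
#else
# define __user
# define __force
#endif

#include <stdio.h>

/* stand-in for a syscall that expects a userland buffer */
static long fake_sys_open(const char __user *filename, int flags)
{
	(void)filename;
	(void)flags;
	return 0;
}

int main(void)
{
	const char *kernel_name = "/dev/console";

	/* without the __force __user cast, sparse would warn here */
	long ret = fake_sys_open((__force const char __user *)kernel_name, 0);

	printf("ret = %ld\n", ret);
	return 0;
}
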
58932diff -urNp linux-2.6.32.43/init/do_mounts.h linux-2.6.32.43/init/do_mounts.h
58933--- linux-2.6.32.43/init/do_mounts.h 2011-03-27 14:31:47.000000000 -0400
58934+++ linux-2.6.32.43/init/do_mounts.h 2011-04-17 15:56:46.000000000 -0400
58935@@ -15,15 +15,15 @@ extern int root_mountflags;
58936
58937 static inline int create_dev(char *name, dev_t dev)
58938 {
58939- sys_unlink(name);
58940- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
58941+ sys_unlink((__force char __user *)name);
58942+ return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
58943 }
58944
58945 #if BITS_PER_LONG == 32
58946 static inline u32 bstat(char *name)
58947 {
58948 struct stat64 stat;
58949- if (sys_stat64(name, &stat) != 0)
58950+ if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
58951 return 0;
58952 if (!S_ISBLK(stat.st_mode))
58953 return 0;
58954diff -urNp linux-2.6.32.43/init/do_mounts_initrd.c linux-2.6.32.43/init/do_mounts_initrd.c
58955--- linux-2.6.32.43/init/do_mounts_initrd.c 2011-03-27 14:31:47.000000000 -0400
58956+++ linux-2.6.32.43/init/do_mounts_initrd.c 2011-04-17 15:56:46.000000000 -0400
58957@@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
58958 sys_close(old_fd);sys_close(root_fd);
58959 sys_close(0);sys_close(1);sys_close(2);
58960 sys_setsid();
58961- (void) sys_open("/dev/console",O_RDWR,0);
58962+ (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
58963 (void) sys_dup(0);
58964 (void) sys_dup(0);
58965 return kernel_execve(shell, argv, envp_init);
58966@@ -47,13 +47,13 @@ static void __init handle_initrd(void)
58967 create_dev("/dev/root.old", Root_RAM0);
58968 /* mount initrd on rootfs' /root */
58969 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
58970- sys_mkdir("/old", 0700);
58971- root_fd = sys_open("/", 0, 0);
58972- old_fd = sys_open("/old", 0, 0);
58973+ sys_mkdir((__force const char __user *)"/old", 0700);
58974+ root_fd = sys_open((__force const char __user *)"/", 0, 0);
58975+ old_fd = sys_open((__force const char __user *)"/old", 0, 0);
58976 /* move initrd over / and chdir/chroot in initrd root */
58977- sys_chdir("/root");
58978- sys_mount(".", "/", NULL, MS_MOVE, NULL);
58979- sys_chroot(".");
58980+ sys_chdir((__force const char __user *)"/root");
58981+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
58982+ sys_chroot((__force const char __user *)".");
58983
58984 /*
58985 * In case that a resume from disk is carried out by linuxrc or one of
58986@@ -70,15 +70,15 @@ static void __init handle_initrd(void)
58987
58988 /* move initrd to rootfs' /old */
58989 sys_fchdir(old_fd);
58990- sys_mount("/", ".", NULL, MS_MOVE, NULL);
58991+ sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
58992 /* switch root and cwd back to / of rootfs */
58993 sys_fchdir(root_fd);
58994- sys_chroot(".");
58995+ sys_chroot((__force const char __user *)".");
58996 sys_close(old_fd);
58997 sys_close(root_fd);
58998
58999 if (new_decode_dev(real_root_dev) == Root_RAM0) {
59000- sys_chdir("/old");
59001+ sys_chdir((__force const char __user *)"/old");
59002 return;
59003 }
59004
59005@@ -86,17 +86,17 @@ static void __init handle_initrd(void)
59006 mount_root();
59007
59008 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
59009- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
59010+ error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
59011 if (!error)
59012 printk("okay\n");
59013 else {
59014- int fd = sys_open("/dev/root.old", O_RDWR, 0);
59015+ int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
59016 if (error == -ENOENT)
59017 printk("/initrd does not exist. Ignored.\n");
59018 else
59019 printk("failed\n");
59020 printk(KERN_NOTICE "Unmounting old root\n");
59021- sys_umount("/old", MNT_DETACH);
59022+ sys_umount((__force char __user *)"/old", MNT_DETACH);
59023 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
59024 if (fd < 0) {
59025 error = fd;
59026@@ -119,11 +119,11 @@ int __init initrd_load(void)
59027 * mounted in the normal path.
59028 */
59029 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
59030- sys_unlink("/initrd.image");
59031+ sys_unlink((__force const char __user *)"/initrd.image");
59032 handle_initrd();
59033 return 1;
59034 }
59035 }
59036- sys_unlink("/initrd.image");
59037+ sys_unlink((__force const char __user *)"/initrd.image");
59038 return 0;
59039 }
59040diff -urNp linux-2.6.32.43/init/do_mounts_md.c linux-2.6.32.43/init/do_mounts_md.c
59041--- linux-2.6.32.43/init/do_mounts_md.c 2011-03-27 14:31:47.000000000 -0400
59042+++ linux-2.6.32.43/init/do_mounts_md.c 2011-04-17 15:56:46.000000000 -0400
59043@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
59044 partitioned ? "_d" : "", minor,
59045 md_setup_args[ent].device_names);
59046
59047- fd = sys_open(name, 0, 0);
59048+ fd = sys_open((__force char __user *)name, 0, 0);
59049 if (fd < 0) {
59050 printk(KERN_ERR "md: open failed - cannot start "
59051 "array %s\n", name);
59052@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
59053 * array without it
59054 */
59055 sys_close(fd);
59056- fd = sys_open(name, 0, 0);
59057+ fd = sys_open((__force char __user *)name, 0, 0);
59058 sys_ioctl(fd, BLKRRPART, 0);
59059 }
59060 sys_close(fd);
59061@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
59062
59063 wait_for_device_probe();
59064
59065- fd = sys_open("/dev/md0", 0, 0);
59066+ fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
59067 if (fd >= 0) {
59068 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
59069 sys_close(fd);
59070diff -urNp linux-2.6.32.43/init/initramfs.c linux-2.6.32.43/init/initramfs.c
59071--- linux-2.6.32.43/init/initramfs.c 2011-03-27 14:31:47.000000000 -0400
59072+++ linux-2.6.32.43/init/initramfs.c 2011-04-17 15:56:46.000000000 -0400
59073@@ -74,7 +74,7 @@ static void __init free_hash(void)
59074 }
59075 }
59076
59077-static long __init do_utime(char __user *filename, time_t mtime)
59078+static long __init do_utime(__force char __user *filename, time_t mtime)
59079 {
59080 struct timespec t[2];
59081
59082@@ -109,7 +109,7 @@ static void __init dir_utime(void)
59083 struct dir_entry *de, *tmp;
59084 list_for_each_entry_safe(de, tmp, &dir_list, list) {
59085 list_del(&de->list);
59086- do_utime(de->name, de->mtime);
59087+ do_utime((__force char __user *)de->name, de->mtime);
59088 kfree(de->name);
59089 kfree(de);
59090 }
59091@@ -271,7 +271,7 @@ static int __init maybe_link(void)
59092 if (nlink >= 2) {
59093 char *old = find_link(major, minor, ino, mode, collected);
59094 if (old)
59095- return (sys_link(old, collected) < 0) ? -1 : 1;
59096+ return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
59097 }
59098 return 0;
59099 }
59100@@ -280,11 +280,11 @@ static void __init clean_path(char *path
59101 {
59102 struct stat st;
59103
59104- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
59105+ if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
59106 if (S_ISDIR(st.st_mode))
59107- sys_rmdir(path);
59108+ sys_rmdir((__force char __user *)path);
59109 else
59110- sys_unlink(path);
59111+ sys_unlink((__force char __user *)path);
59112 }
59113 }
59114
59115@@ -305,7 +305,7 @@ static int __init do_name(void)
59116 int openflags = O_WRONLY|O_CREAT;
59117 if (ml != 1)
59118 openflags |= O_TRUNC;
59119- wfd = sys_open(collected, openflags, mode);
59120+ wfd = sys_open((__force char __user *)collected, openflags, mode);
59121
59122 if (wfd >= 0) {
59123 sys_fchown(wfd, uid, gid);
59124@@ -317,17 +317,17 @@ static int __init do_name(void)
59125 }
59126 }
59127 } else if (S_ISDIR(mode)) {
59128- sys_mkdir(collected, mode);
59129- sys_chown(collected, uid, gid);
59130- sys_chmod(collected, mode);
59131+ sys_mkdir((__force char __user *)collected, mode);
59132+ sys_chown((__force char __user *)collected, uid, gid);
59133+ sys_chmod((__force char __user *)collected, mode);
59134 dir_add(collected, mtime);
59135 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
59136 S_ISFIFO(mode) || S_ISSOCK(mode)) {
59137 if (maybe_link() == 0) {
59138- sys_mknod(collected, mode, rdev);
59139- sys_chown(collected, uid, gid);
59140- sys_chmod(collected, mode);
59141- do_utime(collected, mtime);
59142+ sys_mknod((__force char __user *)collected, mode, rdev);
59143+ sys_chown((__force char __user *)collected, uid, gid);
59144+ sys_chmod((__force char __user *)collected, mode);
59145+ do_utime((__force char __user *)collected, mtime);
59146 }
59147 }
59148 return 0;
59149@@ -336,15 +336,15 @@ static int __init do_name(void)
59150 static int __init do_copy(void)
59151 {
59152 if (count >= body_len) {
59153- sys_write(wfd, victim, body_len);
59154+ sys_write(wfd, (__force char __user *)victim, body_len);
59155 sys_close(wfd);
59156- do_utime(vcollected, mtime);
59157+ do_utime((__force char __user *)vcollected, mtime);
59158 kfree(vcollected);
59159 eat(body_len);
59160 state = SkipIt;
59161 return 0;
59162 } else {
59163- sys_write(wfd, victim, count);
59164+ sys_write(wfd, (__force char __user *)victim, count);
59165 body_len -= count;
59166 eat(count);
59167 return 1;
59168@@ -355,9 +355,9 @@ static int __init do_symlink(void)
59169 {
59170 collected[N_ALIGN(name_len) + body_len] = '\0';
59171 clean_path(collected, 0);
59172- sys_symlink(collected + N_ALIGN(name_len), collected);
59173- sys_lchown(collected, uid, gid);
59174- do_utime(collected, mtime);
59175+ sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
59176+ sys_lchown((__force char __user *)collected, uid, gid);
59177+ do_utime((__force char __user *)collected, mtime);
59178 state = SkipIt;
59179 next_state = Reset;
59180 return 0;
59181diff -urNp linux-2.6.32.43/init/Kconfig linux-2.6.32.43/init/Kconfig
59182--- linux-2.6.32.43/init/Kconfig 2011-05-10 22:12:01.000000000 -0400
59183+++ linux-2.6.32.43/init/Kconfig 2011-05-10 22:12:34.000000000 -0400
59184@@ -1004,7 +1004,7 @@ config SLUB_DEBUG
59185
59186 config COMPAT_BRK
59187 bool "Disable heap randomization"
59188- default y
59189+ default n
59190 help
59191 Randomizing heap placement makes heap exploits harder, but it
59192 also breaks ancient binaries (including anything libc5 based).
59193diff -urNp linux-2.6.32.43/init/main.c linux-2.6.32.43/init/main.c
59194--- linux-2.6.32.43/init/main.c 2011-05-10 22:12:01.000000000 -0400
59195+++ linux-2.6.32.43/init/main.c 2011-05-22 23:02:06.000000000 -0400
59196@@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
59197 #ifdef CONFIG_TC
59198 extern void tc_init(void);
59199 #endif
59200+extern void grsecurity_init(void);
59201
59202 enum system_states system_state __read_mostly;
59203 EXPORT_SYMBOL(system_state);
59204@@ -183,6 +184,49 @@ static int __init set_reset_devices(char
59205
59206 __setup("reset_devices", set_reset_devices);
59207
59208+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
59209+extern char pax_enter_kernel_user[];
59210+extern char pax_exit_kernel_user[];
59211+extern pgdval_t clone_pgd_mask;
59212+#endif
59213+
59214+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
59215+static int __init setup_pax_nouderef(char *str)
59216+{
59217+#ifdef CONFIG_X86_32
59218+ unsigned int cpu;
59219+ struct desc_struct *gdt;
59220+
59221+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
59222+ gdt = get_cpu_gdt_table(cpu);
59223+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
59224+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
59225+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
59226+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
59227+ }
59228+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
59229+#else
59230+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
59231+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
59232+ clone_pgd_mask = ~(pgdval_t)0UL;
59233+#endif
59234+
59235+ return 0;
59236+}
59237+early_param("pax_nouderef", setup_pax_nouderef);
59238+#endif
59239+
59240+#ifdef CONFIG_PAX_SOFTMODE
59241+unsigned int pax_softmode;
59242+
59243+static int __init setup_pax_softmode(char *str)
59244+{
59245+ get_option(&str, &pax_softmode);
59246+ return 1;
59247+}
59248+__setup("pax_softmode=", setup_pax_softmode);
59249+#endif
59250+
59251 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
59252 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
59253 static const char *panic_later, *panic_param;
59254@@ -705,52 +749,53 @@ int initcall_debug;
59255 core_param(initcall_debug, initcall_debug, bool, 0644);
59256
59257 static char msgbuf[64];
59258-static struct boot_trace_call call;
59259-static struct boot_trace_ret ret;
59260+static struct boot_trace_call trace_call;
59261+static struct boot_trace_ret trace_ret;
59262
59263 int do_one_initcall(initcall_t fn)
59264 {
59265 int count = preempt_count();
59266 ktime_t calltime, delta, rettime;
59267+ const char *msg1 = "", *msg2 = "";
59268
59269 if (initcall_debug) {
59270- call.caller = task_pid_nr(current);
59271- printk("calling %pF @ %i\n", fn, call.caller);
59272+ trace_call.caller = task_pid_nr(current);
59273+ printk("calling %pF @ %i\n", fn, trace_call.caller);
59274 calltime = ktime_get();
59275- trace_boot_call(&call, fn);
59276+ trace_boot_call(&trace_call, fn);
59277 enable_boot_trace();
59278 }
59279
59280- ret.result = fn();
59281+ trace_ret.result = fn();
59282
59283 if (initcall_debug) {
59284 disable_boot_trace();
59285 rettime = ktime_get();
59286 delta = ktime_sub(rettime, calltime);
59287- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
59288- trace_boot_ret(&ret, fn);
59289+ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
59290+ trace_boot_ret(&trace_ret, fn);
59291 printk("initcall %pF returned %d after %Ld usecs\n", fn,
59292- ret.result, ret.duration);
59293+ trace_ret.result, trace_ret.duration);
59294 }
59295
59296 msgbuf[0] = 0;
59297
59298- if (ret.result && ret.result != -ENODEV && initcall_debug)
59299- sprintf(msgbuf, "error code %d ", ret.result);
59300+ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
59301+ sprintf(msgbuf, "error code %d ", trace_ret.result);
59302
59303 if (preempt_count() != count) {
59304- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
59305+ msg1 = " preemption imbalance";
59306 preempt_count() = count;
59307 }
59308 if (irqs_disabled()) {
59309- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
59310+ msg2 = " disabled interrupts";
59311 local_irq_enable();
59312 }
59313- if (msgbuf[0]) {
59314- printk("initcall %pF returned with %s\n", fn, msgbuf);
59315+ if (msgbuf[0] || *msg1 || *msg2) {
59316+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
59317 }
59318
59319- return ret.result;
59320+ return trace_ret.result;
59321 }
59322
59323
59324@@ -893,11 +938,13 @@ static int __init kernel_init(void * unu
59325 if (!ramdisk_execute_command)
59326 ramdisk_execute_command = "/init";
59327
59328- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
59329+ if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
59330 ramdisk_execute_command = NULL;
59331 prepare_namespace();
59332 }
59333
59334+ grsecurity_init();
59335+
59336 /*
59337 * Ok, we have completed the initial bootup, and
59338 * we're essentially up and running. Get rid of the
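
The pax_softmode hook added above follows the usual __setup() pattern: a handler that parses "pax_softmode=<n>" from the kernel command line via get_option(). A userspace sketch of that parsing step only, with strstr()/strtoul() standing in for the command-line plumbing and get_option() (assumptions of this sketch):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static unsigned int pax_softmode;

static int setup_pax_softmode(const char *str)
{
	pax_softmode = (unsigned int)strtoul(str, NULL, 0);
	return 1;
}

int main(void)
{
	const char *cmdline = "quiet pax_softmode=1 ro";
	const char *opt = strstr(cmdline, "pax_softmode=");

	if (opt)
		setup_pax_softmode(opt + strlen("pax_softmode="));

	printf("pax_softmode = %u\n", pax_softmode);
	return 0;
}
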
59339diff -urNp linux-2.6.32.43/init/noinitramfs.c linux-2.6.32.43/init/noinitramfs.c
59340--- linux-2.6.32.43/init/noinitramfs.c 2011-03-27 14:31:47.000000000 -0400
59341+++ linux-2.6.32.43/init/noinitramfs.c 2011-04-17 15:56:46.000000000 -0400
59342@@ -29,7 +29,7 @@ static int __init default_rootfs(void)
59343 {
59344 int err;
59345
59346- err = sys_mkdir("/dev", 0755);
59347+ err = sys_mkdir((const char __user *)"/dev", 0755);
59348 if (err < 0)
59349 goto out;
59350
59351@@ -39,7 +39,7 @@ static int __init default_rootfs(void)
59352 if (err < 0)
59353 goto out;
59354
59355- err = sys_mkdir("/root", 0700);
59356+ err = sys_mkdir((const char __user *)"/root", 0700);
59357 if (err < 0)
59358 goto out;
59359
59360diff -urNp linux-2.6.32.43/ipc/mqueue.c linux-2.6.32.43/ipc/mqueue.c
59361--- linux-2.6.32.43/ipc/mqueue.c 2011-03-27 14:31:47.000000000 -0400
59362+++ linux-2.6.32.43/ipc/mqueue.c 2011-04-17 15:56:46.000000000 -0400
59363@@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
59364 mq_bytes = (mq_msg_tblsz +
59365 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
59366
59367+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
59368 spin_lock(&mq_lock);
59369 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
59370 u->mq_bytes + mq_bytes >
59371diff -urNp linux-2.6.32.43/ipc/sem.c linux-2.6.32.43/ipc/sem.c
59372--- linux-2.6.32.43/ipc/sem.c 2011-03-27 14:31:47.000000000 -0400
59373+++ linux-2.6.32.43/ipc/sem.c 2011-05-16 21:46:57.000000000 -0400
59374@@ -671,6 +671,8 @@ static int semctl_main(struct ipc_namesp
59375 ushort* sem_io = fast_sem_io;
59376 int nsems;
59377
59378+ pax_track_stack();
59379+
59380 sma = sem_lock_check(ns, semid);
59381 if (IS_ERR(sma))
59382 return PTR_ERR(sma);
59383@@ -1071,6 +1073,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
59384 unsigned long jiffies_left = 0;
59385 struct ipc_namespace *ns;
59386
59387+ pax_track_stack();
59388+
59389 ns = current->nsproxy->ipc_ns;
59390
59391 if (nsops < 1 || semid < 0)
59392diff -urNp linux-2.6.32.43/ipc/shm.c linux-2.6.32.43/ipc/shm.c
59393--- linux-2.6.32.43/ipc/shm.c 2011-03-27 14:31:47.000000000 -0400
59394+++ linux-2.6.32.43/ipc/shm.c 2011-04-17 15:56:46.000000000 -0400
59395@@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
59396 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
59397 #endif
59398
59399+#ifdef CONFIG_GRKERNSEC
59400+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
59401+ const time_t shm_createtime, const uid_t cuid,
59402+ const int shmid);
59403+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
59404+ const time_t shm_createtime);
59405+#endif
59406+
59407 void shm_init_ns(struct ipc_namespace *ns)
59408 {
59409 ns->shm_ctlmax = SHMMAX;
59410@@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *
59411 shp->shm_lprid = 0;
59412 shp->shm_atim = shp->shm_dtim = 0;
59413 shp->shm_ctim = get_seconds();
59414+#ifdef CONFIG_GRKERNSEC
59415+ {
59416+ struct timespec timeval;
59417+ do_posix_clock_monotonic_gettime(&timeval);
59418+
59419+ shp->shm_createtime = timeval.tv_sec;
59420+ }
59421+#endif
59422 shp->shm_segsz = size;
59423 shp->shm_nattch = 0;
59424 shp->shm_file = file;
59425@@ -880,9 +896,21 @@ long do_shmat(int shmid, char __user *sh
59426 if (err)
59427 goto out_unlock;
59428
59429+#ifdef CONFIG_GRKERNSEC
59430+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
59431+ shp->shm_perm.cuid, shmid) ||
59432+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
59433+ err = -EACCES;
59434+ goto out_unlock;
59435+ }
59436+#endif
59437+
59438 path.dentry = dget(shp->shm_file->f_path.dentry);
59439 path.mnt = shp->shm_file->f_path.mnt;
59440 shp->shm_nattch++;
59441+#ifdef CONFIG_GRKERNSEC
59442+ shp->shm_lapid = current->pid;
59443+#endif
59444 size = i_size_read(path.dentry->d_inode);
59445 shm_unlock(shp);
59446
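
/*
 * The do_shmat() hunk above lets grsecurity veto an attach: if either
 * gr_handle_shmat() or gr_chroot_shmat() refuses (for example, attaching
 * from inside a chroot to a segment created outside it), shmat() now
 * fails with EACCES instead of mapping the segment.  A small userspace
 * exerciser for that path (plain SysV shm usage, nothing patch-specific):
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
        int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
        if (id < 0) {
                perror("shmget");
                return 1;
        }

        void *p = shmat(id, NULL, 0);
        if (p == (void *)-1)
                /* with the hunk above, a denied attach reports EACCES */
                fprintf(stderr, "shmat: %s\n", strerror(errno));
        else
                shmdt(p);

        shmctl(id, IPC_RMID, NULL);     /* clean up the segment */
        return 0;
}
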
59447diff -urNp linux-2.6.32.43/kernel/acct.c linux-2.6.32.43/kernel/acct.c
59448--- linux-2.6.32.43/kernel/acct.c 2011-03-27 14:31:47.000000000 -0400
59449+++ linux-2.6.32.43/kernel/acct.c 2011-04-17 15:56:46.000000000 -0400
59450@@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_a
59451 */
59452 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
59453 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
59454- file->f_op->write(file, (char *)&ac,
59455+ file->f_op->write(file, (__force char __user *)&ac,
59456 sizeof(acct_t), &file->f_pos);
59457 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
59458 set_fs(fs);
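
/*
 * The acct.c change itself is only a __force __user cast for sparse,
 * since the accounting record is written from a kernel buffer under
 * KERNEL_DS.  The surrounding context shows the save/raise/restore of
 * RLIMIT_FSIZE so the record always lands on disk; the same idiom looks
 * like this with the userspace rlimit API (an analogy, not the kernel
 * code):
 */
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
        struct rlimit saved, lifted;

        if (getrlimit(RLIMIT_FSIZE, &saved) != 0)
                return 1;

        lifted = saved;
        lifted.rlim_cur = saved.rlim_max;   /* raise the soft limit to the hard limit */
        if (setrlimit(RLIMIT_FSIZE, &lifted) != 0)
                perror("setrlimit");

        /* ... the write that must not be truncated would go here ... */

        setrlimit(RLIMIT_FSIZE, &saved);    /* put the original soft limit back */
        printf("soft limit restored to %llu\n",
               (unsigned long long)saved.rlim_cur);
        return 0;
}
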
59459diff -urNp linux-2.6.32.43/kernel/audit.c linux-2.6.32.43/kernel/audit.c
59460--- linux-2.6.32.43/kernel/audit.c 2011-03-27 14:31:47.000000000 -0400
59461+++ linux-2.6.32.43/kernel/audit.c 2011-05-04 17:56:28.000000000 -0400
59462@@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
59463 3) suppressed due to audit_rate_limit
59464 4) suppressed due to audit_backlog_limit
59465 */
59466-static atomic_t audit_lost = ATOMIC_INIT(0);
59467+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
59468
59469 /* The netlink socket. */
59470 static struct sock *audit_sock;
59471@@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
59472 unsigned long now;
59473 int print;
59474
59475- atomic_inc(&audit_lost);
59476+ atomic_inc_unchecked(&audit_lost);
59477
59478 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
59479
59480@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
59481 printk(KERN_WARNING
59482 "audit: audit_lost=%d audit_rate_limit=%d "
59483 "audit_backlog_limit=%d\n",
59484- atomic_read(&audit_lost),
59485+ atomic_read_unchecked(&audit_lost),
59486 audit_rate_limit,
59487 audit_backlog_limit);
59488 audit_panic(message);
59489@@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b
59490 status_set.pid = audit_pid;
59491 status_set.rate_limit = audit_rate_limit;
59492 status_set.backlog_limit = audit_backlog_limit;
59493- status_set.lost = atomic_read(&audit_lost);
59494+ status_set.lost = atomic_read_unchecked(&audit_lost);
59495 status_set.backlog = skb_queue_len(&audit_skb_queue);
59496 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
59497 &status_set, sizeof(status_set));
59498@@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_b
59499 spin_unlock_irq(&tsk->sighand->siglock);
59500 }
59501 read_unlock(&tasklist_lock);
59502- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
59503- &s, sizeof(s));
59504+
59505+ if (!err)
59506+ audit_send_reply(NETLINK_CB(skb).pid, seq,
59507+ AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
59508 break;
59509 }
59510 case AUDIT_TTY_SET: {
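
/*
 * audit_lost is a pure statistics counter, so the hunk above moves it to
 * atomic_unchecked_t: under PaX's REFCOUNT hardening (added elsewhere in
 * this patch) plain atomic_inc() refuses to overflow, and counters that
 * are allowed to wrap must opt out explicitly.  A userspace model of the
 * distinction (illustrative only; the real implementation lives in the
 * arch atomic.h changes, not here):
 */
#include <limits.h>
#include <stdio.h>

static int checked_inc(int *v)
{
        if (*v == INT_MAX)
                return -1;              /* REFCOUNT-style: refuse to wrap */
        (*v)++;
        return 0;
}

static void unchecked_inc(unsigned int *v)
{
        (*v)++;                         /* statistics: wrapping is harmless */
}

int main(void)
{
        int refcount = INT_MAX;
        unsigned int lost = 0xffffffffu;

        if (checked_inc(&refcount) < 0)
                puts("checked counter refused to overflow");
        unchecked_inc(&lost);
        printf("unchecked counter wrapped to %u\n", lost);
        return 0;
}
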
59511diff -urNp linux-2.6.32.43/kernel/auditsc.c linux-2.6.32.43/kernel/auditsc.c
59512--- linux-2.6.32.43/kernel/auditsc.c 2011-03-27 14:31:47.000000000 -0400
59513+++ linux-2.6.32.43/kernel/auditsc.c 2011-05-04 17:56:28.000000000 -0400
59514@@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte
59515 }
59516
59517 /* global counter which is incremented every time something logs in */
59518-static atomic_t session_id = ATOMIC_INIT(0);
59519+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
59520
59521 /**
59522 * audit_set_loginuid - set a task's audit_context loginuid
59523@@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT
59524 */
59525 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
59526 {
59527- unsigned int sessionid = atomic_inc_return(&session_id);
59528+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
59529 struct audit_context *context = task->audit_context;
59530
59531 if (context && context->in_syscall) {
59532diff -urNp linux-2.6.32.43/kernel/capability.c linux-2.6.32.43/kernel/capability.c
59533--- linux-2.6.32.43/kernel/capability.c 2011-03-27 14:31:47.000000000 -0400
59534+++ linux-2.6.32.43/kernel/capability.c 2011-04-17 15:56:46.000000000 -0400
59535@@ -305,10 +305,26 @@ int capable(int cap)
59536 BUG();
59537 }
59538
59539- if (security_capable(cap) == 0) {
59540+ if (security_capable(cap) == 0 && gr_is_capable(cap)) {
59541 current->flags |= PF_SUPERPRIV;
59542 return 1;
59543 }
59544 return 0;
59545 }
59546+
59547+int capable_nolog(int cap)
59548+{
59549+ if (unlikely(!cap_valid(cap))) {
59550+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
59551+ BUG();
59552+ }
59553+
59554+ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
59555+ current->flags |= PF_SUPERPRIV;
59556+ return 1;
59557+ }
59558+ return 0;
59559+}
59560+
59561 EXPORT_SYMBOL(capable);
59562+EXPORT_SYMBOL(capable_nolog);
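
/*
 * capable_nolog() above exists so that call sites which merely probe for
 * a capability (and fall back gracefully) can consult the grsecurity
 * RBAC layer without generating a log entry for the expected denial,
 * while capable() keeps logging.  A userspace model of that split (the
 * gr_is_capable*() hooks themselves are defined in the grsecurity/ files
 * added by this patch, not shown here):
 */
#include <stdbool.h>
#include <stdio.h>

static bool policy_allows(int cap)
{
        return cap == 21;               /* pretend only CAP_SYS_ADMIN (21) is granted */
}

static bool has_cap(int cap, bool log)
{
        bool ok = policy_allows(cap);

        if (!ok && log)
                fprintf(stderr, "denied capability %d\n", cap);
        return ok;
}

#define capable(cap)        has_cap((cap), true)
#define capable_nolog(cap)  has_cap((cap), false)

int main(void)
{
        if (!capable_nolog(12))         /* CAP_NET_ADMIN probe: silent */
                puts("probe failed quietly, falling back");
        if (!capable(12))               /* hard requirement: logged */
                puts("operation rejected");
        return 0;
}
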
59563diff -urNp linux-2.6.32.43/kernel/cgroup.c linux-2.6.32.43/kernel/cgroup.c
59564--- linux-2.6.32.43/kernel/cgroup.c 2011-03-27 14:31:47.000000000 -0400
59565+++ linux-2.6.32.43/kernel/cgroup.c 2011-05-16 21:46:57.000000000 -0400
59566@@ -536,6 +536,8 @@ static struct css_set *find_css_set(
59567 struct hlist_head *hhead;
59568 struct cg_cgroup_link *link;
59569
59570+ pax_track_stack();
59571+
59572 /* First see if we already have a cgroup group that matches
59573 * the desired set */
59574 read_lock(&css_set_lock);
59575diff -urNp linux-2.6.32.43/kernel/configs.c linux-2.6.32.43/kernel/configs.c
59576--- linux-2.6.32.43/kernel/configs.c 2011-03-27 14:31:47.000000000 -0400
59577+++ linux-2.6.32.43/kernel/configs.c 2011-04-17 15:56:46.000000000 -0400
59578@@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
59579 struct proc_dir_entry *entry;
59580
59581 /* create the current config file */
59582+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
59583+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
59584+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
59585+ &ikconfig_file_ops);
59586+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59587+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
59588+ &ikconfig_file_ops);
59589+#endif
59590+#else
59591 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
59592 &ikconfig_file_ops);
59593+#endif
59594+
59595 if (!entry)
59596 return -ENOMEM;
59597
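
/*
 * The ikconfig hunk narrows /proc/config.gz from world-readable (0444)
 * to 0400 or 0440 depending on the GRKERNSEC_PROC_* options, so only
 * root (or the configured proc group) can read the kernel build
 * configuration.  A quick way to observe the resulting mode on a running
 * system (requires CONFIG_IKCONFIG_PROC):
 */
#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
        struct stat st;

        if (stat("/proc/config.gz", &st) != 0) {
                perror("stat /proc/config.gz");
                return 1;
        }
        printf("/proc/config.gz mode: %04o\n", st.st_mode & 07777);
        return 0;
}
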
59598diff -urNp linux-2.6.32.43/kernel/cpu.c linux-2.6.32.43/kernel/cpu.c
59599--- linux-2.6.32.43/kernel/cpu.c 2011-03-27 14:31:47.000000000 -0400
59600+++ linux-2.6.32.43/kernel/cpu.c 2011-04-17 15:56:46.000000000 -0400
59601@@ -19,7 +19,7 @@
59602 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
59603 static DEFINE_MUTEX(cpu_add_remove_lock);
59604
59605-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
59606+static RAW_NOTIFIER_HEAD(cpu_chain);
59607
59608 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
59609 * Should always be manipulated under cpu_add_remove_lock
59610diff -urNp linux-2.6.32.43/kernel/cred.c linux-2.6.32.43/kernel/cred.c
59611--- linux-2.6.32.43/kernel/cred.c 2011-03-27 14:31:47.000000000 -0400
59612+++ linux-2.6.32.43/kernel/cred.c 2011-05-17 19:26:34.000000000 -0400
59613@@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head
59614 */
59615 void __put_cred(struct cred *cred)
59616 {
59617+ pax_track_stack();
59618+
59619 kdebug("__put_cred(%p{%d,%d})", cred,
59620 atomic_read(&cred->usage),
59621 read_cred_subscribers(cred));
59622@@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
59623 {
59624 struct cred *cred;
59625
59626+ pax_track_stack();
59627+
59628 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
59629 atomic_read(&tsk->cred->usage),
59630 read_cred_subscribers(tsk->cred));
59631@@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct
59632 {
59633 const struct cred *cred;
59634
59635+ pax_track_stack();
59636+
59637 rcu_read_lock();
59638
59639 do {
59640@@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
59641 {
59642 struct cred *new;
59643
59644+ pax_track_stack();
59645+
59646 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
59647 if (!new)
59648 return NULL;
59649@@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
59650 const struct cred *old;
59651 struct cred *new;
59652
59653+ pax_track_stack();
59654+
59655 validate_process_creds();
59656
59657 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
59658@@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
59659 struct thread_group_cred *tgcred = NULL;
59660 struct cred *new;
59661
59662+ pax_track_stack();
59663+
59664 #ifdef CONFIG_KEYS
59665 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
59666 if (!tgcred)
59667@@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, un
59668 struct cred *new;
59669 int ret;
59670
59671+ pax_track_stack();
59672+
59673 mutex_init(&p->cred_guard_mutex);
59674
59675 if (
59676@@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
59677 struct task_struct *task = current;
59678 const struct cred *old = task->real_cred;
59679
59680+ pax_track_stack();
59681+
59682 kdebug("commit_creds(%p{%d,%d})", new,
59683 atomic_read(&new->usage),
59684 read_cred_subscribers(new));
59685@@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
59686
59687 get_cred(new); /* we will require a ref for the subj creds too */
59688
59689+ gr_set_role_label(task, new->uid, new->gid);
59690+
59691 /* dumpability changes */
59692 if (old->euid != new->euid ||
59693 old->egid != new->egid ||
59694@@ -606,6 +624,8 @@ EXPORT_SYMBOL(commit_creds);
59695 */
59696 void abort_creds(struct cred *new)
59697 {
59698+ pax_track_stack();
59699+
59700 kdebug("abort_creds(%p{%d,%d})", new,
59701 atomic_read(&new->usage),
59702 read_cred_subscribers(new));
59703@@ -629,6 +649,8 @@ const struct cred *override_creds(const
59704 {
59705 const struct cred *old = current->cred;
59706
59707+ pax_track_stack();
59708+
59709 kdebug("override_creds(%p{%d,%d})", new,
59710 atomic_read(&new->usage),
59711 read_cred_subscribers(new));
59712@@ -658,6 +680,8 @@ void revert_creds(const struct cred *old
59713 {
59714 const struct cred *override = current->cred;
59715
59716+ pax_track_stack();
59717+
59718 kdebug("revert_creds(%p{%d,%d})", old,
59719 atomic_read(&old->usage),
59720 read_cred_subscribers(old));
59721@@ -704,6 +728,8 @@ struct cred *prepare_kernel_cred(struct
59722 const struct cred *old;
59723 struct cred *new;
59724
59725+ pax_track_stack();
59726+
59727 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
59728 if (!new)
59729 return NULL;
59730@@ -758,6 +784,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
59731 */
59732 int set_security_override(struct cred *new, u32 secid)
59733 {
59734+ pax_track_stack();
59735+
59736 return security_kernel_act_as(new, secid);
59737 }
59738 EXPORT_SYMBOL(set_security_override);
59739@@ -777,6 +805,8 @@ int set_security_override_from_ctx(struc
59740 u32 secid;
59741 int ret;
59742
59743+ pax_track_stack();
59744+
59745 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
59746 if (ret < 0)
59747 return ret;
59748diff -urNp linux-2.6.32.43/kernel/exit.c linux-2.6.32.43/kernel/exit.c
59749--- linux-2.6.32.43/kernel/exit.c 2011-03-27 14:31:47.000000000 -0400
59750+++ linux-2.6.32.43/kernel/exit.c 2011-04-17 15:56:46.000000000 -0400
59751@@ -55,6 +55,10 @@
59752 #include <asm/pgtable.h>
59753 #include <asm/mmu_context.h>
59754
59755+#ifdef CONFIG_GRKERNSEC
59756+extern rwlock_t grsec_exec_file_lock;
59757+#endif
59758+
59759 static void exit_mm(struct task_struct * tsk);
59760
59761 static void __unhash_process(struct task_struct *p)
59762@@ -174,6 +178,8 @@ void release_task(struct task_struct * p
59763 struct task_struct *leader;
59764 int zap_leader;
59765 repeat:
59766+ gr_del_task_from_ip_table(p);
59767+
59768 tracehook_prepare_release_task(p);
59769 /* don't need to get the RCU readlock here - the process is dead and
59770 * can't be modifying its own credentials */
59771@@ -341,11 +347,22 @@ static void reparent_to_kthreadd(void)
59772 {
59773 write_lock_irq(&tasklist_lock);
59774
59775+#ifdef CONFIG_GRKERNSEC
59776+ write_lock(&grsec_exec_file_lock);
59777+ if (current->exec_file) {
59778+ fput(current->exec_file);
59779+ current->exec_file = NULL;
59780+ }
59781+ write_unlock(&grsec_exec_file_lock);
59782+#endif
59783+
59784 ptrace_unlink(current);
59785 /* Reparent to init */
59786 current->real_parent = current->parent = kthreadd_task;
59787 list_move_tail(&current->sibling, &current->real_parent->children);
59788
59789+ gr_set_kernel_label(current);
59790+
59791 /* Set the exit signal to SIGCHLD so we signal init on exit */
59792 current->exit_signal = SIGCHLD;
59793
59794@@ -397,7 +414,7 @@ int allow_signal(int sig)
59795 * know it'll be handled, so that they don't get converted to
59796 * SIGKILL or just silently dropped.
59797 */
59798- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
59799+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
59800 recalc_sigpending();
59801 spin_unlock_irq(&current->sighand->siglock);
59802 return 0;
59803@@ -433,6 +450,17 @@ void daemonize(const char *name, ...)
59804 vsnprintf(current->comm, sizeof(current->comm), name, args);
59805 va_end(args);
59806
59807+#ifdef CONFIG_GRKERNSEC
59808+ write_lock(&grsec_exec_file_lock);
59809+ if (current->exec_file) {
59810+ fput(current->exec_file);
59811+ current->exec_file = NULL;
59812+ }
59813+ write_unlock(&grsec_exec_file_lock);
59814+#endif
59815+
59816+ gr_set_kernel_label(current);
59817+
59818 /*
59819 * If we were started as result of loading a module, close all of the
59820 * user space pages. We don't need them, and if we didn't close them
59821@@ -897,17 +925,17 @@ NORET_TYPE void do_exit(long code)
59822 struct task_struct *tsk = current;
59823 int group_dead;
59824
59825- profile_task_exit(tsk);
59826-
59827- WARN_ON(atomic_read(&tsk->fs_excl));
59828-
59829+ /*
59830+ * Check this first since set_fs() below depends on
59831+ * current_thread_info(), which we better not access when we're in
59832+ * interrupt context. Other than that, we want to do the set_fs()
59833+ * as early as possible.
59834+ */
59835 if (unlikely(in_interrupt()))
59836 panic("Aiee, killing interrupt handler!");
59837- if (unlikely(!tsk->pid))
59838- panic("Attempted to kill the idle task!");
59839
59840 /*
59841- * If do_exit is called because this processes oopsed, it's possible
59842+ * If do_exit is called because this processes Oops'ed, it's possible
59843 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
59844 * continuing. Amongst other possible reasons, this is to prevent
59845 * mm_release()->clear_child_tid() from writing to a user-controlled
59846@@ -915,6 +943,13 @@ NORET_TYPE void do_exit(long code)
59847 */
59848 set_fs(USER_DS);
59849
59850+ profile_task_exit(tsk);
59851+
59852+ WARN_ON(atomic_read(&tsk->fs_excl));
59853+
59854+ if (unlikely(!tsk->pid))
59855+ panic("Attempted to kill the idle task!");
59856+
59857 tracehook_report_exit(&code);
59858
59859 validate_creds_for_do_exit(tsk);
59860@@ -973,6 +1008,9 @@ NORET_TYPE void do_exit(long code)
59861 tsk->exit_code = code;
59862 taskstats_exit(tsk, group_dead);
59863
59864+ gr_acl_handle_psacct(tsk, code);
59865+ gr_acl_handle_exit();
59866+
59867 exit_mm(tsk);
59868
59869 if (group_dead)
59870@@ -1188,7 +1226,7 @@ static int wait_task_zombie(struct wait_
59871
59872 if (unlikely(wo->wo_flags & WNOWAIT)) {
59873 int exit_code = p->exit_code;
59874- int why, status;
59875+ int why;
59876
59877 get_task_struct(p);
59878 read_unlock(&tasklist_lock);
59879diff -urNp linux-2.6.32.43/kernel/fork.c linux-2.6.32.43/kernel/fork.c
59880--- linux-2.6.32.43/kernel/fork.c 2011-03-27 14:31:47.000000000 -0400
59881+++ linux-2.6.32.43/kernel/fork.c 2011-04-17 15:56:46.000000000 -0400
59882@@ -253,7 +253,7 @@ static struct task_struct *dup_task_stru
59883 *stackend = STACK_END_MAGIC; /* for overflow detection */
59884
59885 #ifdef CONFIG_CC_STACKPROTECTOR
59886- tsk->stack_canary = get_random_int();
59887+ tsk->stack_canary = pax_get_random_long();
59888 #endif
59889
59890 /* One for us, one for whoever does the "release_task()" (usually parent) */
59891@@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm
59892 mm->locked_vm = 0;
59893 mm->mmap = NULL;
59894 mm->mmap_cache = NULL;
59895- mm->free_area_cache = oldmm->mmap_base;
59896- mm->cached_hole_size = ~0UL;
59897+ mm->free_area_cache = oldmm->free_area_cache;
59898+ mm->cached_hole_size = oldmm->cached_hole_size;
59899 mm->map_count = 0;
59900 cpumask_clear(mm_cpumask(mm));
59901 mm->mm_rb = RB_ROOT;
59902@@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm
59903 tmp->vm_flags &= ~VM_LOCKED;
59904 tmp->vm_mm = mm;
59905 tmp->vm_next = tmp->vm_prev = NULL;
59906+ tmp->vm_mirror = NULL;
59907 anon_vma_link(tmp);
59908 file = tmp->vm_file;
59909 if (file) {
59910@@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm
59911 if (retval)
59912 goto out;
59913 }
59914+
59915+#ifdef CONFIG_PAX_SEGMEXEC
59916+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
59917+ struct vm_area_struct *mpnt_m;
59918+
59919+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
59920+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
59921+
59922+ if (!mpnt->vm_mirror)
59923+ continue;
59924+
59925+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
59926+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
59927+ mpnt->vm_mirror = mpnt_m;
59928+ } else {
59929+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
59930+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
59931+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
59932+ mpnt->vm_mirror->vm_mirror = mpnt;
59933+ }
59934+ }
59935+ BUG_ON(mpnt_m);
59936+ }
59937+#endif
59938+
59939 /* a new mm has just been created */
59940 arch_dup_mmap(oldmm, mm);
59941 retval = 0;
59942@@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_f
59943 write_unlock(&fs->lock);
59944 return -EAGAIN;
59945 }
59946- fs->users++;
59947+ atomic_inc(&fs->users);
59948 write_unlock(&fs->lock);
59949 return 0;
59950 }
59951 tsk->fs = copy_fs_struct(fs);
59952 if (!tsk->fs)
59953 return -ENOMEM;
59954+ gr_set_chroot_entries(tsk, &tsk->fs->root);
59955 return 0;
59956 }
59957
59958@@ -1033,10 +1060,13 @@ static struct task_struct *copy_process(
59959 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
59960 #endif
59961 retval = -EAGAIN;
59962+
59963+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
59964+
59965 if (atomic_read(&p->real_cred->user->processes) >=
59966 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
59967- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
59968- p->real_cred->user != INIT_USER)
59969+ if (p->real_cred->user != INIT_USER &&
59970+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
59971 goto bad_fork_free;
59972 }
59973
59974@@ -1183,6 +1213,8 @@ static struct task_struct *copy_process(
59975 goto bad_fork_free_pid;
59976 }
59977
59978+ gr_copy_label(p);
59979+
59980 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
59981 /*
59982 * Clear TID on mm_release()?
59983@@ -1333,6 +1365,8 @@ bad_fork_cleanup_count:
59984 bad_fork_free:
59985 free_task(p);
59986 fork_out:
59987+ gr_log_forkfail(retval);
59988+
59989 return ERR_PTR(retval);
59990 }
59991
59992@@ -1426,6 +1460,8 @@ long do_fork(unsigned long clone_flags,
59993 if (clone_flags & CLONE_PARENT_SETTID)
59994 put_user(nr, parent_tidptr);
59995
59996+ gr_handle_brute_check();
59997+
59998 if (clone_flags & CLONE_VFORK) {
59999 p->vfork_done = &vfork;
60000 init_completion(&vfork);
60001@@ -1558,7 +1594,7 @@ static int unshare_fs(unsigned long unsh
60002 return 0;
60003
60004 /* don't need lock here; in the worst case we'll do useless copy */
60005- if (fs->users == 1)
60006+ if (atomic_read(&fs->users) == 1)
60007 return 0;
60008
60009 *new_fsp = copy_fs_struct(fs);
60010@@ -1681,7 +1717,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
60011 fs = current->fs;
60012 write_lock(&fs->lock);
60013 current->fs = new_fs;
60014- if (--fs->users)
60015+ gr_set_chroot_entries(current, &current->fs->root);
60016+ if (atomic_dec_return(&fs->users))
60017 new_fs = NULL;
60018 else
60019 new_fs = fs;
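
/*
 * The RLIMIT_NPROC hunk in copy_process() only reorders the
 * short-circuit: the cheap "is this INIT_USER?" test now runs before the
 * capable() calls, so a task owned by init's user struct no longer
 * triggers capability checks (and, with grsecurity, capability logging)
 * every time the limit is hit.  The && ordering carries that behaviour;
 * a userspace model:
 */
#include <stdbool.h>
#include <stdio.h>

static int capable_calls;

static bool fake_capable(int cap)
{
        capable_calls++;                /* the side effect we want to avoid */
        (void)cap;
        return false;
}

int main(void)
{
        bool is_init_user = true;       /* pretend the user struct is INIT_USER */

        /* new order: the capable() probes never run for INIT_USER */
        if (!is_init_user && !fake_capable(24) && !fake_capable(21))
                puts("fork denied");
        printf("capable() was called %d time(s)\n", capable_calls);
        return 0;
}
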
60020diff -urNp linux-2.6.32.43/kernel/futex.c linux-2.6.32.43/kernel/futex.c
60021--- linux-2.6.32.43/kernel/futex.c 2011-03-27 14:31:47.000000000 -0400
60022+++ linux-2.6.32.43/kernel/futex.c 2011-05-16 21:46:57.000000000 -0400
60023@@ -54,6 +54,7 @@
60024 #include <linux/mount.h>
60025 #include <linux/pagemap.h>
60026 #include <linux/syscalls.h>
60027+#include <linux/ptrace.h>
60028 #include <linux/signal.h>
60029 #include <linux/module.h>
60030 #include <linux/magic.h>
60031@@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh
60032 struct page *page;
60033 int err;
60034
60035+#ifdef CONFIG_PAX_SEGMEXEC
60036+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
60037+ return -EFAULT;
60038+#endif
60039+
60040 /*
60041 * The futex address must be "naturally" aligned.
60042 */
60043@@ -1789,6 +1795,8 @@ static int futex_wait(u32 __user *uaddr,
60044 struct futex_q q;
60045 int ret;
60046
60047+ pax_track_stack();
60048+
60049 if (!bitset)
60050 return -EINVAL;
60051
60052@@ -1841,7 +1849,7 @@ retry:
60053
60054 restart = &current_thread_info()->restart_block;
60055 restart->fn = futex_wait_restart;
60056- restart->futex.uaddr = (u32 *)uaddr;
60057+ restart->futex.uaddr = uaddr;
60058 restart->futex.val = val;
60059 restart->futex.time = abs_time->tv64;
60060 restart->futex.bitset = bitset;
60061@@ -2203,6 +2211,8 @@ static int futex_wait_requeue_pi(u32 __u
60062 struct futex_q q;
60063 int res, ret;
60064
60065+ pax_track_stack();
60066+
60067 if (!bitset)
60068 return -EINVAL;
60069
60070@@ -2377,7 +2387,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
60071 {
60072 struct robust_list_head __user *head;
60073 unsigned long ret;
60074+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
60075 const struct cred *cred = current_cred(), *pcred;
60076+#endif
60077
60078 if (!futex_cmpxchg_enabled)
60079 return -ENOSYS;
60080@@ -2393,11 +2405,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
60081 if (!p)
60082 goto err_unlock;
60083 ret = -EPERM;
60084+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60085+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
60086+ goto err_unlock;
60087+#else
60088 pcred = __task_cred(p);
60089 if (cred->euid != pcred->euid &&
60090 cred->euid != pcred->uid &&
60091 !capable(CAP_SYS_PTRACE))
60092 goto err_unlock;
60093+#endif
60094 head = p->robust_list;
60095 rcu_read_unlock();
60096 }
60097@@ -2459,7 +2476,7 @@ retry:
60098 */
60099 static inline int fetch_robust_entry(struct robust_list __user **entry,
60100 struct robust_list __user * __user *head,
60101- int *pi)
60102+ unsigned int *pi)
60103 {
60104 unsigned long uentry;
60105
60106@@ -2640,6 +2657,7 @@ static int __init futex_init(void)
60107 {
60108 u32 curval;
60109 int i;
60110+ mm_segment_t oldfs;
60111
60112 /*
60113 * This will fail and we want it. Some arch implementations do
60114@@ -2651,7 +2669,10 @@ static int __init futex_init(void)
60115 * implementation, the non functional ones will return
60116 * -ENOSYS.
60117 */
60118+ oldfs = get_fs();
60119+ set_fs(USER_DS);
60120 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
60121+ set_fs(oldfs);
60122 if (curval == -EFAULT)
60123 futex_cmpxchg_enabled = 1;
60124
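
/*
 * With CONFIG_GRKERNSEC_PROC_MEMMAP, get_robust_list() on another task
 * is now gated by ptrace_may_access() instead of a bare euid comparison,
 * closing off an easy way to learn an address inside a victim's address
 * space.  A userspace probe for that syscall (glibc has no wrapper, so
 * it goes through syscall(2); EPERM is the new ptrace-based denial):
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

int main(int argc, char **argv)
{
        pid_t pid = argc > 1 ? (pid_t)atoi(argv[1]) : 0;  /* 0 = current task */
        void *head = NULL;
        size_t len = 0;

        if (syscall(SYS_get_robust_list, pid, &head, &len) != 0) {
                fprintf(stderr, "get_robust_list: %s\n", strerror(errno));
                return 1;
        }
        printf("robust list head for pid %d: %p (len %zu)\n",
               pid ? pid : getpid(), head, len);
        return 0;
}
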
60125diff -urNp linux-2.6.32.43/kernel/futex_compat.c linux-2.6.32.43/kernel/futex_compat.c
60126--- linux-2.6.32.43/kernel/futex_compat.c 2011-03-27 14:31:47.000000000 -0400
60127+++ linux-2.6.32.43/kernel/futex_compat.c 2011-04-17 15:56:46.000000000 -0400
60128@@ -10,6 +10,7 @@
60129 #include <linux/compat.h>
60130 #include <linux/nsproxy.h>
60131 #include <linux/futex.h>
60132+#include <linux/ptrace.h>
60133
60134 #include <asm/uaccess.h>
60135
60136@@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
60137 {
60138 struct compat_robust_list_head __user *head;
60139 unsigned long ret;
60140- const struct cred *cred = current_cred(), *pcred;
60141+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
60142+ const struct cred *cred = current_cred();
60143+ const struct cred *pcred;
60144+#endif
60145
60146 if (!futex_cmpxchg_enabled)
60147 return -ENOSYS;
60148@@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
60149 if (!p)
60150 goto err_unlock;
60151 ret = -EPERM;
60152+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
60153+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
60154+ goto err_unlock;
60155+#else
60156 pcred = __task_cred(p);
60157 if (cred->euid != pcred->euid &&
60158 cred->euid != pcred->uid &&
60159 !capable(CAP_SYS_PTRACE))
60160 goto err_unlock;
60161+#endif
60162 head = p->compat_robust_list;
60163 read_unlock(&tasklist_lock);
60164 }
60165diff -urNp linux-2.6.32.43/kernel/gcov/base.c linux-2.6.32.43/kernel/gcov/base.c
60166--- linux-2.6.32.43/kernel/gcov/base.c 2011-03-27 14:31:47.000000000 -0400
60167+++ linux-2.6.32.43/kernel/gcov/base.c 2011-04-17 15:56:46.000000000 -0400
60168@@ -102,11 +102,6 @@ void gcov_enable_events(void)
60169 }
60170
60171 #ifdef CONFIG_MODULES
60172-static inline int within(void *addr, void *start, unsigned long size)
60173-{
60174- return ((addr >= start) && (addr < start + size));
60175-}
60176-
60177 /* Update list and generate events when modules are unloaded. */
60178 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
60179 void *data)
60180@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
60181 prev = NULL;
60182 /* Remove entries located in module from linked list. */
60183 for (info = gcov_info_head; info; info = info->next) {
60184- if (within(info, mod->module_core, mod->core_size)) {
60185+ if (within_module_core_rw((unsigned long)info, mod)) {
60186 if (prev)
60187 prev->next = info->next;
60188 else
60189diff -urNp linux-2.6.32.43/kernel/hrtimer.c linux-2.6.32.43/kernel/hrtimer.c
60190--- linux-2.6.32.43/kernel/hrtimer.c 2011-03-27 14:31:47.000000000 -0400
60191+++ linux-2.6.32.43/kernel/hrtimer.c 2011-04-17 15:56:46.000000000 -0400
60192@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
60193 local_irq_restore(flags);
60194 }
60195
60196-static void run_hrtimer_softirq(struct softirq_action *h)
60197+static void run_hrtimer_softirq(void)
60198 {
60199 hrtimer_peek_ahead_timers();
60200 }
60201diff -urNp linux-2.6.32.43/kernel/kallsyms.c linux-2.6.32.43/kernel/kallsyms.c
60202--- linux-2.6.32.43/kernel/kallsyms.c 2011-03-27 14:31:47.000000000 -0400
60203+++ linux-2.6.32.43/kernel/kallsyms.c 2011-04-17 15:56:46.000000000 -0400
60204@@ -11,6 +11,9 @@
60205 * Changed the compression method from stem compression to "table lookup"
60206 * compression (see scripts/kallsyms.c for a more complete description)
60207 */
60208+#ifdef CONFIG_GRKERNSEC_HIDESYM
60209+#define __INCLUDED_BY_HIDESYM 1
60210+#endif
60211 #include <linux/kallsyms.h>
60212 #include <linux/module.h>
60213 #include <linux/init.h>
60214@@ -51,12 +54,33 @@ extern const unsigned long kallsyms_mark
60215
60216 static inline int is_kernel_inittext(unsigned long addr)
60217 {
60218+ if (system_state != SYSTEM_BOOTING)
60219+ return 0;
60220+
60221 if (addr >= (unsigned long)_sinittext
60222 && addr <= (unsigned long)_einittext)
60223 return 1;
60224 return 0;
60225 }
60226
60227+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
60228+#ifdef CONFIG_MODULES
60229+static inline int is_module_text(unsigned long addr)
60230+{
60231+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
60232+ return 1;
60233+
60234+ addr = ktla_ktva(addr);
60235+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
60236+}
60237+#else
60238+static inline int is_module_text(unsigned long addr)
60239+{
60240+ return 0;
60241+}
60242+#endif
60243+#endif
60244+
60245 static inline int is_kernel_text(unsigned long addr)
60246 {
60247 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
60248@@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigne
60249
60250 static inline int is_kernel(unsigned long addr)
60251 {
60252+
60253+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
60254+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
60255+ return 1;
60256+
60257+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
60258+#else
60259 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
60260+#endif
60261+
60262 return 1;
60263 return in_gate_area_no_task(addr);
60264 }
60265
60266 static int is_ksym_addr(unsigned long addr)
60267 {
60268+
60269+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
60270+ if (is_module_text(addr))
60271+ return 0;
60272+#endif
60273+
60274 if (all_var)
60275 return is_kernel(addr);
60276
60277@@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(st
60278
60279 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
60280 {
60281- iter->name[0] = '\0';
60282 iter->nameoff = get_symbol_offset(new_pos);
60283 iter->pos = new_pos;
60284 }
60285@@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, vo
60286 {
60287 struct kallsym_iter *iter = m->private;
60288
60289+#ifdef CONFIG_GRKERNSEC_HIDESYM
60290+ if (current_uid())
60291+ return 0;
60292+#endif
60293+
60294 /* Some debugging symbols have no name. Ignore them. */
60295 if (!iter->name[0])
60296 return 0;
60297@@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *i
60298 struct kallsym_iter *iter;
60299 int ret;
60300
60301- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
60302+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
60303 if (!iter)
60304 return -ENOMEM;
60305 reset_iter(iter, 0);
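
/*
 * With CONFIG_GRKERNSEC_HIDESYM the s_show() hunk above makes
 * /proc/kallsyms come back empty for non-root readers, and the
 * kmalloc->kzalloc change keeps the iterator from carrying stale heap
 * bytes into the output now that reset_iter() no longer clears name[0].
 * A quick userspace check of what a given uid actually sees:
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
        FILE *f = fopen("/proc/kallsyms", "r");
        char line[256];
        unsigned long lines = 0;

        if (!f) {
                perror("fopen /proc/kallsyms");
                return 1;
        }
        while (fgets(line, sizeof(line), f))
                lines++;
        fclose(f);

        printf("uid %u sees %lu symbol line(s)\n",
               (unsigned)getuid(), lines);
        return 0;
}
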
60306diff -urNp linux-2.6.32.43/kernel/kgdb.c linux-2.6.32.43/kernel/kgdb.c
60307--- linux-2.6.32.43/kernel/kgdb.c 2011-04-17 17:00:52.000000000 -0400
60308+++ linux-2.6.32.43/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
60309@@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
60310 /* Guard for recursive entry */
60311 static int exception_level;
60312
60313-static struct kgdb_io *kgdb_io_ops;
60314+static const struct kgdb_io *kgdb_io_ops;
60315 static DEFINE_SPINLOCK(kgdb_registration_lock);
60316
60317 /* kgdb console driver is loaded */
60318@@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1)
60319 */
60320 static atomic_t passive_cpu_wait[NR_CPUS];
60321 static atomic_t cpu_in_kgdb[NR_CPUS];
60322-atomic_t kgdb_setting_breakpoint;
60323+atomic_unchecked_t kgdb_setting_breakpoint;
60324
60325 struct task_struct *kgdb_usethread;
60326 struct task_struct *kgdb_contthread;
60327@@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY
60328 sizeof(unsigned long)];
60329
60330 /* to keep track of the CPU which is doing the single stepping*/
60331-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
60332+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
60333
60334 /*
60335 * If you are debugging a problem where roundup (the collection of
60336@@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
60337 return 0;
60338 if (kgdb_connected)
60339 return 1;
60340- if (atomic_read(&kgdb_setting_breakpoint))
60341+ if (atomic_read_unchecked(&kgdb_setting_breakpoint))
60342 return 1;
60343 if (print_wait)
60344 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
60345@@ -1426,8 +1426,8 @@ acquirelock:
60346 * instance of the exception handler wanted to come into the
60347 * debugger on a different CPU via a single step
60348 */
60349- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
60350- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
60351+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
60352+ atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
60353
60354 atomic_set(&kgdb_active, -1);
60355 touch_softlockup_watchdog();
60356@@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void
60357 *
60358 * Register it with the KGDB core.
60359 */
60360-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
60361+int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
60362 {
60363 int err;
60364
60365@@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
60366 *
60367 * Unregister it with the KGDB core.
60368 */
60369-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
60370+void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
60371 {
60372 BUG_ON(kgdb_connected);
60373
60374@@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
60375 */
60376 void kgdb_breakpoint(void)
60377 {
60378- atomic_set(&kgdb_setting_breakpoint, 1);
60379+ atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
60380 wmb(); /* Sync point before breakpoint */
60381 arch_kgdb_breakpoint();
60382 wmb(); /* Sync point after breakpoint */
60383- atomic_set(&kgdb_setting_breakpoint, 0);
60384+ atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
60385 }
60386 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
60387
60388diff -urNp linux-2.6.32.43/kernel/kmod.c linux-2.6.32.43/kernel/kmod.c
60389--- linux-2.6.32.43/kernel/kmod.c 2011-03-27 14:31:47.000000000 -0400
60390+++ linux-2.6.32.43/kernel/kmod.c 2011-04-17 15:56:46.000000000 -0400
60391@@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
60392 * If module auto-loading support is disabled then this function
60393 * becomes a no-operation.
60394 */
60395-int __request_module(bool wait, const char *fmt, ...)
60396+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
60397 {
60398- va_list args;
60399 char module_name[MODULE_NAME_LEN];
60400 unsigned int max_modprobes;
60401 int ret;
60402- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
60403+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
60404 static char *envp[] = { "HOME=/",
60405 "TERM=linux",
60406 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
60407@@ -84,12 +83,24 @@ int __request_module(bool wait, const ch
60408 if (ret)
60409 return ret;
60410
60411- va_start(args, fmt);
60412- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
60413- va_end(args);
60414+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
60415 if (ret >= MODULE_NAME_LEN)
60416 return -ENAMETOOLONG;
60417
60418+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60419+ if (!current_uid()) {
60420+ /* hack to workaround consolekit/udisks stupidity */
60421+ read_lock(&tasklist_lock);
60422+ if (!strcmp(current->comm, "mount") &&
60423+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
60424+ read_unlock(&tasklist_lock);
60425+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
60426+ return -EPERM;
60427+ }
60428+ read_unlock(&tasklist_lock);
60429+ }
60430+#endif
60431+
60432 /* If modprobe needs a service that is in a module, we get a recursive
60433 * loop. Limit the number of running kmod threads to max_threads/2 or
60434 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
60435@@ -121,6 +132,48 @@ int __request_module(bool wait, const ch
60436 atomic_dec(&kmod_concurrent);
60437 return ret;
60438 }
60439+
60440+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
60441+{
60442+ va_list args;
60443+ int ret;
60444+
60445+ va_start(args, fmt);
60446+ ret = ____request_module(wait, module_param, fmt, args);
60447+ va_end(args);
60448+
60449+ return ret;
60450+}
60451+
60452+int __request_module(bool wait, const char *fmt, ...)
60453+{
60454+ va_list args;
60455+ int ret;
60456+
60457+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60458+ if (current_uid()) {
60459+ char module_param[MODULE_NAME_LEN];
60460+
60461+ memset(module_param, 0, sizeof(module_param));
60462+
60463+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
60464+
60465+ va_start(args, fmt);
60466+ ret = ____request_module(wait, module_param, fmt, args);
60467+ va_end(args);
60468+
60469+ return ret;
60470+ }
60471+#endif
60472+
60473+ va_start(args, fmt);
60474+ ret = ____request_module(wait, NULL, fmt, args);
60475+ va_end(args);
60476+
60477+ return ret;
60478+}
60479+
60480+
60481 EXPORT_SYMBOL(__request_module);
60482 #endif /* CONFIG_MODULES */
60483
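
/*
 * The MODHARDEN rework threads an optional extra argument into the
 * modprobe command line: ____request_module() takes a module_param
 * string, and __request_module() fills it with
 * "grsec_modharden_normal<uid>_" when the auto-load was triggered by a
 * non-root user, so policy elsewhere in the patch can tell such loads
 * apart.  A userspace model of how that argv is assembled (illustrative
 * names only, not the kernel's call_usermodehelper() plumbing; a NULL
 * module_param simply terminates the argv early, as in the kernel array):
 */
#include <stdio.h>
#include <unistd.h>

static void show_modprobe_argv(const char *module, const char *module_param)
{
        const char *argv[] = { "/sbin/modprobe", "-q", "--",
                               module, module_param, NULL };

        for (int i = 0; argv[i]; i++)
                printf("argv[%d] = %s\n", i, argv[i]);
}

int main(void)
{
        char param[64];

        if (getuid() != 0) {
                snprintf(param, sizeof(param),
                         "grsec_modharden_normal%u_", (unsigned)getuid());
                show_modprobe_argv("net-pf-10", param);
        } else {
                show_modprobe_argv("net-pf-10", NULL);
        }
        return 0;
}
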
60484diff -urNp linux-2.6.32.43/kernel/kprobes.c linux-2.6.32.43/kernel/kprobes.c
60485--- linux-2.6.32.43/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
60486+++ linux-2.6.32.43/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
60487@@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
60488 * kernel image and loaded module images reside. This is required
60489 * so x86_64 can correctly handle the %rip-relative fixups.
60490 */
60491- kip->insns = module_alloc(PAGE_SIZE);
60492+ kip->insns = module_alloc_exec(PAGE_SIZE);
60493 if (!kip->insns) {
60494 kfree(kip);
60495 return NULL;
60496@@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(st
60497 */
60498 if (!list_is_singular(&kprobe_insn_pages)) {
60499 list_del(&kip->list);
60500- module_free(NULL, kip->insns);
60501+ module_free_exec(NULL, kip->insns);
60502 kfree(kip);
60503 }
60504 return 1;
60505@@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
60506 {
60507 int i, err = 0;
60508 unsigned long offset = 0, size = 0;
60509- char *modname, namebuf[128];
60510+ char *modname, namebuf[KSYM_NAME_LEN];
60511 const char *symbol_name;
60512 void *addr;
60513 struct kprobe_blackpoint *kb;
60514@@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(st
60515 const char *sym = NULL;
60516 unsigned int i = *(loff_t *) v;
60517 unsigned long offset = 0;
60518- char *modname, namebuf[128];
60519+ char *modname, namebuf[KSYM_NAME_LEN];
60520
60521 head = &kprobe_table[i];
60522 preempt_disable();
60523diff -urNp linux-2.6.32.43/kernel/lockdep.c linux-2.6.32.43/kernel/lockdep.c
60524--- linux-2.6.32.43/kernel/lockdep.c 2011-06-25 12:55:35.000000000 -0400
60525+++ linux-2.6.32.43/kernel/lockdep.c 2011-06-25 12:56:37.000000000 -0400
60526@@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t
60527 /*
60528 * Various lockdep statistics:
60529 */
60530-atomic_t chain_lookup_hits;
60531-atomic_t chain_lookup_misses;
60532-atomic_t hardirqs_on_events;
60533-atomic_t hardirqs_off_events;
60534-atomic_t redundant_hardirqs_on;
60535-atomic_t redundant_hardirqs_off;
60536-atomic_t softirqs_on_events;
60537-atomic_t softirqs_off_events;
60538-atomic_t redundant_softirqs_on;
60539-atomic_t redundant_softirqs_off;
60540-atomic_t nr_unused_locks;
60541-atomic_t nr_cyclic_checks;
60542-atomic_t nr_find_usage_forwards_checks;
60543-atomic_t nr_find_usage_backwards_checks;
60544+atomic_unchecked_t chain_lookup_hits;
60545+atomic_unchecked_t chain_lookup_misses;
60546+atomic_unchecked_t hardirqs_on_events;
60547+atomic_unchecked_t hardirqs_off_events;
60548+atomic_unchecked_t redundant_hardirqs_on;
60549+atomic_unchecked_t redundant_hardirqs_off;
60550+atomic_unchecked_t softirqs_on_events;
60551+atomic_unchecked_t softirqs_off_events;
60552+atomic_unchecked_t redundant_softirqs_on;
60553+atomic_unchecked_t redundant_softirqs_off;
60554+atomic_unchecked_t nr_unused_locks;
60555+atomic_unchecked_t nr_cyclic_checks;
60556+atomic_unchecked_t nr_find_usage_forwards_checks;
60557+atomic_unchecked_t nr_find_usage_backwards_checks;
60558 #endif
60559
60560 /*
60561@@ -577,6 +577,10 @@ static int static_obj(void *obj)
60562 int i;
60563 #endif
60564
60565+#ifdef CONFIG_PAX_KERNEXEC
60566+ start = ktla_ktva(start);
60567+#endif
60568+
60569 /*
60570 * static variable?
60571 */
60572@@ -592,8 +596,7 @@ static int static_obj(void *obj)
60573 */
60574 for_each_possible_cpu(i) {
60575 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
60576- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
60577- + per_cpu_offset(i);
60578+ end = start + PERCPU_ENOUGH_ROOM;
60579
60580 if ((addr >= start) && (addr < end))
60581 return 1;
60582@@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *
60583 if (!static_obj(lock->key)) {
60584 debug_locks_off();
60585 printk("INFO: trying to register non-static key.\n");
60586+ printk("lock:%pS key:%pS.\n", lock, lock->key);
60587 printk("the code is fine but needs lockdep annotation.\n");
60588 printk("turning off the locking correctness validator.\n");
60589 dump_stack();
60590@@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep
60591 if (!class)
60592 return 0;
60593 }
60594- debug_atomic_inc((atomic_t *)&class->ops);
60595+ debug_atomic_inc((atomic_unchecked_t *)&class->ops);
60596 if (very_verbose(class)) {
60597 printk("\nacquire class [%p] %s", class->key, class->name);
60598 if (class->name_version > 1)
60599diff -urNp linux-2.6.32.43/kernel/lockdep_internals.h linux-2.6.32.43/kernel/lockdep_internals.h
60600--- linux-2.6.32.43/kernel/lockdep_internals.h 2011-03-27 14:31:47.000000000 -0400
60601+++ linux-2.6.32.43/kernel/lockdep_internals.h 2011-04-17 15:56:46.000000000 -0400
60602@@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_
60603 /*
60604 * Various lockdep statistics:
60605 */
60606-extern atomic_t chain_lookup_hits;
60607-extern atomic_t chain_lookup_misses;
60608-extern atomic_t hardirqs_on_events;
60609-extern atomic_t hardirqs_off_events;
60610-extern atomic_t redundant_hardirqs_on;
60611-extern atomic_t redundant_hardirqs_off;
60612-extern atomic_t softirqs_on_events;
60613-extern atomic_t softirqs_off_events;
60614-extern atomic_t redundant_softirqs_on;
60615-extern atomic_t redundant_softirqs_off;
60616-extern atomic_t nr_unused_locks;
60617-extern atomic_t nr_cyclic_checks;
60618-extern atomic_t nr_cyclic_check_recursions;
60619-extern atomic_t nr_find_usage_forwards_checks;
60620-extern atomic_t nr_find_usage_forwards_recursions;
60621-extern atomic_t nr_find_usage_backwards_checks;
60622-extern atomic_t nr_find_usage_backwards_recursions;
60623-# define debug_atomic_inc(ptr) atomic_inc(ptr)
60624-# define debug_atomic_dec(ptr) atomic_dec(ptr)
60625-# define debug_atomic_read(ptr) atomic_read(ptr)
60626+extern atomic_unchecked_t chain_lookup_hits;
60627+extern atomic_unchecked_t chain_lookup_misses;
60628+extern atomic_unchecked_t hardirqs_on_events;
60629+extern atomic_unchecked_t hardirqs_off_events;
60630+extern atomic_unchecked_t redundant_hardirqs_on;
60631+extern atomic_unchecked_t redundant_hardirqs_off;
60632+extern atomic_unchecked_t softirqs_on_events;
60633+extern atomic_unchecked_t softirqs_off_events;
60634+extern atomic_unchecked_t redundant_softirqs_on;
60635+extern atomic_unchecked_t redundant_softirqs_off;
60636+extern atomic_unchecked_t nr_unused_locks;
60637+extern atomic_unchecked_t nr_cyclic_checks;
60638+extern atomic_unchecked_t nr_cyclic_check_recursions;
60639+extern atomic_unchecked_t nr_find_usage_forwards_checks;
60640+extern atomic_unchecked_t nr_find_usage_forwards_recursions;
60641+extern atomic_unchecked_t nr_find_usage_backwards_checks;
60642+extern atomic_unchecked_t nr_find_usage_backwards_recursions;
60643+# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
60644+# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
60645+# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
60646 #else
60647 # define debug_atomic_inc(ptr) do { } while (0)
60648 # define debug_atomic_dec(ptr) do { } while (0)
60649diff -urNp linux-2.6.32.43/kernel/lockdep_proc.c linux-2.6.32.43/kernel/lockdep_proc.c
60650--- linux-2.6.32.43/kernel/lockdep_proc.c 2011-03-27 14:31:47.000000000 -0400
60651+++ linux-2.6.32.43/kernel/lockdep_proc.c 2011-04-17 15:56:46.000000000 -0400
60652@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
60653
60654 static void print_name(struct seq_file *m, struct lock_class *class)
60655 {
60656- char str[128];
60657+ char str[KSYM_NAME_LEN];
60658 const char *name = class->name;
60659
60660 if (!name) {
60661diff -urNp linux-2.6.32.43/kernel/module.c linux-2.6.32.43/kernel/module.c
60662--- linux-2.6.32.43/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
60663+++ linux-2.6.32.43/kernel/module.c 2011-04-29 18:52:40.000000000 -0400
60664@@ -55,6 +55,7 @@
60665 #include <linux/async.h>
60666 #include <linux/percpu.h>
60667 #include <linux/kmemleak.h>
60668+#include <linux/grsecurity.h>
60669
60670 #define CREATE_TRACE_POINTS
60671 #include <trace/events/module.h>
60672@@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
60673 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
60674
60675 /* Bounds of module allocation, for speeding __module_address */
60676-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
60677+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
60678+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
60679
60680 int register_module_notifier(struct notifier_block * nb)
60681 {
60682@@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct
60683 return true;
60684
60685 list_for_each_entry_rcu(mod, &modules, list) {
60686- struct symsearch arr[] = {
60687+ struct symsearch modarr[] = {
60688 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
60689 NOT_GPL_ONLY, false },
60690 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
60691@@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct
60692 #endif
60693 };
60694
60695- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
60696+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
60697 return true;
60698 }
60699 return false;
60700@@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned lo
60701 void *ptr;
60702 int cpu;
60703
60704- if (align > PAGE_SIZE) {
60705+ if (align-1 >= PAGE_SIZE) {
60706 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
60707 name, align, PAGE_SIZE);
60708 align = PAGE_SIZE;
60709@@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resol
60710 * /sys/module/foo/sections stuff
60711 * J. Corbet <corbet@lwn.net>
60712 */
60713-#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
60714+#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
60715
60716 static inline bool sect_empty(const Elf_Shdr *sect)
60717 {
60718@@ -1545,7 +1547,8 @@ static void free_module(struct module *m
60719 destroy_params(mod->kp, mod->num_kp);
60720
60721 /* This may be NULL, but that's OK */
60722- module_free(mod, mod->module_init);
60723+ module_free(mod, mod->module_init_rw);
60724+ module_free_exec(mod, mod->module_init_rx);
60725 kfree(mod->args);
60726 if (mod->percpu)
60727 percpu_modfree(mod->percpu);
60728@@ -1554,10 +1557,12 @@ static void free_module(struct module *m
60729 percpu_modfree(mod->refptr);
60730 #endif
60731 /* Free lock-classes: */
60732- lockdep_free_key_range(mod->module_core, mod->core_size);
60733+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
60734+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
60735
60736 /* Finally, free the core (containing the module structure) */
60737- module_free(mod, mod->module_core);
60738+ module_free_exec(mod, mod->module_core_rx);
60739+ module_free(mod, mod->module_core_rw);
60740
60741 #ifdef CONFIG_MPU
60742 update_protections(current->mm);
60743@@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *se
60744 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
60745 int ret = 0;
60746 const struct kernel_symbol *ksym;
60747+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60748+ int is_fs_load = 0;
60749+ int register_filesystem_found = 0;
60750+ char *p;
60751+
60752+ p = strstr(mod->args, "grsec_modharden_fs");
60753+
60754+ if (p) {
60755+ char *endptr = p + strlen("grsec_modharden_fs");
60756+ /* copy \0 as well */
60757+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
60758+ is_fs_load = 1;
60759+ }
60760+#endif
60761+
60762
60763 for (i = 1; i < n; i++) {
60764+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60765+ const char *name = strtab + sym[i].st_name;
60766+
60767+ /* it's a real shame this will never get ripped and copied
60768+ upstream! ;(
60769+ */
60770+ if (is_fs_load && !strcmp(name, "register_filesystem"))
60771+ register_filesystem_found = 1;
60772+#endif
60773 switch (sym[i].st_shndx) {
60774 case SHN_COMMON:
60775 /* We compiled with -fno-common. These are not
60776@@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *se
60777 strtab + sym[i].st_name, mod);
60778 /* Ok if resolved. */
60779 if (ksym) {
60780+ pax_open_kernel();
60781 sym[i].st_value = ksym->value;
60782+ pax_close_kernel();
60783 break;
60784 }
60785
60786@@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *se
60787 secbase = (unsigned long)mod->percpu;
60788 else
60789 secbase = sechdrs[sym[i].st_shndx].sh_addr;
60790+ pax_open_kernel();
60791 sym[i].st_value += secbase;
60792+ pax_close_kernel();
60793 break;
60794 }
60795 }
60796
60797+#ifdef CONFIG_GRKERNSEC_MODHARDEN
60798+ if (is_fs_load && !register_filesystem_found) {
60799+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
60800+ ret = -EPERM;
60801+ }
60802+#endif
60803+
60804 return ret;
60805 }
60806
60807@@ -1731,11 +1771,12 @@ static void layout_sections(struct modul
60808 || s->sh_entsize != ~0UL
60809 || strstarts(secstrings + s->sh_name, ".init"))
60810 continue;
60811- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
60812+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60813+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
60814+ else
60815+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
60816 DEBUGP("\t%s\n", secstrings + s->sh_name);
60817 }
60818- if (m == 0)
60819- mod->core_text_size = mod->core_size;
60820 }
60821
60822 DEBUGP("Init section allocation order:\n");
60823@@ -1748,12 +1789,13 @@ static void layout_sections(struct modul
60824 || s->sh_entsize != ~0UL
60825 || !strstarts(secstrings + s->sh_name, ".init"))
60826 continue;
60827- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
60828- | INIT_OFFSET_MASK);
60829+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
60830+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
60831+ else
60832+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
60833+ s->sh_entsize |= INIT_OFFSET_MASK;
60834 DEBUGP("\t%s\n", secstrings + s->sh_name);
60835 }
60836- if (m == 0)
60837- mod->init_text_size = mod->init_size;
60838 }
60839 }
60840
60841@@ -1857,9 +1899,8 @@ static int is_exported(const char *name,
60842
60843 /* As per nm */
60844 static char elf_type(const Elf_Sym *sym,
60845- Elf_Shdr *sechdrs,
60846- const char *secstrings,
60847- struct module *mod)
60848+ const Elf_Shdr *sechdrs,
60849+ const char *secstrings)
60850 {
60851 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
60852 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
60853@@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struc
60854
60855 /* Put symbol section at end of init part of module. */
60856 symsect->sh_flags |= SHF_ALLOC;
60857- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
60858+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
60859 symindex) | INIT_OFFSET_MASK;
60860 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
60861
60862@@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struc
60863 }
60864
60865 /* Append room for core symbols at end of core part. */
60866- symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
60867- mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
60868+ symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
60869+ mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
60870
60871 /* Put string table section at end of init part of module. */
60872 strsect->sh_flags |= SHF_ALLOC;
60873- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
60874+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
60875 strindex) | INIT_OFFSET_MASK;
60876 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
60877
60878 /* Append room for core symbols' strings at end of core part. */
60879- *pstroffs = mod->core_size;
60880+ *pstroffs = mod->core_size_rx;
60881 __set_bit(0, strmap);
60882- mod->core_size += bitmap_weight(strmap, strsect->sh_size);
60883+ mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
60884
60885 return symoffs;
60886 }
60887@@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *
60888 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
60889 mod->strtab = (void *)sechdrs[strindex].sh_addr;
60890
60891+ pax_open_kernel();
60892+
60893 /* Set types up while we still have access to sections. */
60894 for (i = 0; i < mod->num_symtab; i++)
60895 mod->symtab[i].st_info
60896- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
60897+ = elf_type(&mod->symtab[i], sechdrs, secstrings);
60898
60899- mod->core_symtab = dst = mod->module_core + symoffs;
60900+ mod->core_symtab = dst = mod->module_core_rx + symoffs;
60901 src = mod->symtab;
60902 *dst = *src;
60903 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
60904@@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *
60905 }
60906 mod->core_num_syms = ndst;
60907
60908- mod->core_strtab = s = mod->module_core + stroffs;
60909+ mod->core_strtab = s = mod->module_core_rx + stroffs;
60910 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
60911 if (test_bit(i, strmap))
60912 *++s = mod->strtab[i];
60913+
60914+ pax_close_kernel();
60915 }
60916 #else
60917 static inline unsigned long layout_symtab(struct module *mod,
60918@@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _
60919 #endif
60920 }
60921
60922-static void *module_alloc_update_bounds(unsigned long size)
60923+static void *module_alloc_update_bounds_rw(unsigned long size)
60924 {
60925 void *ret = module_alloc(size);
60926
60927 if (ret) {
60928 /* Update module bounds. */
60929- if ((unsigned long)ret < module_addr_min)
60930- module_addr_min = (unsigned long)ret;
60931- if ((unsigned long)ret + size > module_addr_max)
60932- module_addr_max = (unsigned long)ret + size;
60933+ if ((unsigned long)ret < module_addr_min_rw)
60934+ module_addr_min_rw = (unsigned long)ret;
60935+ if ((unsigned long)ret + size > module_addr_max_rw)
60936+ module_addr_max_rw = (unsigned long)ret + size;
60937+ }
60938+ return ret;
60939+}
60940+
60941+static void *module_alloc_update_bounds_rx(unsigned long size)
60942+{
60943+ void *ret = module_alloc_exec(size);
60944+
60945+ if (ret) {
60946+ /* Update module bounds. */
60947+ if ((unsigned long)ret < module_addr_min_rx)
60948+ module_addr_min_rx = (unsigned long)ret;
60949+ if ((unsigned long)ret + size > module_addr_max_rx)
60950+ module_addr_max_rx = (unsigned long)ret + size;
60951 }
60952 return ret;
60953 }
60954@@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct
60955 unsigned int i;
60956
60957 /* only scan the sections containing data */
60958- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
60959- (unsigned long)mod->module_core,
60960+ kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
60961+ (unsigned long)mod->module_core_rw,
60962 sizeof(struct module), GFP_KERNEL);
60963
60964 for (i = 1; i < hdr->e_shnum; i++) {
60965@@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct
60966 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
60967 continue;
60968
60969- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
60970- (unsigned long)mod->module_core,
60971+ kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
60972+ (unsigned long)mod->module_core_rw,
60973 sechdrs[i].sh_size, GFP_KERNEL);
60974 }
60975 }
60976@@ -2263,7 +2322,7 @@ static noinline struct module *load_modu
60977 secstrings, &stroffs, strmap);
60978
60979 /* Do the allocs. */
60980- ptr = module_alloc_update_bounds(mod->core_size);
60981+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
60982 /*
60983 * The pointer to this block is stored in the module structure
60984 * which is inside the block. Just mark it as not being a
60985@@ -2274,23 +2333,47 @@ static noinline struct module *load_modu
60986 err = -ENOMEM;
60987 goto free_percpu;
60988 }
60989- memset(ptr, 0, mod->core_size);
60990- mod->module_core = ptr;
60991+ memset(ptr, 0, mod->core_size_rw);
60992+ mod->module_core_rw = ptr;
60993
60994- ptr = module_alloc_update_bounds(mod->init_size);
60995+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
60996 /*
60997 * The pointer to this block is stored in the module structure
60998 * which is inside the block. This block doesn't need to be
60999 * scanned as it contains data and code that will be freed
61000 * after the module is initialized.
61001 */
61002- kmemleak_ignore(ptr);
61003- if (!ptr && mod->init_size) {
61004+ kmemleak_not_leak(ptr);
61005+ if (!ptr && mod->init_size_rw) {
61006+ err = -ENOMEM;
61007+ goto free_core_rw;
61008+ }
61009+ memset(ptr, 0, mod->init_size_rw);
61010+ mod->module_init_rw = ptr;
61011+
61012+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
61013+ kmemleak_not_leak(ptr);
61014+ if (!ptr) {
61015 err = -ENOMEM;
61016- goto free_core;
61017+ goto free_init_rw;
61018 }
61019- memset(ptr, 0, mod->init_size);
61020- mod->module_init = ptr;
61021+
61022+ pax_open_kernel();
61023+ memset(ptr, 0, mod->core_size_rx);
61024+ pax_close_kernel();
61025+ mod->module_core_rx = ptr;
61026+
61027+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
61028+ kmemleak_not_leak(ptr);
61029+ if (!ptr && mod->init_size_rx) {
61030+ err = -ENOMEM;
61031+ goto free_core_rx;
61032+ }
61033+
61034+ pax_open_kernel();
61035+ memset(ptr, 0, mod->init_size_rx);
61036+ pax_close_kernel();
61037+ mod->module_init_rx = ptr;
61038
61039 /* Transfer each section which specifies SHF_ALLOC */
61040 DEBUGP("final section addresses:\n");
61041@@ -2300,17 +2383,45 @@ static noinline struct module *load_modu
61042 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
61043 continue;
61044
61045- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
61046- dest = mod->module_init
61047- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
61048- else
61049- dest = mod->module_core + sechdrs[i].sh_entsize;
61050+ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
61051+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
61052+ dest = mod->module_init_rw
61053+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
61054+ else
61055+ dest = mod->module_init_rx
61056+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
61057+ } else {
61058+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
61059+ dest = mod->module_core_rw + sechdrs[i].sh_entsize;
61060+ else
61061+ dest = mod->module_core_rx + sechdrs[i].sh_entsize;
61062+ }
61063+
61064+ if (sechdrs[i].sh_type != SHT_NOBITS) {
61065
61066- if (sechdrs[i].sh_type != SHT_NOBITS)
61067- memcpy(dest, (void *)sechdrs[i].sh_addr,
61068- sechdrs[i].sh_size);
61069+#ifdef CONFIG_PAX_KERNEXEC
61070+#ifdef CONFIG_X86_64
61071+ if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
61072+ set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
61073+#endif
61074+ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
61075+ pax_open_kernel();
61076+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
61077+ pax_close_kernel();
61078+ } else
61079+#endif
61080+
61081+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
61082+ }
61083 /* Update sh_addr to point to copy in image. */
61084- sechdrs[i].sh_addr = (unsigned long)dest;
61085+
61086+#ifdef CONFIG_PAX_KERNEXEC
61087+ if (sechdrs[i].sh_flags & SHF_EXECINSTR)
61088+ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
61089+ else
61090+#endif
61091+
61092+ sechdrs[i].sh_addr = (unsigned long)dest;
61093 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
61094 }
61095 /* Module has been moved. */
61096@@ -2322,7 +2433,7 @@ static noinline struct module *load_modu
61097 mod->name);
61098 if (!mod->refptr) {
61099 err = -ENOMEM;
61100- goto free_init;
61101+ goto free_init_rx;
61102 }
61103 #endif
61104 /* Now we've moved module, initialize linked lists, etc. */
61105@@ -2351,6 +2462,31 @@ static noinline struct module *load_modu
61106 /* Set up MODINFO_ATTR fields */
61107 setup_modinfo(mod, sechdrs, infoindex);
61108
61109+ mod->args = args;
61110+
61111+#ifdef CONFIG_GRKERNSEC_MODHARDEN
61112+ {
61113+ char *p, *p2;
61114+
61115+ if (strstr(mod->args, "grsec_modharden_netdev")) {
61116+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.\n", mod->name);
61117+ err = -EPERM;
61118+ goto cleanup;
61119+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
61120+ p += strlen("grsec_modharden_normal");
61121+ p2 = strstr(p, "_");
61122+ if (p2) {
61123+ *p2 = '\0';
61124+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
61125+ *p2 = '_';
61126+ }
61127+ err = -EPERM;
61128+ goto cleanup;
61129+ }
61130+ }
61131+#endif
61132+
61133+
61134 /* Fix up syms, so that st_value is a pointer to location. */
61135 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
61136 mod);
61137@@ -2431,8 +2567,8 @@ static noinline struct module *load_modu
61138
61139 /* Now do relocations. */
61140 for (i = 1; i < hdr->e_shnum; i++) {
61141- const char *strtab = (char *)sechdrs[strindex].sh_addr;
61142 unsigned int info = sechdrs[i].sh_info;
61143+ strtab = (char *)sechdrs[strindex].sh_addr;
61144
61145 /* Not a valid relocation section? */
61146 if (info >= hdr->e_shnum)
61147@@ -2493,16 +2629,15 @@ static noinline struct module *load_modu
61148 * Do it before processing of module parameters, so the module
61149 * can provide parameter accessor functions of its own.
61150 */
61151- if (mod->module_init)
61152- flush_icache_range((unsigned long)mod->module_init,
61153- (unsigned long)mod->module_init
61154- + mod->init_size);
61155- flush_icache_range((unsigned long)mod->module_core,
61156- (unsigned long)mod->module_core + mod->core_size);
61157+ if (mod->module_init_rx)
61158+ flush_icache_range((unsigned long)mod->module_init_rx,
61159+ (unsigned long)mod->module_init_rx
61160+ + mod->init_size_rx);
61161+ flush_icache_range((unsigned long)mod->module_core_rx,
61162+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
61163
61164 set_fs(old_fs);
61165
61166- mod->args = args;
61167 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
61168 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
61169 mod->name);
61170@@ -2546,12 +2681,16 @@ static noinline struct module *load_modu
61171 free_unload:
61172 module_unload_free(mod);
61173 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
61174+ free_init_rx:
61175 percpu_modfree(mod->refptr);
61176- free_init:
61177 #endif
61178- module_free(mod, mod->module_init);
61179- free_core:
61180- module_free(mod, mod->module_core);
61181+ module_free_exec(mod, mod->module_init_rx);
61182+ free_core_rx:
61183+ module_free_exec(mod, mod->module_core_rx);
61184+ free_init_rw:
61185+ module_free(mod, mod->module_init_rw);
61186+ free_core_rw:
61187+ module_free(mod, mod->module_core_rw);
61188 /* mod will be freed with core. Don't access it beyond this line! */
61189 free_percpu:
61190 if (percpu)
61191@@ -2653,10 +2792,12 @@ SYSCALL_DEFINE3(init_module, void __user
61192 mod->symtab = mod->core_symtab;
61193 mod->strtab = mod->core_strtab;
61194 #endif
61195- module_free(mod, mod->module_init);
61196- mod->module_init = NULL;
61197- mod->init_size = 0;
61198- mod->init_text_size = 0;
61199+ module_free(mod, mod->module_init_rw);
61200+ module_free_exec(mod, mod->module_init_rx);
61201+ mod->module_init_rw = NULL;
61202+ mod->module_init_rx = NULL;
61203+ mod->init_size_rw = 0;
61204+ mod->init_size_rx = 0;
61205 mutex_unlock(&module_mutex);
61206
61207 return 0;
61208@@ -2687,10 +2828,16 @@ static const char *get_ksymbol(struct mo
61209 unsigned long nextval;
61210
61211 /* At worse, next value is at end of module */
61212- if (within_module_init(addr, mod))
61213- nextval = (unsigned long)mod->module_init+mod->init_text_size;
61214+ if (within_module_init_rx(addr, mod))
61215+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
61216+ else if (within_module_init_rw(addr, mod))
61217+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
61218+ else if (within_module_core_rx(addr, mod))
61219+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
61220+ else if (within_module_core_rw(addr, mod))
61221+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
61222 else
61223- nextval = (unsigned long)mod->module_core+mod->core_text_size;
61224+ return NULL;
61225
61226 /* Scan for closest preceeding symbol, and next symbol. (ELF
61227 starts real symbols at 1). */
61228@@ -2936,7 +3083,7 @@ static int m_show(struct seq_file *m, vo
61229 char buf[8];
61230
61231 seq_printf(m, "%s %u",
61232- mod->name, mod->init_size + mod->core_size);
61233+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
61234 print_unload_info(m, mod);
61235
61236 /* Informative for users. */
61237@@ -2945,7 +3092,7 @@ static int m_show(struct seq_file *m, vo
61238 mod->state == MODULE_STATE_COMING ? "Loading":
61239 "Live");
61240 /* Used by oprofile and other similar tools. */
61241- seq_printf(m, " 0x%p", mod->module_core);
61242+ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
61243
61244 /* Taints info */
61245 if (mod->taints)
61246@@ -2981,7 +3128,17 @@ static const struct file_operations proc
61247
61248 static int __init proc_modules_init(void)
61249 {
61250+#ifndef CONFIG_GRKERNSEC_HIDESYM
61251+#ifdef CONFIG_GRKERNSEC_PROC_USER
61252+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
61253+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
61254+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
61255+#else
61256 proc_create("modules", 0, NULL, &proc_modules_operations);
61257+#endif
61258+#else
61259+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
61260+#endif
61261 return 0;
61262 }
61263 module_init(proc_modules_init);
61264@@ -3040,12 +3197,12 @@ struct module *__module_address(unsigned
61265 {
61266 struct module *mod;
61267
61268- if (addr < module_addr_min || addr > module_addr_max)
61269+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
61270+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
61271 return NULL;
61272
61273 list_for_each_entry_rcu(mod, &modules, list)
61274- if (within_module_core(addr, mod)
61275- || within_module_init(addr, mod))
61276+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
61277 return mod;
61278 return NULL;
61279 }
61280@@ -3079,11 +3236,20 @@ bool is_module_text_address(unsigned lon
61281 */
61282 struct module *__module_text_address(unsigned long addr)
61283 {
61284- struct module *mod = __module_address(addr);
61285+ struct module *mod;
61286+
61287+#ifdef CONFIG_X86_32
61288+ addr = ktla_ktva(addr);
61289+#endif
61290+
61291+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
61292+ return NULL;
61293+
61294+ mod = __module_address(addr);
61295+
61296 if (mod) {
61297 /* Make sure it's within the text section. */
61298- if (!within(addr, mod->module_init, mod->init_text_size)
61299- && !within(addr, mod->module_core, mod->core_text_size))
61300+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
61301 mod = NULL;
61302 }
61303 return mod;
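
The kernel/module.c hunks above split each module's core and init images into writable (rw) and executable (rx) regions and track separate address bounds for each, so lookups such as __module_address() can reject out-of-range pointers before walking the module list. The stand-alone sketch below models only that bounds-tracking pattern and is not part of the patch: module_alloc()/module_alloc_exec() and the vmalloc ranges they return are stood in for by plain malloc(), and the _rw/_rx names simply mirror the naming used above.

/* Illustrative sketch, not part of the patch. */
#include <stdio.h>
#include <stdlib.h>

static unsigned long addr_min_rw = ~0UL, addr_max_rw;
static unsigned long addr_min_rx = ~0UL, addr_max_rx;

/* Mirrors module_alloc_update_bounds_rw/_rx: allocate, then widen the
 * [min, max) range covering every allocation made for that region type. */
static void *alloc_update_bounds(size_t size, unsigned long *min, unsigned long *max)
{
        void *ret = malloc(size);       /* stand-in for module_alloc{,_exec}() */

        if (ret) {
                if ((unsigned long)ret < *min)
                        *min = (unsigned long)ret;
                if ((unsigned long)ret + size > *max)
                        *max = (unsigned long)ret + size;
        }
        return ret;
}

/* Mirrors the quick reject added to __module_address(): an address outside
 * both ranges cannot belong to any module, so the list walk is skipped. */
static int within_any_region(unsigned long addr)
{
        return (addr >= addr_min_rw && addr < addr_max_rw) ||
               (addr >= addr_min_rx && addr < addr_max_rx);
}

int main(void)
{
        void *core_rw = alloc_update_bounds(4096, &addr_min_rw, &addr_max_rw);
        void *core_rx = alloc_update_bounds(4096, &addr_min_rx, &addr_max_rx);

        printf("rw hit: %d  rx hit: %d  miss: %d\n",
               within_any_region((unsigned long)core_rw),
               within_any_region((unsigned long)core_rx),
               within_any_region(1UL));

        free(core_rw);
        free(core_rx);
        return 0;
}
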
61304diff -urNp linux-2.6.32.43/kernel/mutex.c linux-2.6.32.43/kernel/mutex.c
61305--- linux-2.6.32.43/kernel/mutex.c 2011-03-27 14:31:47.000000000 -0400
61306+++ linux-2.6.32.43/kernel/mutex.c 2011-04-17 15:56:46.000000000 -0400
61307@@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock,
61308 */
61309
61310 for (;;) {
61311- struct thread_info *owner;
61312+ struct task_struct *owner;
61313
61314 /*
61315 * If we own the BKL, then don't spin. The owner of
61316@@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock,
61317 spin_lock_mutex(&lock->wait_lock, flags);
61318
61319 debug_mutex_lock_common(lock, &waiter);
61320- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
61321+ debug_mutex_add_waiter(lock, &waiter, task);
61322
61323 /* add waiting tasks to the end of the waitqueue (FIFO): */
61324 list_add_tail(&waiter.list, &lock->wait_list);
61325@@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock,
61326 * TASK_UNINTERRUPTIBLE case.)
61327 */
61328 if (unlikely(signal_pending_state(state, task))) {
61329- mutex_remove_waiter(lock, &waiter,
61330- task_thread_info(task));
61331+ mutex_remove_waiter(lock, &waiter, task);
61332 mutex_release(&lock->dep_map, 1, ip);
61333 spin_unlock_mutex(&lock->wait_lock, flags);
61334
61335@@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock,
61336 done:
61337 lock_acquired(&lock->dep_map, ip);
61338 /* got the lock - rejoice! */
61339- mutex_remove_waiter(lock, &waiter, current_thread_info());
61340+ mutex_remove_waiter(lock, &waiter, task);
61341 mutex_set_owner(lock);
61342
61343 /* set it to 0 if there are no waiters left: */
61344diff -urNp linux-2.6.32.43/kernel/mutex-debug.c linux-2.6.32.43/kernel/mutex-debug.c
61345--- linux-2.6.32.43/kernel/mutex-debug.c 2011-03-27 14:31:47.000000000 -0400
61346+++ linux-2.6.32.43/kernel/mutex-debug.c 2011-04-17 15:56:46.000000000 -0400
61347@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
61348 }
61349
61350 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
61351- struct thread_info *ti)
61352+ struct task_struct *task)
61353 {
61354 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
61355
61356 /* Mark the current thread as blocked on the lock: */
61357- ti->task->blocked_on = waiter;
61358+ task->blocked_on = waiter;
61359 }
61360
61361 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
61362- struct thread_info *ti)
61363+ struct task_struct *task)
61364 {
61365 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
61366- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
61367- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
61368- ti->task->blocked_on = NULL;
61369+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
61370+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
61371+ task->blocked_on = NULL;
61372
61373 list_del_init(&waiter->list);
61374 waiter->task = NULL;
61375@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
61376 return;
61377
61378 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
61379- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
61380+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
61381 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
61382 mutex_clear_owner(lock);
61383 }
61384diff -urNp linux-2.6.32.43/kernel/mutex-debug.h linux-2.6.32.43/kernel/mutex-debug.h
61385--- linux-2.6.32.43/kernel/mutex-debug.h 2011-03-27 14:31:47.000000000 -0400
61386+++ linux-2.6.32.43/kernel/mutex-debug.h 2011-04-17 15:56:46.000000000 -0400
61387@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
61388 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
61389 extern void debug_mutex_add_waiter(struct mutex *lock,
61390 struct mutex_waiter *waiter,
61391- struct thread_info *ti);
61392+ struct task_struct *task);
61393 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
61394- struct thread_info *ti);
61395+ struct task_struct *task);
61396 extern void debug_mutex_unlock(struct mutex *lock);
61397 extern void debug_mutex_init(struct mutex *lock, const char *name,
61398 struct lock_class_key *key);
61399
61400 static inline void mutex_set_owner(struct mutex *lock)
61401 {
61402- lock->owner = current_thread_info();
61403+ lock->owner = current;
61404 }
61405
61406 static inline void mutex_clear_owner(struct mutex *lock)
61407diff -urNp linux-2.6.32.43/kernel/mutex.h linux-2.6.32.43/kernel/mutex.h
61408--- linux-2.6.32.43/kernel/mutex.h 2011-03-27 14:31:47.000000000 -0400
61409+++ linux-2.6.32.43/kernel/mutex.h 2011-04-17 15:56:46.000000000 -0400
61410@@ -19,7 +19,7 @@
61411 #ifdef CONFIG_SMP
61412 static inline void mutex_set_owner(struct mutex *lock)
61413 {
61414- lock->owner = current_thread_info();
61415+ lock->owner = current;
61416 }
61417
61418 static inline void mutex_clear_owner(struct mutex *lock)
61419diff -urNp linux-2.6.32.43/kernel/panic.c linux-2.6.32.43/kernel/panic.c
61420--- linux-2.6.32.43/kernel/panic.c 2011-03-27 14:31:47.000000000 -0400
61421+++ linux-2.6.32.43/kernel/panic.c 2011-04-17 15:56:46.000000000 -0400
61422@@ -352,7 +352,7 @@ static void warn_slowpath_common(const c
61423 const char *board;
61424
61425 printk(KERN_WARNING "------------[ cut here ]------------\n");
61426- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
61427+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
61428 board = dmi_get_system_info(DMI_PRODUCT_NAME);
61429 if (board)
61430 printk(KERN_WARNING "Hardware name: %s\n", board);
61431@@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
61432 */
61433 void __stack_chk_fail(void)
61434 {
61435- panic("stack-protector: Kernel stack is corrupted in: %p\n",
61436+ dump_stack();
61437+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
61438 __builtin_return_address(0));
61439 }
61440 EXPORT_SYMBOL(__stack_chk_fail);
61441diff -urNp linux-2.6.32.43/kernel/params.c linux-2.6.32.43/kernel/params.c
61442--- linux-2.6.32.43/kernel/params.c 2011-03-27 14:31:47.000000000 -0400
61443+++ linux-2.6.32.43/kernel/params.c 2011-04-17 15:56:46.000000000 -0400
61444@@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct
61445 return ret;
61446 }
61447
61448-static struct sysfs_ops module_sysfs_ops = {
61449+static const struct sysfs_ops module_sysfs_ops = {
61450 .show = module_attr_show,
61451 .store = module_attr_store,
61452 };
61453@@ -739,7 +739,7 @@ static int uevent_filter(struct kset *ks
61454 return 0;
61455 }
61456
61457-static struct kset_uevent_ops module_uevent_ops = {
61458+static const struct kset_uevent_ops module_uevent_ops = {
61459 .filter = uevent_filter,
61460 };
61461
61462diff -urNp linux-2.6.32.43/kernel/perf_event.c linux-2.6.32.43/kernel/perf_event.c
61463--- linux-2.6.32.43/kernel/perf_event.c 2011-04-17 17:00:52.000000000 -0400
61464+++ linux-2.6.32.43/kernel/perf_event.c 2011-05-04 17:56:28.000000000 -0400
61465@@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl
61466 */
61467 int sysctl_perf_event_sample_rate __read_mostly = 100000;
61468
61469-static atomic64_t perf_event_id;
61470+static atomic64_unchecked_t perf_event_id;
61471
61472 /*
61473 * Lock for (sysadmin-configurable) event reservations:
61474@@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc
61475 * In order to keep per-task stats reliable we need to flip the event
61476 * values when we flip the contexts.
61477 */
61478- value = atomic64_read(&next_event->count);
61479- value = atomic64_xchg(&event->count, value);
61480- atomic64_set(&next_event->count, value);
61481+ value = atomic64_read_unchecked(&next_event->count);
61482+ value = atomic64_xchg_unchecked(&event->count, value);
61483+ atomic64_set_unchecked(&next_event->count, value);
61484
61485 swap(event->total_time_enabled, next_event->total_time_enabled);
61486 swap(event->total_time_running, next_event->total_time_running);
61487@@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e
61488 update_event_times(event);
61489 }
61490
61491- return atomic64_read(&event->count);
61492+ return atomic64_read_unchecked(&event->count);
61493 }
61494
61495 /*
61496@@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct
61497 values[n++] = 1 + leader->nr_siblings;
61498 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
61499 values[n++] = leader->total_time_enabled +
61500- atomic64_read(&leader->child_total_time_enabled);
61501+ atomic64_read_unchecked(&leader->child_total_time_enabled);
61502 }
61503 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
61504 values[n++] = leader->total_time_running +
61505- atomic64_read(&leader->child_total_time_running);
61506+ atomic64_read_unchecked(&leader->child_total_time_running);
61507 }
61508
61509 size = n * sizeof(u64);
61510@@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe
61511 values[n++] = perf_event_read_value(event);
61512 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
61513 values[n++] = event->total_time_enabled +
61514- atomic64_read(&event->child_total_time_enabled);
61515+ atomic64_read_unchecked(&event->child_total_time_enabled);
61516 }
61517 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
61518 values[n++] = event->total_time_running +
61519- atomic64_read(&event->child_total_time_running);
61520+ atomic64_read_unchecked(&event->child_total_time_running);
61521 }
61522 if (read_format & PERF_FORMAT_ID)
61523 values[n++] = primary_event_id(event);
61524@@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil
61525 static void perf_event_reset(struct perf_event *event)
61526 {
61527 (void)perf_event_read(event);
61528- atomic64_set(&event->count, 0);
61529+ atomic64_set_unchecked(&event->count, 0);
61530 perf_event_update_userpage(event);
61531 }
61532
61533@@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p
61534 ++userpg->lock;
61535 barrier();
61536 userpg->index = perf_event_index(event);
61537- userpg->offset = atomic64_read(&event->count);
61538+ userpg->offset = atomic64_read_unchecked(&event->count);
61539 if (event->state == PERF_EVENT_STATE_ACTIVE)
61540- userpg->offset -= atomic64_read(&event->hw.prev_count);
61541+ userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
61542
61543 userpg->time_enabled = event->total_time_enabled +
61544- atomic64_read(&event->child_total_time_enabled);
61545+ atomic64_read_unchecked(&event->child_total_time_enabled);
61546
61547 userpg->time_running = event->total_time_running +
61548- atomic64_read(&event->child_total_time_running);
61549+ atomic64_read_unchecked(&event->child_total_time_running);
61550
61551 barrier();
61552 ++userpg->lock;
61553@@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct
61554 u64 values[4];
61555 int n = 0;
61556
61557- values[n++] = atomic64_read(&event->count);
61558+ values[n++] = atomic64_read_unchecked(&event->count);
61559 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
61560 values[n++] = event->total_time_enabled +
61561- atomic64_read(&event->child_total_time_enabled);
61562+ atomic64_read_unchecked(&event->child_total_time_enabled);
61563 }
61564 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
61565 values[n++] = event->total_time_running +
61566- atomic64_read(&event->child_total_time_running);
61567+ atomic64_read_unchecked(&event->child_total_time_running);
61568 }
61569 if (read_format & PERF_FORMAT_ID)
61570 values[n++] = primary_event_id(event);
61571@@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc
61572 if (leader != event)
61573 leader->pmu->read(leader);
61574
61575- values[n++] = atomic64_read(&leader->count);
61576+ values[n++] = atomic64_read_unchecked(&leader->count);
61577 if (read_format & PERF_FORMAT_ID)
61578 values[n++] = primary_event_id(leader);
61579
61580@@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc
61581 if (sub != event)
61582 sub->pmu->read(sub);
61583
61584- values[n++] = atomic64_read(&sub->count);
61585+ values[n++] = atomic64_read_unchecked(&sub->count);
61586 if (read_format & PERF_FORMAT_ID)
61587 values[n++] = primary_event_id(sub);
61588
61589@@ -3787,7 +3787,7 @@ static void perf_swevent_add(struct perf
61590 {
61591 struct hw_perf_event *hwc = &event->hw;
61592
61593- atomic64_add(nr, &event->count);
61594+ atomic64_add_unchecked(nr, &event->count);
61595
61596 if (!hwc->sample_period)
61597 return;
61598@@ -4044,9 +4044,9 @@ static void cpu_clock_perf_event_update(
61599 u64 now;
61600
61601 now = cpu_clock(cpu);
61602- prev = atomic64_read(&event->hw.prev_count);
61603- atomic64_set(&event->hw.prev_count, now);
61604- atomic64_add(now - prev, &event->count);
61605+ prev = atomic64_read_unchecked(&event->hw.prev_count);
61606+ atomic64_set_unchecked(&event->hw.prev_count, now);
61607+ atomic64_add_unchecked(now - prev, &event->count);
61608 }
61609
61610 static int cpu_clock_perf_event_enable(struct perf_event *event)
61611@@ -4054,7 +4054,7 @@ static int cpu_clock_perf_event_enable(s
61612 struct hw_perf_event *hwc = &event->hw;
61613 int cpu = raw_smp_processor_id();
61614
61615- atomic64_set(&hwc->prev_count, cpu_clock(cpu));
61616+ atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
61617 perf_swevent_start_hrtimer(event);
61618
61619 return 0;
61620@@ -4086,9 +4086,9 @@ static void task_clock_perf_event_update
61621 u64 prev;
61622 s64 delta;
61623
61624- prev = atomic64_xchg(&event->hw.prev_count, now);
61625+ prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
61626 delta = now - prev;
61627- atomic64_add(delta, &event->count);
61628+ atomic64_add_unchecked(delta, &event->count);
61629 }
61630
61631 static int task_clock_perf_event_enable(struct perf_event *event)
61632@@ -4098,7 +4098,7 @@ static int task_clock_perf_event_enable(
61633
61634 now = event->ctx->time;
61635
61636- atomic64_set(&hwc->prev_count, now);
61637+ atomic64_set_unchecked(&hwc->prev_count, now);
61638
61639 perf_swevent_start_hrtimer(event);
61640
61641@@ -4293,7 +4293,7 @@ perf_event_alloc(struct perf_event_attr
61642 event->parent = parent_event;
61643
61644 event->ns = get_pid_ns(current->nsproxy->pid_ns);
61645- event->id = atomic64_inc_return(&perf_event_id);
61646+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
61647
61648 event->state = PERF_EVENT_STATE_INACTIVE;
61649
61650@@ -4724,15 +4724,15 @@ static void sync_child_event(struct perf
61651 if (child_event->attr.inherit_stat)
61652 perf_event_read_event(child_event, child);
61653
61654- child_val = atomic64_read(&child_event->count);
61655+ child_val = atomic64_read_unchecked(&child_event->count);
61656
61657 /*
61658 * Add back the child's count to the parent's count:
61659 */
61660- atomic64_add(child_val, &parent_event->count);
61661- atomic64_add(child_event->total_time_enabled,
61662+ atomic64_add_unchecked(child_val, &parent_event->count);
61663+ atomic64_add_unchecked(child_event->total_time_enabled,
61664 &parent_event->child_total_time_enabled);
61665- atomic64_add(child_event->total_time_running,
61666+ atomic64_add_unchecked(child_event->total_time_running,
61667 &parent_event->child_total_time_running);
61668
61669 /*
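
Throughout the perf_event.c and profile.c hunks above, counters that are pure statistics (event counts, IDs, per-bucket tallies) are converted from atomic_t/atomic64_t to the *_unchecked variants so they are exempt from the overflow protection applied to reference counts. The sketch below only models that distinction and is not part of the patch: the real checked variants use architecture-specific overflow traps, and atomicity is omitted here for brevity.

/* Illustrative sketch, not part of the patch; atomicity omitted for brevity. */
#include <limits.h>
#include <stdio.h>

typedef struct { unsigned long counter; } refcount_checked_t;
typedef struct { unsigned long counter; } counter_unchecked_t;

/* Checked flavour: refuses to wrap (the real code traps and saturates). */
static int inc_checked(refcount_checked_t *v)
{
        if (v->counter == ULONG_MAX) {
                fprintf(stderr, "refcount overflow detected\n");
                return -1;
        }
        v->counter++;
        return 0;
}

/* Unchecked flavour: plain increment; wrap-around is acceptable for stats. */
static void inc_unchecked(counter_unchecked_t *v)
{
        v->counter++;
}

int main(void)
{
        refcount_checked_t ref = { ULONG_MAX };
        counter_unchecked_t stat = { ULONG_MAX };

        inc_checked(&ref);      /* caught, counter left saturated */
        inc_unchecked(&stat);   /* wraps to 0, which is fine for a statistic */

        printf("ref=%lu stat=%lu\n", ref.counter, stat.counter);
        return 0;
}
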
61670diff -urNp linux-2.6.32.43/kernel/pid.c linux-2.6.32.43/kernel/pid.c
61671--- linux-2.6.32.43/kernel/pid.c 2011-04-22 19:16:29.000000000 -0400
61672+++ linux-2.6.32.43/kernel/pid.c 2011-07-14 19:15:33.000000000 -0400
61673@@ -33,6 +33,7 @@
61674 #include <linux/rculist.h>
61675 #include <linux/bootmem.h>
61676 #include <linux/hash.h>
61677+#include <linux/security.h>
61678 #include <linux/pid_namespace.h>
61679 #include <linux/init_task.h>
61680 #include <linux/syscalls.h>
61681@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
61682
61683 int pid_max = PID_MAX_DEFAULT;
61684
61685-#define RESERVED_PIDS 300
61686+#define RESERVED_PIDS 500
61687
61688 int pid_max_min = RESERVED_PIDS + 1;
61689 int pid_max_max = PID_MAX_LIMIT;
61690@@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
61691 */
61692 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
61693 {
61694- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
61695+ struct task_struct *task;
61696+
61697+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
61698+
61699+ if (gr_pid_is_chrooted(task))
61700+ return NULL;
61701+
61702+ return task;
61703 }
61704
61705 struct task_struct *find_task_by_vpid(pid_t vnr)
61706@@ -391,6 +399,11 @@ struct task_struct *find_task_by_vpid(pi
61707 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
61708 }
61709
61710+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
61711+{
61714+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
61715+}
61716+
61717 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
61718 {
61719 struct pid *pid;
61720diff -urNp linux-2.6.32.43/kernel/posix-cpu-timers.c linux-2.6.32.43/kernel/posix-cpu-timers.c
61721--- linux-2.6.32.43/kernel/posix-cpu-timers.c 2011-03-27 14:31:47.000000000 -0400
61722+++ linux-2.6.32.43/kernel/posix-cpu-timers.c 2011-04-17 15:56:46.000000000 -0400
61723@@ -6,6 +6,7 @@
61724 #include <linux/posix-timers.h>
61725 #include <linux/errno.h>
61726 #include <linux/math64.h>
61727+#include <linux/security.h>
61728 #include <asm/uaccess.h>
61729 #include <linux/kernel_stat.h>
61730 #include <trace/events/timer.h>
61731diff -urNp linux-2.6.32.43/kernel/posix-timers.c linux-2.6.32.43/kernel/posix-timers.c
61732--- linux-2.6.32.43/kernel/posix-timers.c 2011-03-27 14:31:47.000000000 -0400
61733+++ linux-2.6.32.43/kernel/posix-timers.c 2011-05-16 21:46:57.000000000 -0400
61734@@ -42,6 +42,7 @@
61735 #include <linux/compiler.h>
61736 #include <linux/idr.h>
61737 #include <linux/posix-timers.h>
61738+#include <linux/grsecurity.h>
61739 #include <linux/syscalls.h>
61740 #include <linux/wait.h>
61741 #include <linux/workqueue.h>
61742@@ -296,6 +297,8 @@ static __init int init_posix_timers(void
61743 .nsleep = no_nsleep,
61744 };
61745
61746+ pax_track_stack();
61747+
61748 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
61749 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
61750 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
61751@@ -948,6 +951,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
61752 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
61753 return -EFAULT;
61754
61755+ /* Only the CLOCK_REALTIME clock can be set; all other clocks
61756+ have their clock_set fptr set to a nosettime dummy function.
61757+ CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
61758+ call common_clock_set, which in turn calls do_sys_settimeofday,
61759+ the function we hook.
61760+ */
61761+
61762 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
61763 }
61764
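
The comment added to clock_settime() above relies on the posix-timers dispatch convention in this kernel: when a clock's callback pointer is NULL, CLOCK_DISPATCH falls back to the common_* handler, which is why only CLOCK_REALTIME ever reaches do_sys_settimeofday and the hook behind it. The model below illustrates that convention only and is not part of the patch; the structure and function names are simplified stand-ins, not the kernel API.

/* Illustrative sketch, not part of the patch. */
#include <stdio.h>

struct k_clock_model {
        const char *name;
        int (*clock_set)(long tv_sec);  /* NULL => fall back to the common handler */
};

static int common_clock_set(long tv_sec)
{
        /* In the kernel this path ends in do_sys_settimeofday(), which is hooked. */
        printf("settime reached the hooked path: %ld\n", tv_sec);
        return 0;
}

static int no_settime(long tv_sec)
{
        (void)tv_sec;
        return -1;              /* the dummy handler just rejects the request */
}

static int clock_dispatch_set(const struct k_clock_model *clk, long tv_sec)
{
        return clk->clock_set ? clk->clock_set(tv_sec)
                              : common_clock_set(tv_sec);
}

int main(void)
{
        struct k_clock_model realtime  = { "CLOCK_REALTIME",  NULL };
        struct k_clock_model monotonic = { "CLOCK_MONOTONIC", no_settime };

        printf("%s -> %d\n", realtime.name,  clock_dispatch_set(&realtime, 0));
        printf("%s -> %d\n", monotonic.name, clock_dispatch_set(&monotonic, 0));
        return 0;
}
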
61765diff -urNp linux-2.6.32.43/kernel/power/hibernate.c linux-2.6.32.43/kernel/power/hibernate.c
61766--- linux-2.6.32.43/kernel/power/hibernate.c 2011-03-27 14:31:47.000000000 -0400
61767+++ linux-2.6.32.43/kernel/power/hibernate.c 2011-04-17 15:56:46.000000000 -0400
61768@@ -48,14 +48,14 @@ enum {
61769
61770 static int hibernation_mode = HIBERNATION_SHUTDOWN;
61771
61772-static struct platform_hibernation_ops *hibernation_ops;
61773+static const struct platform_hibernation_ops *hibernation_ops;
61774
61775 /**
61776 * hibernation_set_ops - set the global hibernate operations
61777 * @ops: the hibernation operations to use in subsequent hibernation transitions
61778 */
61779
61780-void hibernation_set_ops(struct platform_hibernation_ops *ops)
61781+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
61782 {
61783 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
61784 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
61785diff -urNp linux-2.6.32.43/kernel/power/poweroff.c linux-2.6.32.43/kernel/power/poweroff.c
61786--- linux-2.6.32.43/kernel/power/poweroff.c 2011-03-27 14:31:47.000000000 -0400
61787+++ linux-2.6.32.43/kernel/power/poweroff.c 2011-04-17 15:56:46.000000000 -0400
61788@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
61789 .enable_mask = SYSRQ_ENABLE_BOOT,
61790 };
61791
61792-static int pm_sysrq_init(void)
61793+static int __init pm_sysrq_init(void)
61794 {
61795 register_sysrq_key('o', &sysrq_poweroff_op);
61796 return 0;
61797diff -urNp linux-2.6.32.43/kernel/power/process.c linux-2.6.32.43/kernel/power/process.c
61798--- linux-2.6.32.43/kernel/power/process.c 2011-03-27 14:31:47.000000000 -0400
61799+++ linux-2.6.32.43/kernel/power/process.c 2011-04-17 15:56:46.000000000 -0400
61800@@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_
61801 struct timeval start, end;
61802 u64 elapsed_csecs64;
61803 unsigned int elapsed_csecs;
61804+ bool timedout = false;
61805
61806 do_gettimeofday(&start);
61807
61808 end_time = jiffies + TIMEOUT;
61809 do {
61810 todo = 0;
61811+ if (time_after(jiffies, end_time))
61812+ timedout = true;
61813 read_lock(&tasklist_lock);
61814 do_each_thread(g, p) {
61815 if (frozen(p) || !freezeable(p))
61816@@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_
61817 * It is "frozen enough". If the task does wake
61818 * up, it will immediately call try_to_freeze.
61819 */
61820- if (!task_is_stopped_or_traced(p) &&
61821- !freezer_should_skip(p))
61822+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
61823 todo++;
61824+ if (timedout) {
61825+ printk(KERN_ERR "Task refusing to freeze:\n");
61826+ sched_show_task(p);
61827+ }
61828+ }
61829 } while_each_thread(g, p);
61830 read_unlock(&tasklist_lock);
61831 yield(); /* Yield is okay here */
61832- if (time_after(jiffies, end_time))
61833- break;
61834- } while (todo);
61835+ } while (todo && !timedout);
61836
61837 do_gettimeofday(&end);
61838 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
61839diff -urNp linux-2.6.32.43/kernel/power/suspend.c linux-2.6.32.43/kernel/power/suspend.c
61840--- linux-2.6.32.43/kernel/power/suspend.c 2011-03-27 14:31:47.000000000 -0400
61841+++ linux-2.6.32.43/kernel/power/suspend.c 2011-04-17 15:56:46.000000000 -0400
61842@@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
61843 [PM_SUSPEND_MEM] = "mem",
61844 };
61845
61846-static struct platform_suspend_ops *suspend_ops;
61847+static const struct platform_suspend_ops *suspend_ops;
61848
61849 /**
61850 * suspend_set_ops - Set the global suspend method table.
61851 * @ops: Pointer to ops structure.
61852 */
61853-void suspend_set_ops(struct platform_suspend_ops *ops)
61854+void suspend_set_ops(const struct platform_suspend_ops *ops)
61855 {
61856 mutex_lock(&pm_mutex);
61857 suspend_ops = ops;
61858diff -urNp linux-2.6.32.43/kernel/printk.c linux-2.6.32.43/kernel/printk.c
61859--- linux-2.6.32.43/kernel/printk.c 2011-03-27 14:31:47.000000000 -0400
61860+++ linux-2.6.32.43/kernel/printk.c 2011-04-17 15:56:46.000000000 -0400
61861@@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf
61862 char c;
61863 int error = 0;
61864
61865+#ifdef CONFIG_GRKERNSEC_DMESG
61866+ if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
61867+ return -EPERM;
61868+#endif
61869+
61870 error = security_syslog(type);
61871 if (error)
61872 return error;
61873diff -urNp linux-2.6.32.43/kernel/profile.c linux-2.6.32.43/kernel/profile.c
61874--- linux-2.6.32.43/kernel/profile.c 2011-03-27 14:31:47.000000000 -0400
61875+++ linux-2.6.32.43/kernel/profile.c 2011-05-04 17:56:28.000000000 -0400
61876@@ -39,7 +39,7 @@ struct profile_hit {
61877 /* Oprofile timer tick hook */
61878 static int (*timer_hook)(struct pt_regs *) __read_mostly;
61879
61880-static atomic_t *prof_buffer;
61881+static atomic_unchecked_t *prof_buffer;
61882 static unsigned long prof_len, prof_shift;
61883
61884 int prof_on __read_mostly;
61885@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
61886 hits[i].pc = 0;
61887 continue;
61888 }
61889- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61890+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61891 hits[i].hits = hits[i].pc = 0;
61892 }
61893 }
61894@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
61895 * Add the current hit(s) and flush the write-queue out
61896 * to the global buffer:
61897 */
61898- atomic_add(nr_hits, &prof_buffer[pc]);
61899+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
61900 for (i = 0; i < NR_PROFILE_HIT; ++i) {
61901- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
61902+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
61903 hits[i].pc = hits[i].hits = 0;
61904 }
61905 out:
61906@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
61907 if (prof_on != type || !prof_buffer)
61908 return;
61909 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
61910- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61911+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
61912 }
61913 #endif /* !CONFIG_SMP */
61914 EXPORT_SYMBOL_GPL(profile_hits);
61915@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
61916 return -EFAULT;
61917 buf++; p++; count--; read++;
61918 }
61919- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
61920+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
61921 if (copy_to_user(buf, (void *)pnt, count))
61922 return -EFAULT;
61923 read += count;
61924@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
61925 }
61926 #endif
61927 profile_discard_flip_buffers();
61928- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
61929+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
61930 return count;
61931 }
61932
61933diff -urNp linux-2.6.32.43/kernel/ptrace.c linux-2.6.32.43/kernel/ptrace.c
61934--- linux-2.6.32.43/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
61935+++ linux-2.6.32.43/kernel/ptrace.c 2011-05-22 23:02:06.000000000 -0400
61936@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
61937 return ret;
61938 }
61939
61940-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
61941+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
61942+ unsigned int log)
61943 {
61944 const struct cred *cred = current_cred(), *tcred;
61945
61946@@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_stru
61947 cred->gid != tcred->egid ||
61948 cred->gid != tcred->sgid ||
61949 cred->gid != tcred->gid) &&
61950- !capable(CAP_SYS_PTRACE)) {
61951+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
61952+ (log && !capable(CAP_SYS_PTRACE)))
61953+ ) {
61954 rcu_read_unlock();
61955 return -EPERM;
61956 }
61957@@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_stru
61958 smp_rmb();
61959 if (task->mm)
61960 dumpable = get_dumpable(task->mm);
61961- if (!dumpable && !capable(CAP_SYS_PTRACE))
61962+ if (!dumpable &&
61963+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
61964+ (log && !capable(CAP_SYS_PTRACE))))
61965 return -EPERM;
61966
61967 return security_ptrace_access_check(task, mode);
61968@@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struc
61969 {
61970 int err;
61971 task_lock(task);
61972- err = __ptrace_may_access(task, mode);
61973+ err = __ptrace_may_access(task, mode, 0);
61974+ task_unlock(task);
61975+ return !err;
61976+}
61977+
61978+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
61979+{
61980+ int err;
61981+ task_lock(task);
61982+ err = __ptrace_may_access(task, mode, 1);
61983 task_unlock(task);
61984 return !err;
61985 }
61986@@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
61987 goto out;
61988
61989 task_lock(task);
61990- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
61991+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
61992 task_unlock(task);
61993 if (retval)
61994 goto unlock_creds;
61995@@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *ta
61996 goto unlock_tasklist;
61997
61998 task->ptrace = PT_PTRACED;
61999- if (capable(CAP_SYS_PTRACE))
62000+ if (capable_nolog(CAP_SYS_PTRACE))
62001 task->ptrace |= PT_PTRACE_CAP;
62002
62003 __ptrace_link(task, current);
62004@@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *
62005 {
62006 int copied = 0;
62007
62008+ pax_track_stack();
62009+
62010 while (len > 0) {
62011 char buf[128];
62012 int this_len, retval;
62013@@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct
62014 {
62015 int copied = 0;
62016
62017+ pax_track_stack();
62018+
62019 while (len > 0) {
62020 char buf[128];
62021 int this_len, retval;
62022@@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *c
62023 int ret = -EIO;
62024 siginfo_t siginfo;
62025
62026+ pax_track_stack();
62027+
62028 switch (request) {
62029 case PTRACE_PEEKTEXT:
62030 case PTRACE_PEEKDATA:
62031@@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *c
62032 ret = ptrace_setoptions(child, data);
62033 break;
62034 case PTRACE_GETEVENTMSG:
62035- ret = put_user(child->ptrace_message, (unsigned long __user *) data);
62036+ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
62037 break;
62038
62039 case PTRACE_GETSIGINFO:
62040 ret = ptrace_getsiginfo(child, &siginfo);
62041 if (!ret)
62042- ret = copy_siginfo_to_user((siginfo_t __user *) data,
62043+ ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
62044 &siginfo);
62045 break;
62046
62047 case PTRACE_SETSIGINFO:
62048- if (copy_from_user(&siginfo, (siginfo_t __user *) data,
62049+ if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
62050 sizeof siginfo))
62051 ret = -EFAULT;
62052 else
62053@@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
62054 goto out;
62055 }
62056
62057+ if (gr_handle_ptrace(child, request)) {
62058+ ret = -EPERM;
62059+ goto out_put_task_struct;
62060+ }
62061+
62062 if (request == PTRACE_ATTACH) {
62063 ret = ptrace_attach(child);
62064 /*
62065 * Some architectures need to do book-keeping after
62066 * a ptrace attach.
62067 */
62068- if (!ret)
62069+ if (!ret) {
62070 arch_ptrace_attach(child);
62071+ gr_audit_ptrace(child);
62072+ }
62073 goto out_put_task_struct;
62074 }
62075
62076@@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_
62077 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
62078 if (copied != sizeof(tmp))
62079 return -EIO;
62080- return put_user(tmp, (unsigned long __user *)data);
62081+ return put_user(tmp, (__force unsigned long __user *)data);
62082 }
62083
62084 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
62085@@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_st
62086 siginfo_t siginfo;
62087 int ret;
62088
62089+ pax_track_stack();
62090+
62091 switch (request) {
62092 case PTRACE_PEEKTEXT:
62093 case PTRACE_PEEKDATA:
62094@@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat
62095 goto out;
62096 }
62097
62098+ if (gr_handle_ptrace(child, request)) {
62099+ ret = -EPERM;
62100+ goto out_put_task_struct;
62101+ }
62102+
62103 if (request == PTRACE_ATTACH) {
62104 ret = ptrace_attach(child);
62105 /*
62106 * Some architectures need to do book-keeping after
62107 * a ptrace attach.
62108 */
62109- if (!ret)
62110+ if (!ret) {
62111 arch_ptrace_attach(child);
62112+ gr_audit_ptrace(child);
62113+ }
62114 goto out_put_task_struct;
62115 }
62116
62117diff -urNp linux-2.6.32.43/kernel/rcutorture.c linux-2.6.32.43/kernel/rcutorture.c
62118--- linux-2.6.32.43/kernel/rcutorture.c 2011-03-27 14:31:47.000000000 -0400
62119+++ linux-2.6.32.43/kernel/rcutorture.c 2011-05-04 17:56:28.000000000 -0400
62120@@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
62121 { 0 };
62122 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
62123 { 0 };
62124-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
62125-static atomic_t n_rcu_torture_alloc;
62126-static atomic_t n_rcu_torture_alloc_fail;
62127-static atomic_t n_rcu_torture_free;
62128-static atomic_t n_rcu_torture_mberror;
62129-static atomic_t n_rcu_torture_error;
62130+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
62131+static atomic_unchecked_t n_rcu_torture_alloc;
62132+static atomic_unchecked_t n_rcu_torture_alloc_fail;
62133+static atomic_unchecked_t n_rcu_torture_free;
62134+static atomic_unchecked_t n_rcu_torture_mberror;
62135+static atomic_unchecked_t n_rcu_torture_error;
62136 static long n_rcu_torture_timers;
62137 static struct list_head rcu_torture_removed;
62138 static cpumask_var_t shuffle_tmp_mask;
62139@@ -187,11 +187,11 @@ rcu_torture_alloc(void)
62140
62141 spin_lock_bh(&rcu_torture_lock);
62142 if (list_empty(&rcu_torture_freelist)) {
62143- atomic_inc(&n_rcu_torture_alloc_fail);
62144+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
62145 spin_unlock_bh(&rcu_torture_lock);
62146 return NULL;
62147 }
62148- atomic_inc(&n_rcu_torture_alloc);
62149+ atomic_inc_unchecked(&n_rcu_torture_alloc);
62150 p = rcu_torture_freelist.next;
62151 list_del_init(p);
62152 spin_unlock_bh(&rcu_torture_lock);
62153@@ -204,7 +204,7 @@ rcu_torture_alloc(void)
62154 static void
62155 rcu_torture_free(struct rcu_torture *p)
62156 {
62157- atomic_inc(&n_rcu_torture_free);
62158+ atomic_inc_unchecked(&n_rcu_torture_free);
62159 spin_lock_bh(&rcu_torture_lock);
62160 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
62161 spin_unlock_bh(&rcu_torture_lock);
62162@@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
62163 i = rp->rtort_pipe_count;
62164 if (i > RCU_TORTURE_PIPE_LEN)
62165 i = RCU_TORTURE_PIPE_LEN;
62166- atomic_inc(&rcu_torture_wcount[i]);
62167+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
62168 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
62169 rp->rtort_mbtest = 0;
62170 rcu_torture_free(rp);
62171@@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr
62172 i = rp->rtort_pipe_count;
62173 if (i > RCU_TORTURE_PIPE_LEN)
62174 i = RCU_TORTURE_PIPE_LEN;
62175- atomic_inc(&rcu_torture_wcount[i]);
62176+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
62177 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
62178 rp->rtort_mbtest = 0;
62179 list_del(&rp->rtort_free);
62180@@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
62181 i = old_rp->rtort_pipe_count;
62182 if (i > RCU_TORTURE_PIPE_LEN)
62183 i = RCU_TORTURE_PIPE_LEN;
62184- atomic_inc(&rcu_torture_wcount[i]);
62185+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
62186 old_rp->rtort_pipe_count++;
62187 cur_ops->deferred_free(old_rp);
62188 }
62189@@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l
62190 return;
62191 }
62192 if (p->rtort_mbtest == 0)
62193- atomic_inc(&n_rcu_torture_mberror);
62194+ atomic_inc_unchecked(&n_rcu_torture_mberror);
62195 spin_lock(&rand_lock);
62196 cur_ops->read_delay(&rand);
62197 n_rcu_torture_timers++;
62198@@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
62199 continue;
62200 }
62201 if (p->rtort_mbtest == 0)
62202- atomic_inc(&n_rcu_torture_mberror);
62203+ atomic_inc_unchecked(&n_rcu_torture_mberror);
62204 cur_ops->read_delay(&rand);
62205 preempt_disable();
62206 pipe_count = p->rtort_pipe_count;
62207@@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
62208 rcu_torture_current,
62209 rcu_torture_current_version,
62210 list_empty(&rcu_torture_freelist),
62211- atomic_read(&n_rcu_torture_alloc),
62212- atomic_read(&n_rcu_torture_alloc_fail),
62213- atomic_read(&n_rcu_torture_free),
62214- atomic_read(&n_rcu_torture_mberror),
62215+ atomic_read_unchecked(&n_rcu_torture_alloc),
62216+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
62217+ atomic_read_unchecked(&n_rcu_torture_free),
62218+ atomic_read_unchecked(&n_rcu_torture_mberror),
62219 n_rcu_torture_timers);
62220- if (atomic_read(&n_rcu_torture_mberror) != 0)
62221+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
62222 cnt += sprintf(&page[cnt], " !!!");
62223 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
62224 if (i > 1) {
62225 cnt += sprintf(&page[cnt], "!!! ");
62226- atomic_inc(&n_rcu_torture_error);
62227+ atomic_inc_unchecked(&n_rcu_torture_error);
62228 WARN_ON_ONCE(1);
62229 }
62230 cnt += sprintf(&page[cnt], "Reader Pipe: ");
62231@@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
62232 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
62233 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
62234 cnt += sprintf(&page[cnt], " %d",
62235- atomic_read(&rcu_torture_wcount[i]));
62236+ atomic_read_unchecked(&rcu_torture_wcount[i]));
62237 }
62238 cnt += sprintf(&page[cnt], "\n");
62239 if (cur_ops->stats)
62240@@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
62241
62242 if (cur_ops->cleanup)
62243 cur_ops->cleanup();
62244- if (atomic_read(&n_rcu_torture_error))
62245+ if (atomic_read_unchecked(&n_rcu_torture_error))
62246 rcu_torture_print_module_parms("End of test: FAILURE");
62247 else
62248 rcu_torture_print_module_parms("End of test: SUCCESS");
62249@@ -1138,13 +1138,13 @@ rcu_torture_init(void)
62250
62251 rcu_torture_current = NULL;
62252 rcu_torture_current_version = 0;
62253- atomic_set(&n_rcu_torture_alloc, 0);
62254- atomic_set(&n_rcu_torture_alloc_fail, 0);
62255- atomic_set(&n_rcu_torture_free, 0);
62256- atomic_set(&n_rcu_torture_mberror, 0);
62257- atomic_set(&n_rcu_torture_error, 0);
62258+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
62259+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
62260+ atomic_set_unchecked(&n_rcu_torture_free, 0);
62261+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
62262+ atomic_set_unchecked(&n_rcu_torture_error, 0);
62263 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
62264- atomic_set(&rcu_torture_wcount[i], 0);
62265+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
62266 for_each_possible_cpu(cpu) {
62267 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
62268 per_cpu(rcu_torture_count, cpu)[i] = 0;
62269diff -urNp linux-2.6.32.43/kernel/rcutree.c linux-2.6.32.43/kernel/rcutree.c
62270--- linux-2.6.32.43/kernel/rcutree.c 2011-03-27 14:31:47.000000000 -0400
62271+++ linux-2.6.32.43/kernel/rcutree.c 2011-04-17 15:56:46.000000000 -0400
62272@@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state
62273 /*
62274 * Do softirq processing for the current CPU.
62275 */
62276-static void rcu_process_callbacks(struct softirq_action *unused)
62277+static void rcu_process_callbacks(void)
62278 {
62279 /*
62280 * Memory references from any prior RCU read-side critical sections
62281diff -urNp linux-2.6.32.43/kernel/rcutree_plugin.h linux-2.6.32.43/kernel/rcutree_plugin.h
62282--- linux-2.6.32.43/kernel/rcutree_plugin.h 2011-03-27 14:31:47.000000000 -0400
62283+++ linux-2.6.32.43/kernel/rcutree_plugin.h 2011-04-17 15:56:46.000000000 -0400
62284@@ -145,7 +145,7 @@ static void rcu_preempt_note_context_swi
62285 */
62286 void __rcu_read_lock(void)
62287 {
62288- ACCESS_ONCE(current->rcu_read_lock_nesting)++;
62289+ ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
62290 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
62291 }
62292 EXPORT_SYMBOL_GPL(__rcu_read_lock);
62293@@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
62294 struct task_struct *t = current;
62295
62296 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
62297- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
62298+ if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
62299 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
62300 rcu_read_unlock_special(t);
62301 }
62302diff -urNp linux-2.6.32.43/kernel/relay.c linux-2.6.32.43/kernel/relay.c
62303--- linux-2.6.32.43/kernel/relay.c 2011-03-27 14:31:47.000000000 -0400
62304+++ linux-2.6.32.43/kernel/relay.c 2011-05-16 21:46:57.000000000 -0400
62305@@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct fi
62306 unsigned int flags,
62307 int *nonpad_ret)
62308 {
62309- unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
62310+ unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
62311 struct rchan_buf *rbuf = in->private_data;
62312 unsigned int subbuf_size = rbuf->chan->subbuf_size;
62313 uint64_t pos = (uint64_t) *ppos;
62314@@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct fi
62315 .ops = &relay_pipe_buf_ops,
62316 .spd_release = relay_page_release,
62317 };
62318+ ssize_t ret;
62319+
62320+ pax_track_stack();
62321
62322 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
62323 return 0;
62324diff -urNp linux-2.6.32.43/kernel/resource.c linux-2.6.32.43/kernel/resource.c
62325--- linux-2.6.32.43/kernel/resource.c 2011-03-27 14:31:47.000000000 -0400
62326+++ linux-2.6.32.43/kernel/resource.c 2011-04-17 15:56:46.000000000 -0400
62327@@ -132,8 +132,18 @@ static const struct file_operations proc
62328
62329 static int __init ioresources_init(void)
62330 {
62331+#ifdef CONFIG_GRKERNSEC_PROC_ADD
62332+#ifdef CONFIG_GRKERNSEC_PROC_USER
62333+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
62334+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
62335+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62336+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
62337+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
62338+#endif
62339+#else
62340 proc_create("ioports", 0, NULL, &proc_ioports_operations);
62341 proc_create("iomem", 0, NULL, &proc_iomem_operations);
62342+#endif
62343 return 0;
62344 }
62345 __initcall(ioresources_init);
62346diff -urNp linux-2.6.32.43/kernel/rtmutex.c linux-2.6.32.43/kernel/rtmutex.c
62347--- linux-2.6.32.43/kernel/rtmutex.c 2011-03-27 14:31:47.000000000 -0400
62348+++ linux-2.6.32.43/kernel/rtmutex.c 2011-04-17 15:56:46.000000000 -0400
62349@@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt
62350 */
62351 spin_lock_irqsave(&pendowner->pi_lock, flags);
62352
62353- WARN_ON(!pendowner->pi_blocked_on);
62354+ BUG_ON(!pendowner->pi_blocked_on);
62355 WARN_ON(pendowner->pi_blocked_on != waiter);
62356 WARN_ON(pendowner->pi_blocked_on->lock != lock);
62357
62358diff -urNp linux-2.6.32.43/kernel/rtmutex-tester.c linux-2.6.32.43/kernel/rtmutex-tester.c
62359--- linux-2.6.32.43/kernel/rtmutex-tester.c 2011-03-27 14:31:47.000000000 -0400
62360+++ linux-2.6.32.43/kernel/rtmutex-tester.c 2011-05-04 17:56:28.000000000 -0400
62361@@ -21,7 +21,7 @@
62362 #define MAX_RT_TEST_MUTEXES 8
62363
62364 static spinlock_t rttest_lock;
62365-static atomic_t rttest_event;
62366+static atomic_unchecked_t rttest_event;
62367
62368 struct test_thread_data {
62369 int opcode;
62370@@ -64,7 +64,7 @@ static int handle_op(struct test_thread_
62371
62372 case RTTEST_LOCKCONT:
62373 td->mutexes[td->opdata] = 1;
62374- td->event = atomic_add_return(1, &rttest_event);
62375+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62376 return 0;
62377
62378 case RTTEST_RESET:
62379@@ -82,7 +82,7 @@ static int handle_op(struct test_thread_
62380 return 0;
62381
62382 case RTTEST_RESETEVENT:
62383- atomic_set(&rttest_event, 0);
62384+ atomic_set_unchecked(&rttest_event, 0);
62385 return 0;
62386
62387 default:
62388@@ -99,9 +99,9 @@ static int handle_op(struct test_thread_
62389 return ret;
62390
62391 td->mutexes[id] = 1;
62392- td->event = atomic_add_return(1, &rttest_event);
62393+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62394 rt_mutex_lock(&mutexes[id]);
62395- td->event = atomic_add_return(1, &rttest_event);
62396+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62397 td->mutexes[id] = 4;
62398 return 0;
62399
62400@@ -112,9 +112,9 @@ static int handle_op(struct test_thread_
62401 return ret;
62402
62403 td->mutexes[id] = 1;
62404- td->event = atomic_add_return(1, &rttest_event);
62405+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62406 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
62407- td->event = atomic_add_return(1, &rttest_event);
62408+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62409 td->mutexes[id] = ret ? 0 : 4;
62410 return ret ? -EINTR : 0;
62411
62412@@ -123,9 +123,9 @@ static int handle_op(struct test_thread_
62413 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
62414 return ret;
62415
62416- td->event = atomic_add_return(1, &rttest_event);
62417+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62418 rt_mutex_unlock(&mutexes[id]);
62419- td->event = atomic_add_return(1, &rttest_event);
62420+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62421 td->mutexes[id] = 0;
62422 return 0;
62423
62424@@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu
62425 break;
62426
62427 td->mutexes[dat] = 2;
62428- td->event = atomic_add_return(1, &rttest_event);
62429+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62430 break;
62431
62432 case RTTEST_LOCKBKL:
62433@@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu
62434 return;
62435
62436 td->mutexes[dat] = 3;
62437- td->event = atomic_add_return(1, &rttest_event);
62438+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62439 break;
62440
62441 case RTTEST_LOCKNOWAIT:
62442@@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu
62443 return;
62444
62445 td->mutexes[dat] = 1;
62446- td->event = atomic_add_return(1, &rttest_event);
62447+ td->event = atomic_add_return_unchecked(1, &rttest_event);
62448 return;
62449
62450 case RTTEST_LOCKBKL:
62451diff -urNp linux-2.6.32.43/kernel/sched.c linux-2.6.32.43/kernel/sched.c
62452--- linux-2.6.32.43/kernel/sched.c 2011-03-27 14:31:47.000000000 -0400
62453+++ linux-2.6.32.43/kernel/sched.c 2011-05-22 23:02:06.000000000 -0400
62454@@ -5043,7 +5043,7 @@ out:
62455 * In CONFIG_NO_HZ case, the idle load balance owner will do the
62456 * rebalancing for all the cpus for whom scheduler ticks are stopped.
62457 */
62458-static void run_rebalance_domains(struct softirq_action *h)
62459+static void run_rebalance_domains(void)
62460 {
62461 int this_cpu = smp_processor_id();
62462 struct rq *this_rq = cpu_rq(this_cpu);
62463@@ -5700,6 +5700,8 @@ asmlinkage void __sched schedule(void)
62464 struct rq *rq;
62465 int cpu;
62466
62467+ pax_track_stack();
62468+
62469 need_resched:
62470 preempt_disable();
62471 cpu = smp_processor_id();
62472@@ -5770,7 +5772,7 @@ EXPORT_SYMBOL(schedule);
62473 * Look out! "owner" is an entirely speculative pointer
62474 * access and not reliable.
62475 */
62476-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
62477+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
62478 {
62479 unsigned int cpu;
62480 struct rq *rq;
62481@@ -5784,10 +5786,10 @@ int mutex_spin_on_owner(struct mutex *lo
62482 * DEBUG_PAGEALLOC could have unmapped it if
62483 * the mutex owner just released it and exited.
62484 */
62485- if (probe_kernel_address(&owner->cpu, cpu))
62486+ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
62487 return 0;
62488 #else
62489- cpu = owner->cpu;
62490+ cpu = task_thread_info(owner)->cpu;
62491 #endif
62492
62493 /*
62494@@ -5816,7 +5818,7 @@ int mutex_spin_on_owner(struct mutex *lo
62495 /*
62496 * Is that owner really running on that cpu?
62497 */
62498- if (task_thread_info(rq->curr) != owner || need_resched())
62499+ if (rq->curr != owner || need_resched())
62500 return 0;
62501
62502 cpu_relax();
62503@@ -6359,6 +6361,8 @@ int can_nice(const struct task_struct *p
62504 /* convert nice value [19,-20] to rlimit style value [1,40] */
62505 int nice_rlim = 20 - nice;
62506
62507+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
62508+
62509 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
62510 capable(CAP_SYS_NICE));
62511 }
62512@@ -6392,7 +6396,8 @@ SYSCALL_DEFINE1(nice, int, increment)
62513 if (nice > 19)
62514 nice = 19;
62515
62516- if (increment < 0 && !can_nice(current, nice))
62517+ if (increment < 0 && (!can_nice(current, nice) ||
62518+ gr_handle_chroot_nice()))
62519 return -EPERM;
62520
62521 retval = security_task_setnice(current, nice);
62522@@ -8774,7 +8779,7 @@ static void init_sched_groups_power(int
62523 long power;
62524 int weight;
62525
62526- WARN_ON(!sd || !sd->groups);
62527+ BUG_ON(!sd || !sd->groups);
62528
62529 if (cpu != group_first_cpu(sd->groups))
62530 return;
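In the kernel/sched.c hunks above, the grsecurity hooks piggy-back on the existing nice handling: can_nice() converts a nice value to RLIMIT_NICE units with nice_rlim = 20 - nice and the added gr_learn_resource() records that value for the learning mode, while sys_nice() additionally calls gr_handle_chroot_nice(), which refuses priority increases requested from inside a chroot when the corresponding grsecurity option is enabled. Below is a small sketch of just the nice-to-rlimit arithmetic and the limit check it feeds; the function names are stand-ins and the grsecurity hooks and CAP_SYS_NICE escape hatch are not modelled.

#include <assert.h>
#include <stdio.h>

/* Mapping used by can_nice() above: nice values [19..-20] become
 * RLIMIT_NICE units [1..40], so a "nicer" limit allows lower nice values. */
static int nice_to_rlimit_units(int niceval)
{
    return 20 - niceval;
}

/* A task may request niceval if the converted value fits under its
 * RLIMIT_NICE soft limit (CAP_SYS_NICE bypass omitted here). */
static int nice_allowed(int niceval, unsigned long rlim_nice_cur)
{
    return (unsigned long)nice_to_rlimit_units(niceval) <= rlim_nice_cur;
}

int main(void)
{
    assert(nice_to_rlimit_units(19) == 1);    /* weakest priority */
    assert(nice_to_rlimit_units(0) == 20);    /* default */
    assert(nice_to_rlimit_units(-20) == 40);  /* strongest priority */

    printf("nice -5 with RLIMIT_NICE=25:  %s\n",
           nice_allowed(-5, 25) ? "allowed" : "denied");   /* 25 <= 25 */
    printf("nice -10 with RLIMIT_NICE=25: %s\n",
           nice_allowed(-10, 25) ? "allowed" : "denied");  /* 30 >  25 */
    return 0;
}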
62531diff -urNp linux-2.6.32.43/kernel/signal.c linux-2.6.32.43/kernel/signal.c
62532--- linux-2.6.32.43/kernel/signal.c 2011-04-17 17:00:52.000000000 -0400
62533+++ linux-2.6.32.43/kernel/signal.c 2011-07-14 20:33:33.000000000 -0400
62534@@ -41,12 +41,12 @@
62535
62536 static struct kmem_cache *sigqueue_cachep;
62537
62538-static void __user *sig_handler(struct task_struct *t, int sig)
62539+static __sighandler_t sig_handler(struct task_struct *t, int sig)
62540 {
62541 return t->sighand->action[sig - 1].sa.sa_handler;
62542 }
62543
62544-static int sig_handler_ignored(void __user *handler, int sig)
62545+static int sig_handler_ignored(__sighandler_t handler, int sig)
62546 {
62547 /* Is it explicitly or implicitly ignored? */
62548 return handler == SIG_IGN ||
62549@@ -56,7 +56,7 @@ static int sig_handler_ignored(void __us
62550 static int sig_task_ignored(struct task_struct *t, int sig,
62551 int from_ancestor_ns)
62552 {
62553- void __user *handler;
62554+ __sighandler_t handler;
62555
62556 handler = sig_handler(t, sig);
62557
62558@@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
62559 */
62560 user = get_uid(__task_cred(t)->user);
62561 atomic_inc(&user->sigpending);
62562+
62563+ if (!override_rlimit)
62564+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
62565 if (override_rlimit ||
62566 atomic_read(&user->sigpending) <=
62567 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
62568@@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct
62569
62570 int unhandled_signal(struct task_struct *tsk, int sig)
62571 {
62572- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
62573+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
62574 if (is_global_init(tsk))
62575 return 1;
62576 if (handler != SIG_IGN && handler != SIG_DFL)
62577@@ -627,6 +630,12 @@ static int check_kill_permission(int sig
62578 }
62579 }
62580
62581+ /* allow glibc communication via tgkill to other threads in our
62582+ thread group */
62583+ if ((info->si_code != SI_TKILL || sig != (SIGRTMIN+1) ||
62584+ task_tgid_vnr(t) != info->si_pid) && gr_handle_signal(t, sig))
62585+ return -EPERM;
62586+
62587 return security_task_kill(t, info, sig, 0);
62588 }
62589
62590@@ -968,7 +977,7 @@ __group_send_sig_info(int sig, struct si
62591 return send_signal(sig, info, p, 1);
62592 }
62593
62594-static int
62595+int
62596 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
62597 {
62598 return send_signal(sig, info, t, 0);
62599@@ -1005,6 +1014,7 @@ force_sig_info(int sig, struct siginfo *
62600 unsigned long int flags;
62601 int ret, blocked, ignored;
62602 struct k_sigaction *action;
62603+ int is_unhandled = 0;
62604
62605 spin_lock_irqsave(&t->sighand->siglock, flags);
62606 action = &t->sighand->action[sig-1];
62607@@ -1019,9 +1029,18 @@ force_sig_info(int sig, struct siginfo *
62608 }
62609 if (action->sa.sa_handler == SIG_DFL)
62610 t->signal->flags &= ~SIGNAL_UNKILLABLE;
62611+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
62612+ is_unhandled = 1;
62613 ret = specific_send_sig_info(sig, info, t);
62614 spin_unlock_irqrestore(&t->sighand->siglock, flags);
62615
62616+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
62617+ normal operation */
62618+ if (is_unhandled) {
62619+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
62620+ gr_handle_crash(t, sig);
62621+ }
62622+
62623 return ret;
62624 }
62625
62626@@ -1081,8 +1100,11 @@ int group_send_sig_info(int sig, struct
62627 {
62628 int ret = check_kill_permission(sig, info, p);
62629
62630- if (!ret && sig)
62631+ if (!ret && sig) {
62632 ret = do_send_sig_info(sig, info, p, true);
62633+ if (!ret)
62634+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
62635+ }
62636
62637 return ret;
62638 }
62639@@ -1644,6 +1666,8 @@ void ptrace_notify(int exit_code)
62640 {
62641 siginfo_t info;
62642
62643+ pax_track_stack();
62644+
62645 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
62646
62647 memset(&info, 0, sizeof info);
62648@@ -2275,7 +2299,15 @@ do_send_specific(pid_t tgid, pid_t pid,
62649 int error = -ESRCH;
62650
62651 rcu_read_lock();
62652- p = find_task_by_vpid(pid);
62653+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
62654+ /* allow glibc communication via tgkill to other threads in our
62655+ thread group */
62656+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
62657+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
62658+ p = find_task_by_vpid_unrestricted(pid);
62659+ else
62660+#endif
62661+ p = find_task_by_vpid(pid);
62662 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
62663 error = check_kill_permission(sig, info, p);
62664 /*
62665diff -urNp linux-2.6.32.43/kernel/smp.c linux-2.6.32.43/kernel/smp.c
62666--- linux-2.6.32.43/kernel/smp.c 2011-03-27 14:31:47.000000000 -0400
62667+++ linux-2.6.32.43/kernel/smp.c 2011-04-17 15:56:46.000000000 -0400
62668@@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void
62669 }
62670 EXPORT_SYMBOL(smp_call_function);
62671
62672-void ipi_call_lock(void)
62673+void ipi_call_lock(void) __acquires(call_function.lock)
62674 {
62675 spin_lock(&call_function.lock);
62676 }
62677
62678-void ipi_call_unlock(void)
62679+void ipi_call_unlock(void) __releases(call_function.lock)
62680 {
62681 spin_unlock(&call_function.lock);
62682 }
62683
62684-void ipi_call_lock_irq(void)
62685+void ipi_call_lock_irq(void) __acquires(call_function.lock)
62686 {
62687 spin_lock_irq(&call_function.lock);
62688 }
62689
62690-void ipi_call_unlock_irq(void)
62691+void ipi_call_unlock_irq(void) __releases(call_function.lock)
62692 {
62693 spin_unlock_irq(&call_function.lock);
62694 }
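The kernel/smp.c changes above only add sparse annotations: in the kernel, __acquires() and __releases() expand to nothing in a regular build and are interpreted only by the sparse static checker, which uses them to verify that lock acquire/release pairs balance, so these hunks change documentation and static-analysis metadata rather than behaviour. A userspace sketch of the same pattern with pthreads follows; the empty macro definitions stand in for the kernel's, and the program links with -lpthread.

#include <pthread.h>
#include <stdio.h>

/* No-op stand-ins: only a checker such as sparse gives these meaning. */
#define __acquires(x)
#define __releases(x)

static pthread_mutex_t call_lock = PTHREAD_MUTEX_INITIALIZER;

/* The annotation documents that this function returns with the lock held... */
static void ipi_call_lock(void) __acquires(call_lock)
{
    pthread_mutex_lock(&call_lock);
}

/* ...and that this one releases it, so a checker can verify lock balance. */
static void ipi_call_unlock(void) __releases(call_lock)
{
    pthread_mutex_unlock(&call_lock);
}

int main(void)
{
    ipi_call_lock();
    puts("critical section");
    ipi_call_unlock();
    return 0;
}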
62695diff -urNp linux-2.6.32.43/kernel/softirq.c linux-2.6.32.43/kernel/softirq.c
62696--- linux-2.6.32.43/kernel/softirq.c 2011-03-27 14:31:47.000000000 -0400
62697+++ linux-2.6.32.43/kernel/softirq.c 2011-04-17 15:56:46.000000000 -0400
62698@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
62699
62700 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
62701
62702-char *softirq_to_name[NR_SOFTIRQS] = {
62703+const char * const softirq_to_name[NR_SOFTIRQS] = {
62704 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
62705 "TASKLET", "SCHED", "HRTIMER", "RCU"
62706 };
62707@@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
62708
62709 asmlinkage void __do_softirq(void)
62710 {
62711- struct softirq_action *h;
62712+ const struct softirq_action *h;
62713 __u32 pending;
62714 int max_restart = MAX_SOFTIRQ_RESTART;
62715 int cpu;
62716@@ -233,7 +233,7 @@ restart:
62717 kstat_incr_softirqs_this_cpu(h - softirq_vec);
62718
62719 trace_softirq_entry(h, softirq_vec);
62720- h->action(h);
62721+ h->action();
62722 trace_softirq_exit(h, softirq_vec);
62723 if (unlikely(prev_count != preempt_count())) {
62724 printk(KERN_ERR "huh, entered softirq %td %s %p"
62725@@ -363,7 +363,7 @@ void raise_softirq(unsigned int nr)
62726 local_irq_restore(flags);
62727 }
62728
62729-void open_softirq(int nr, void (*action)(struct softirq_action *))
62730+void open_softirq(int nr, void (*action)(void))
62731 {
62732 softirq_vec[nr].action = action;
62733 }
62734@@ -419,7 +419,7 @@ void __tasklet_hi_schedule_first(struct
62735
62736 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
62737
62738-static void tasklet_action(struct softirq_action *a)
62739+static void tasklet_action(void)
62740 {
62741 struct tasklet_struct *list;
62742
62743@@ -454,7 +454,7 @@ static void tasklet_action(struct softir
62744 }
62745 }
62746
62747-static void tasklet_hi_action(struct softirq_action *a)
62748+static void tasklet_hi_action(void)
62749 {
62750 struct tasklet_struct *list;
62751
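The kernel/softirq.c hunks above change the softirq callback type from void (*)(struct softirq_action *) to void (*)(void) and constify the name table: the argument was not needed by the handlers, and dropping it lets the dispatch loop in __do_softirq() walk the vector through a const pointer. Because the signature is part of the registration API, every handler and every open_softirq() caller must change in lockstep, which is why matching one-line hunks appear in kernel/sched.c (run_rebalance_domains) and kernel/timer.c (run_timer_softirq). The toy dispatcher below shows the argument-free shape of the table after the change; it is illustrative only, not the kernel's code.

#include <stdio.h>

/* Illustrative dispatch table in the style of softirq_vec after the patch:
 * handlers take no argument, so an entry never needs to be passed back to
 * its own handler. */
struct softirq_action {
    void (*action)(void);
};

#define NR_SOFTIRQS 2

static struct softirq_action softirq_vec[NR_SOFTIRQS];

static void open_softirq(int nr, void (*action)(void))
{
    softirq_vec[nr].action = action;
}

static void timer_softirq(void)   { puts("TIMER softirq"); }
static void tasklet_softirq(void) { puts("TASKLET softirq"); }

static void do_softirq(unsigned int pending)
{
    const struct softirq_action *h = softirq_vec;

    /* walk the pending bitmap, lowest bit first, like __do_softirq() */
    do {
        if (pending & 1)
            h->action();
        h++;
        pending >>= 1;
    } while (pending);
}

int main(void)
{
    open_softirq(0, timer_softirq);
    open_softirq(1, tasklet_softirq);
    do_softirq(0x3);    /* both pending */
    return 0;
}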
62752diff -urNp linux-2.6.32.43/kernel/sys.c linux-2.6.32.43/kernel/sys.c
62753--- linux-2.6.32.43/kernel/sys.c 2011-03-27 14:31:47.000000000 -0400
62754+++ linux-2.6.32.43/kernel/sys.c 2011-04-17 15:56:46.000000000 -0400
62755@@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
62756 error = -EACCES;
62757 goto out;
62758 }
62759+
62760+ if (gr_handle_chroot_setpriority(p, niceval)) {
62761+ error = -EACCES;
62762+ goto out;
62763+ }
62764+
62765 no_nice = security_task_setnice(p, niceval);
62766 if (no_nice) {
62767 error = no_nice;
62768@@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
62769 !(user = find_user(who)))
62770 goto out_unlock; /* No processes for this user */
62771
62772- do_each_thread(g, p)
62773+ do_each_thread(g, p) {
62774 if (__task_cred(p)->uid == who)
62775 error = set_one_prio(p, niceval, error);
62776- while_each_thread(g, p);
62777+ } while_each_thread(g, p);
62778 if (who != cred->uid)
62779 free_uid(user); /* For find_user() */
62780 break;
62781@@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
62782 !(user = find_user(who)))
62783 goto out_unlock; /* No processes for this user */
62784
62785- do_each_thread(g, p)
62786+ do_each_thread(g, p) {
62787 if (__task_cred(p)->uid == who) {
62788 niceval = 20 - task_nice(p);
62789 if (niceval > retval)
62790 retval = niceval;
62791 }
62792- while_each_thread(g, p);
62793+ } while_each_thread(g, p);
62794 if (who != cred->uid)
62795 free_uid(user); /* for find_user() */
62796 break;
62797@@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
62798 goto error;
62799 }
62800
62801+ if (gr_check_group_change(new->gid, new->egid, -1))
62802+ goto error;
62803+
62804 if (rgid != (gid_t) -1 ||
62805 (egid != (gid_t) -1 && egid != old->gid))
62806 new->sgid = new->egid;
62807@@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
62808 goto error;
62809
62810 retval = -EPERM;
62811+
62812+ if (gr_check_group_change(gid, gid, gid))
62813+ goto error;
62814+
62815 if (capable(CAP_SETGID))
62816 new->gid = new->egid = new->sgid = new->fsgid = gid;
62817 else if (gid == old->gid || gid == old->sgid)
62818@@ -627,6 +640,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
62819 goto error;
62820 }
62821
62822+ if (gr_check_user_change(new->uid, new->euid, -1))
62823+ goto error;
62824+
62825 if (new->uid != old->uid) {
62826 retval = set_user(new);
62827 if (retval < 0)
62828@@ -675,6 +691,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
62829 goto error;
62830
62831 retval = -EPERM;
62832+
62833+ if (gr_check_crash_uid(uid))
62834+ goto error;
62835+ if (gr_check_user_change(uid, uid, uid))
62836+ goto error;
62837+
62838 if (capable(CAP_SETUID)) {
62839 new->suid = new->uid = uid;
62840 if (uid != old->uid) {
62841@@ -732,6 +754,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
62842 goto error;
62843 }
62844
62845+ if (gr_check_user_change(ruid, euid, -1))
62846+ goto error;
62847+
62848 if (ruid != (uid_t) -1) {
62849 new->uid = ruid;
62850 if (ruid != old->uid) {
62851@@ -800,6 +825,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
62852 goto error;
62853 }
62854
62855+ if (gr_check_group_change(rgid, egid, -1))
62856+ goto error;
62857+
62858 if (rgid != (gid_t) -1)
62859 new->gid = rgid;
62860 if (egid != (gid_t) -1)
62861@@ -849,6 +877,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
62862 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
62863 goto error;
62864
62865+ if (gr_check_user_change(-1, -1, uid))
62866+ goto error;
62867+
62868 if (uid == old->uid || uid == old->euid ||
62869 uid == old->suid || uid == old->fsuid ||
62870 capable(CAP_SETUID)) {
62871@@ -889,6 +920,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
62872 if (gid == old->gid || gid == old->egid ||
62873 gid == old->sgid || gid == old->fsgid ||
62874 capable(CAP_SETGID)) {
62875+ if (gr_check_group_change(-1, -1, gid))
62876+ goto error;
62877+
62878 if (gid != old_fsgid) {
62879 new->fsgid = gid;
62880 goto change_okay;
62881@@ -1454,7 +1488,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
62882 error = get_dumpable(me->mm);
62883 break;
62884 case PR_SET_DUMPABLE:
62885- if (arg2 < 0 || arg2 > 1) {
62886+ if (arg2 > 1) {
62887 error = -EINVAL;
62888 break;
62889 }
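In the prctl() hunk above, the arg2 < 0 half of the PR_SET_DUMPABLE check is dropped because arg2 is an unsigned long syscall argument: that comparison can never be true (compilers warn that it is always false), while arg2 > 1 already rejects every out-of-range value, including negative numbers from userspace, which arrive as very large unsigned values. A two-line demonstration:

#include <stdio.h>

int main(void)
{
    unsigned long arg2 = (unsigned long)-1;   /* userspace passed -1 */

    printf("arg2 < 0 -> %d\n", arg2 < 0);     /* always 0 */
    printf("arg2 > 1 -> %d\n", arg2 > 1);     /* 1: rejected with -EINVAL */
    return 0;
}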
62890diff -urNp linux-2.6.32.43/kernel/sysctl.c linux-2.6.32.43/kernel/sysctl.c
62891--- linux-2.6.32.43/kernel/sysctl.c 2011-03-27 14:31:47.000000000 -0400
62892+++ linux-2.6.32.43/kernel/sysctl.c 2011-04-17 15:56:46.000000000 -0400
62893@@ -63,6 +63,13 @@
62894 static int deprecated_sysctl_warning(struct __sysctl_args *args);
62895
62896 #if defined(CONFIG_SYSCTL)
62897+#include <linux/grsecurity.h>
62898+#include <linux/grinternal.h>
62899+
62900+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
62901+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
62902+ const int op);
62903+extern int gr_handle_chroot_sysctl(const int op);
62904
62905 /* External variables not in a header file. */
62906 extern int C_A_D;
62907@@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_ta
62908 static int proc_taint(struct ctl_table *table, int write,
62909 void __user *buffer, size_t *lenp, loff_t *ppos);
62910 #endif
62911+extern ctl_table grsecurity_table[];
62912
62913 static struct ctl_table root_table[];
62914 static struct ctl_table_root sysctl_table_root;
62915@@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
62916 int sysctl_legacy_va_layout;
62917 #endif
62918
62919+#ifdef CONFIG_PAX_SOFTMODE
62920+static ctl_table pax_table[] = {
62921+ {
62922+ .ctl_name = CTL_UNNUMBERED,
62923+ .procname = "softmode",
62924+ .data = &pax_softmode,
62925+ .maxlen = sizeof(unsigned int),
62926+ .mode = 0600,
62927+ .proc_handler = &proc_dointvec,
62928+ },
62929+
62930+ { .ctl_name = 0 }
62931+};
62932+#endif
62933+
62934 extern int prove_locking;
62935 extern int lock_stat;
62936
62937@@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = N
62938 #endif
62939
62940 static struct ctl_table kern_table[] = {
62941+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
62942+ {
62943+ .ctl_name = CTL_UNNUMBERED,
62944+ .procname = "grsecurity",
62945+ .mode = 0500,
62946+ .child = grsecurity_table,
62947+ },
62948+#endif
62949+
62950+#ifdef CONFIG_PAX_SOFTMODE
62951+ {
62952+ .ctl_name = CTL_UNNUMBERED,
62953+ .procname = "pax",
62954+ .mode = 0500,
62955+ .child = pax_table,
62956+ },
62957+#endif
62958+
62959 {
62960 .ctl_name = CTL_UNNUMBERED,
62961 .procname = "sched_child_runs_first",
62962@@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
62963 .data = &modprobe_path,
62964 .maxlen = KMOD_PATH_LEN,
62965 .mode = 0644,
62966- .proc_handler = &proc_dostring,
62967- .strategy = &sysctl_string,
62968+ .proc_handler = &proc_dostring_modpriv,
62969+ .strategy = &sysctl_string_modpriv,
62970 },
62971 {
62972 .ctl_name = CTL_UNNUMBERED,
62973@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
62974 .mode = 0644,
62975 .proc_handler = &proc_dointvec
62976 },
62977+ {
62978+ .procname = "heap_stack_gap",
62979+ .data = &sysctl_heap_stack_gap,
62980+ .maxlen = sizeof(sysctl_heap_stack_gap),
62981+ .mode = 0644,
62982+ .proc_handler = proc_doulongvec_minmax,
62983+ },
62984 #else
62985 {
62986 .ctl_name = CTL_UNNUMBERED,
62987@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
62988 return 0;
62989 }
62990
62991+static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
62992+
62993 static int parse_table(int __user *name, int nlen,
62994 void __user *oldval, size_t __user *oldlenp,
62995 void __user *newval, size_t newlen,
62996@@ -1821,7 +1871,7 @@ repeat:
62997 if (n == table->ctl_name) {
62998 int error;
62999 if (table->child) {
63000- if (sysctl_perm(root, table, MAY_EXEC))
63001+ if (sysctl_perm_nochk(root, table, MAY_EXEC))
63002 return -EPERM;
63003 name++;
63004 nlen--;
63005@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
63006 int error;
63007 int mode;
63008
63009+ if (table->parent != NULL && table->parent->procname != NULL &&
63010+ table->procname != NULL &&
63011+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
63012+ return -EACCES;
63013+ if (gr_handle_chroot_sysctl(op))
63014+ return -EACCES;
63015+ error = gr_handle_sysctl(table, op);
63016+ if (error)
63017+ return error;
63018+
63019+ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
63020+ if (error)
63021+ return error;
63022+
63023+ if (root->permissions)
63024+ mode = root->permissions(root, current->nsproxy, table);
63025+ else
63026+ mode = table->mode;
63027+
63028+ return test_perm(mode, op);
63029+}
63030+
63031+int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
63032+{
63033+ int error;
63034+ int mode;
63035+
63036 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
63037 if (error)
63038 return error;
63039@@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *tabl
63040 buffer, lenp, ppos);
63041 }
63042
63043+int proc_dostring_modpriv(struct ctl_table *table, int write,
63044+ void __user *buffer, size_t *lenp, loff_t *ppos)
63045+{
63046+ if (write && !capable(CAP_SYS_MODULE))
63047+ return -EPERM;
63048+
63049+ return _proc_do_string(table->data, table->maxlen, write,
63050+ buffer, lenp, ppos);
63051+}
63052+
63053
63054 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
63055 int *valp,
63056@@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(v
63057 vleft = table->maxlen / sizeof(unsigned long);
63058 left = *lenp;
63059
63060- for (; left && vleft--; i++, min++, max++, first=0) {
63061+ for (; left && vleft--; i++, first=0) {
63062 if (write) {
63063 while (left) {
63064 char c;
63065@@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *tabl
63066 return -ENOSYS;
63067 }
63068
63069+int proc_dostring_modpriv(struct ctl_table *table, int write,
63070+ void __user *buffer, size_t *lenp, loff_t *ppos)
63071+{
63072+ return -ENOSYS;
63073+}
63074+
63075 int proc_dointvec(struct ctl_table *table, int write,
63076 void __user *buffer, size_t *lenp, loff_t *ppos)
63077 {
63078@@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *tabl
63079 return 1;
63080 }
63081
63082+int sysctl_string_modpriv(struct ctl_table *table,
63083+ void __user *oldval, size_t __user *oldlenp,
63084+ void __user *newval, size_t newlen)
63085+{
63086+ if (newval && newlen && !capable(CAP_SYS_MODULE))
63087+ return -EPERM;
63088+
63089+ return sysctl_string(table, oldval, oldlenp, newval, newlen);
63090+}
63091+
63092 /*
63093 * This function makes sure that all of the integers in the vector
63094 * are between the minimum and maximum values given in the arrays
63095@@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *tabl
63096 return -ENOSYS;
63097 }
63098
63099+int sysctl_string_modpriv(struct ctl_table *table,
63100+ void __user *oldval, size_t __user *oldlenp,
63101+ void __user *newval, size_t newlen)
63102+{
63103+ return -ENOSYS;
63104+}
63105+
63106 int sysctl_intvec(struct ctl_table *table,
63107 void __user *oldval, size_t __user *oldlenp,
63108 void __user *newval, size_t newlen)
63109@@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
63110 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
63111 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
63112 EXPORT_SYMBOL(proc_dostring);
63113+EXPORT_SYMBOL(proc_dostring_modpriv);
63114 EXPORT_SYMBOL(proc_doulongvec_minmax);
63115 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
63116 EXPORT_SYMBOL(register_sysctl_table);
63117@@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
63118 EXPORT_SYMBOL(sysctl_jiffies);
63119 EXPORT_SYMBOL(sysctl_ms_jiffies);
63120 EXPORT_SYMBOL(sysctl_string);
63121+EXPORT_SYMBOL(sysctl_string_modpriv);
63122 EXPORT_SYMBOL(sysctl_data);
63123 EXPORT_SYMBOL(unregister_sysctl_table);
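The kernel/sysctl.c hunks above wire the grsecurity and PaX softmode sysctl trees into kern_table, add a sysctl_perm_nochk() variant without the new grsecurity checks for the directory traversal in parse_table() (so the gr hooks run in sysctl_perm() itself), and switch the modprobe_path entry to new proc_dostring_modpriv()/sysctl_string_modpriv() handlers: anyone may still read the path, but rewriting it now requires CAP_SYS_MODULE, since whoever controls modprobe_path controls what the kernel executes when it auto-loads modules. Below is a userspace sketch of that capability-gated wrapper pattern; the function names and the has_cap_sys_module flag are stand-ins, not kernel API.

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stand-in for the underlying string handler (proc_dostring). */
static int do_string(char *data, size_t maxlen, int write, const char *input)
{
    if (write) {
        strncpy(data, input, maxlen - 1);
        data[maxlen - 1] = '\0';
    } else {
        printf("%s\n", data);
    }
    return 0;
}

/* Wrapper in the style of proc_dostring_modpriv: reads pass through,
 * writes additionally require the "module" privilege. */
static int do_string_modpriv(char *data, size_t maxlen, int write,
                             const char *input, int has_cap_sys_module)
{
    if (write && !has_cap_sys_module)
        return -EPERM;
    return do_string(data, maxlen, write, input);
}

int main(void)
{
    char modprobe_path[256] = "/sbin/modprobe";

    /* unprivileged write is refused ... */
    if (do_string_modpriv(modprobe_path, sizeof(modprobe_path), 1,
                          "/tmp/evil", 0) == -EPERM)
        puts("write denied without CAP_SYS_MODULE");

    /* ... privileged writes and plain reads still work */
    do_string_modpriv(modprobe_path, sizeof(modprobe_path), 1, "/sbin/modprobe", 1);
    do_string_modpriv(modprobe_path, sizeof(modprobe_path), 0, NULL, 0);
    return 0;
}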
63124diff -urNp linux-2.6.32.43/kernel/sysctl_check.c linux-2.6.32.43/kernel/sysctl_check.c
63125--- linux-2.6.32.43/kernel/sysctl_check.c 2011-03-27 14:31:47.000000000 -0400
63126+++ linux-2.6.32.43/kernel/sysctl_check.c 2011-04-17 15:56:46.000000000 -0400
63127@@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *n
63128 } else {
63129 if ((table->strategy == sysctl_data) ||
63130 (table->strategy == sysctl_string) ||
63131+ (table->strategy == sysctl_string_modpriv) ||
63132 (table->strategy == sysctl_intvec) ||
63133 (table->strategy == sysctl_jiffies) ||
63134 (table->strategy == sysctl_ms_jiffies) ||
63135 (table->proc_handler == proc_dostring) ||
63136+ (table->proc_handler == proc_dostring_modpriv) ||
63137 (table->proc_handler == proc_dointvec) ||
63138 (table->proc_handler == proc_dointvec_minmax) ||
63139 (table->proc_handler == proc_dointvec_jiffies) ||
63140diff -urNp linux-2.6.32.43/kernel/taskstats.c linux-2.6.32.43/kernel/taskstats.c
63141--- linux-2.6.32.43/kernel/taskstats.c 2011-07-13 17:23:04.000000000 -0400
63142+++ linux-2.6.32.43/kernel/taskstats.c 2011-07-13 17:23:19.000000000 -0400
63143@@ -26,9 +26,12 @@
63144 #include <linux/cgroup.h>
63145 #include <linux/fs.h>
63146 #include <linux/file.h>
63147+#include <linux/grsecurity.h>
63148 #include <net/genetlink.h>
63149 #include <asm/atomic.h>
63150
63151+extern int gr_is_taskstats_denied(int pid);
63152+
63153 /*
63154 * Maximum length of a cpumask that can be specified in
63155 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
63156@@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_
63157 size_t size;
63158 cpumask_var_t mask;
63159
63160+ if (gr_is_taskstats_denied(current->pid))
63161+ return -EACCES;
63162+
63163 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
63164 return -ENOMEM;
63165
63166diff -urNp linux-2.6.32.43/kernel/time/tick-broadcast.c linux-2.6.32.43/kernel/time/tick-broadcast.c
63167--- linux-2.6.32.43/kernel/time/tick-broadcast.c 2011-05-23 16:56:59.000000000 -0400
63168+++ linux-2.6.32.43/kernel/time/tick-broadcast.c 2011-05-23 16:57:13.000000000 -0400
63169@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
63170 * then clear the broadcast bit.
63171 */
63172 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
63173- int cpu = smp_processor_id();
63174+ cpu = smp_processor_id();
63175
63176 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
63177 tick_broadcast_clear_oneshot(cpu);
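The kernel/time/tick-broadcast.c hunk above replaces an inner "int cpu = smp_processor_id();" declaration, which shadowed the cpu variable already in scope in tick_device_uses_broadcast(), with a plain assignment, so the value chosen in that branch is also visible to the rest of the function. Shadowing like this is exactly the pattern -Wshadow exists to catch: the outer variable silently keeps its old value. A tiny illustration:

#include <stdio.h>

static int smp_processor_id(void) { return 3; }   /* pretend current CPU */

static void with_shadow(int cpu)
{
    if (1) {
        int cpu = smp_processor_id();   /* new variable: shadows the parameter */
        (void)cpu;
    }
    printf("with shadow:    cpu = %d\n", cpu);     /* still the caller's value */
}

static void without_shadow(int cpu)
{
    if (1)
        cpu = smp_processor_id();       /* assigns to the parameter itself */
    printf("without shadow: cpu = %d\n", cpu);     /* now the current CPU */
}

int main(void)
{
    with_shadow(0);
    without_shadow(0);
    return 0;
}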
63178diff -urNp linux-2.6.32.43/kernel/time/timekeeping.c linux-2.6.32.43/kernel/time/timekeeping.c
63179--- linux-2.6.32.43/kernel/time/timekeeping.c 2011-06-25 12:55:35.000000000 -0400
63180+++ linux-2.6.32.43/kernel/time/timekeeping.c 2011-06-25 12:56:37.000000000 -0400
63181@@ -14,6 +14,7 @@
63182 #include <linux/init.h>
63183 #include <linux/mm.h>
63184 #include <linux/sched.h>
63185+#include <linux/grsecurity.h>
63186 #include <linux/sysdev.h>
63187 #include <linux/clocksource.h>
63188 #include <linux/jiffies.h>
63189@@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
63190 */
63191 struct timespec ts = xtime;
63192 timespec_add_ns(&ts, nsec);
63193- ACCESS_ONCE(xtime_cache) = ts;
63194+ ACCESS_ONCE_RW(xtime_cache) = ts;
63195 }
63196
63197 /* must hold xtime_lock */
63198@@ -333,6 +334,8 @@ int do_settimeofday(struct timespec *tv)
63199 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
63200 return -EINVAL;
63201
63202+ gr_log_timechange();
63203+
63204 write_seqlock_irqsave(&xtime_lock, flags);
63205
63206 timekeeping_forward_now();
63207diff -urNp linux-2.6.32.43/kernel/time/timer_list.c linux-2.6.32.43/kernel/time/timer_list.c
63208--- linux-2.6.32.43/kernel/time/timer_list.c 2011-03-27 14:31:47.000000000 -0400
63209+++ linux-2.6.32.43/kernel/time/timer_list.c 2011-04-17 15:56:46.000000000 -0400
63210@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
63211
63212 static void print_name_offset(struct seq_file *m, void *sym)
63213 {
63214+#ifdef CONFIG_GRKERNSEC_HIDESYM
63215+ SEQ_printf(m, "<%p>", NULL);
63216+#else
63217 char symname[KSYM_NAME_LEN];
63218
63219 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
63220 SEQ_printf(m, "<%p>", sym);
63221 else
63222 SEQ_printf(m, "%s", symname);
63223+#endif
63224 }
63225
63226 static void
63227@@ -112,7 +116,11 @@ next_one:
63228 static void
63229 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
63230 {
63231+#ifdef CONFIG_GRKERNSEC_HIDESYM
63232+ SEQ_printf(m, " .base: %p\n", NULL);
63233+#else
63234 SEQ_printf(m, " .base: %p\n", base);
63235+#endif
63236 SEQ_printf(m, " .index: %d\n",
63237 base->index);
63238 SEQ_printf(m, " .resolution: %Lu nsecs\n",
63239@@ -289,7 +297,11 @@ static int __init init_timer_list_procfs
63240 {
63241 struct proc_dir_entry *pe;
63242
63243+#ifdef CONFIG_GRKERNSEC_PROC_ADD
63244+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
63245+#else
63246 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
63247+#endif
63248 if (!pe)
63249 return -ENOMEM;
63250 return 0;
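The kernel/time/timer_list.c hunks above belong to GRKERNSEC_HIDESYM/GRKERNSEC_PROC_ADD: with those options enabled, /proc/timer_list stops printing symbol names and base addresses (a NULL pointer is printed instead) and the file is created 0400 rather than 0444, since kernel addresses in world-readable proc files undermine ASLR-style hardening. The snippet below sketches the compile-time pointer-hiding idiom; the HIDESYM macro name is illustrative, not the kernel's config symbol.

#include <stdio.h>

#define HIDESYM 1   /* toggle to mimic the config option */

static void print_name_offset(const void *sym)
{
#if HIDESYM
    /* leak nothing: the pointer value is replaced by NULL */
    printf("<%p>\n", (void *)NULL);
#else
    /* normal path: expose the address (symbol lookup omitted here) */
    printf("<%p>\n", sym);
#endif
}

int main(void)
{
    int object;

    print_name_offset(&object);
    return 0;
}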
63251diff -urNp linux-2.6.32.43/kernel/time/timer_stats.c linux-2.6.32.43/kernel/time/timer_stats.c
63252--- linux-2.6.32.43/kernel/time/timer_stats.c 2011-03-27 14:31:47.000000000 -0400
63253+++ linux-2.6.32.43/kernel/time/timer_stats.c 2011-05-04 17:56:28.000000000 -0400
63254@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
63255 static unsigned long nr_entries;
63256 static struct entry entries[MAX_ENTRIES];
63257
63258-static atomic_t overflow_count;
63259+static atomic_unchecked_t overflow_count;
63260
63261 /*
63262 * The entries are in a hash-table, for fast lookup:
63263@@ -140,7 +140,7 @@ static void reset_entries(void)
63264 nr_entries = 0;
63265 memset(entries, 0, sizeof(entries));
63266 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
63267- atomic_set(&overflow_count, 0);
63268+ atomic_set_unchecked(&overflow_count, 0);
63269 }
63270
63271 static struct entry *alloc_entry(void)
63272@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
63273 if (likely(entry))
63274 entry->count++;
63275 else
63276- atomic_inc(&overflow_count);
63277+ atomic_inc_unchecked(&overflow_count);
63278
63279 out_unlock:
63280 spin_unlock_irqrestore(lock, flags);
63281@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
63282
63283 static void print_name_offset(struct seq_file *m, unsigned long addr)
63284 {
63285+#ifdef CONFIG_GRKERNSEC_HIDESYM
63286+ seq_printf(m, "<%p>", NULL);
63287+#else
63288 char symname[KSYM_NAME_LEN];
63289
63290 if (lookup_symbol_name(addr, symname) < 0)
63291 seq_printf(m, "<%p>", (void *)addr);
63292 else
63293 seq_printf(m, "%s", symname);
63294+#endif
63295 }
63296
63297 static int tstats_show(struct seq_file *m, void *v)
63298@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
63299
63300 seq_puts(m, "Timer Stats Version: v0.2\n");
63301 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
63302- if (atomic_read(&overflow_count))
63303+ if (atomic_read_unchecked(&overflow_count))
63304 seq_printf(m, "Overflow: %d entries\n",
63305- atomic_read(&overflow_count));
63306+ atomic_read_unchecked(&overflow_count));
63307
63308 for (i = 0; i < nr_entries; i++) {
63309 entry = entries + i;
63310@@ -415,7 +419,11 @@ static int __init init_tstats_procfs(voi
63311 {
63312 struct proc_dir_entry *pe;
63313
63314+#ifdef CONFIG_GRKERNSEC_PROC_ADD
63315+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
63316+#else
63317 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
63318+#endif
63319 if (!pe)
63320 return -ENOMEM;
63321 return 0;
63322diff -urNp linux-2.6.32.43/kernel/time.c linux-2.6.32.43/kernel/time.c
63323--- linux-2.6.32.43/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
63324+++ linux-2.6.32.43/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
63325@@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec
63326 return error;
63327
63328 if (tz) {
63329+ /* we log in do_settimeofday called below, so don't log twice
63330+ */
63331+ if (!tv)
63332+ gr_log_timechange();
63333+
63334 /* SMP safe, global irq locking makes it work. */
63335 sys_tz = *tz;
63336 update_vsyscall_tz();
63337@@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
63338 * Avoid unnecessary multiplications/divisions in the
63339 * two most common HZ cases:
63340 */
63341-unsigned int inline jiffies_to_msecs(const unsigned long j)
63342+inline unsigned int jiffies_to_msecs(const unsigned long j)
63343 {
63344 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
63345 return (MSEC_PER_SEC / HZ) * j;
63346@@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
63347 }
63348 EXPORT_SYMBOL(jiffies_to_msecs);
63349
63350-unsigned int inline jiffies_to_usecs(const unsigned long j)
63351+inline unsigned int jiffies_to_usecs(const unsigned long j)
63352 {
63353 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
63354 return (USEC_PER_SEC / HZ) * j;
63355diff -urNp linux-2.6.32.43/kernel/timer.c linux-2.6.32.43/kernel/timer.c
63356--- linux-2.6.32.43/kernel/timer.c 2011-03-27 14:31:47.000000000 -0400
63357+++ linux-2.6.32.43/kernel/timer.c 2011-04-17 15:56:46.000000000 -0400
63358@@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
63359 /*
63360 * This function runs timers and the timer-tq in bottom half context.
63361 */
63362-static void run_timer_softirq(struct softirq_action *h)
63363+static void run_timer_softirq(void)
63364 {
63365 struct tvec_base *base = __get_cpu_var(tvec_bases);
63366
63367diff -urNp linux-2.6.32.43/kernel/trace/blktrace.c linux-2.6.32.43/kernel/trace/blktrace.c
63368--- linux-2.6.32.43/kernel/trace/blktrace.c 2011-03-27 14:31:47.000000000 -0400
63369+++ linux-2.6.32.43/kernel/trace/blktrace.c 2011-05-04 17:56:28.000000000 -0400
63370@@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f
63371 struct blk_trace *bt = filp->private_data;
63372 char buf[16];
63373
63374- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
63375+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
63376
63377 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
63378 }
63379@@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str
63380 return 1;
63381
63382 bt = buf->chan->private_data;
63383- atomic_inc(&bt->dropped);
63384+ atomic_inc_unchecked(&bt->dropped);
63385 return 0;
63386 }
63387
63388@@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu
63389
63390 bt->dir = dir;
63391 bt->dev = dev;
63392- atomic_set(&bt->dropped, 0);
63393+ atomic_set_unchecked(&bt->dropped, 0);
63394
63395 ret = -EIO;
63396 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
63397diff -urNp linux-2.6.32.43/kernel/trace/ftrace.c linux-2.6.32.43/kernel/trace/ftrace.c
63398--- linux-2.6.32.43/kernel/trace/ftrace.c 2011-06-25 12:55:35.000000000 -0400
63399+++ linux-2.6.32.43/kernel/trace/ftrace.c 2011-06-25 12:56:37.000000000 -0400
63400@@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod,
63401
63402 ip = rec->ip;
63403
63404+ ret = ftrace_arch_code_modify_prepare();
63405+ FTRACE_WARN_ON(ret);
63406+ if (ret)
63407+ return 0;
63408+
63409 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
63410+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
63411 if (ret) {
63412 ftrace_bug(ret, ip);
63413 rec->flags |= FTRACE_FL_FAILED;
63414- return 0;
63415 }
63416- return 1;
63417+ return ret ? 0 : 1;
63418 }
63419
63420 /*
63421diff -urNp linux-2.6.32.43/kernel/trace/ring_buffer.c linux-2.6.32.43/kernel/trace/ring_buffer.c
63422--- linux-2.6.32.43/kernel/trace/ring_buffer.c 2011-03-27 14:31:47.000000000 -0400
63423+++ linux-2.6.32.43/kernel/trace/ring_buffer.c 2011-04-17 15:56:46.000000000 -0400
63424@@ -606,7 +606,7 @@ static struct list_head *rb_list_head(st
63425 * the reader page). But if the next page is a header page,
63426 * its flags will be non zero.
63427 */
63428-static int inline
63429+static inline int
63430 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
63431 struct buffer_page *page, struct list_head *list)
63432 {
63433diff -urNp linux-2.6.32.43/kernel/trace/trace.c linux-2.6.32.43/kernel/trace/trace.c
63434--- linux-2.6.32.43/kernel/trace/trace.c 2011-03-27 14:31:47.000000000 -0400
63435+++ linux-2.6.32.43/kernel/trace/trace.c 2011-05-16 21:46:57.000000000 -0400
63436@@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(
63437 size_t rem;
63438 unsigned int i;
63439
63440+ pax_track_stack();
63441+
63442 /* copy the tracer to avoid using a global lock all around */
63443 mutex_lock(&trace_types_lock);
63444 if (unlikely(old_tracer != current_trace && current_trace)) {
63445@@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file
63446 int entries, size, i;
63447 size_t ret;
63448
63449+ pax_track_stack();
63450+
63451 if (*ppos & (PAGE_SIZE - 1)) {
63452 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
63453 return -EINVAL;
63454@@ -3816,10 +3820,9 @@ static const struct file_operations trac
63455 };
63456 #endif
63457
63458-static struct dentry *d_tracer;
63459-
63460 struct dentry *tracing_init_dentry(void)
63461 {
63462+ static struct dentry *d_tracer;
63463 static int once;
63464
63465 if (d_tracer)
63466@@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
63467 return d_tracer;
63468 }
63469
63470-static struct dentry *d_percpu;
63471-
63472 struct dentry *tracing_dentry_percpu(void)
63473 {
63474+ static struct dentry *d_percpu;
63475 static int once;
63476 struct dentry *d_tracer;
63477
63478diff -urNp linux-2.6.32.43/kernel/trace/trace_events.c linux-2.6.32.43/kernel/trace/trace_events.c
63479--- linux-2.6.32.43/kernel/trace/trace_events.c 2011-03-27 14:31:47.000000000 -0400
63480+++ linux-2.6.32.43/kernel/trace/trace_events.c 2011-04-17 15:56:46.000000000 -0400
63481@@ -951,6 +951,8 @@ static LIST_HEAD(ftrace_module_file_list
63482 * Modules must own their file_operations to keep up with
63483 * reference counting.
63484 */
63485+
63486+/* cannot be const */
63487 struct ftrace_module_file_ops {
63488 struct list_head list;
63489 struct module *mod;
63490diff -urNp linux-2.6.32.43/kernel/trace/trace_mmiotrace.c linux-2.6.32.43/kernel/trace/trace_mmiotrace.c
63491--- linux-2.6.32.43/kernel/trace/trace_mmiotrace.c 2011-03-27 14:31:47.000000000 -0400
63492+++ linux-2.6.32.43/kernel/trace/trace_mmiotrace.c 2011-05-04 17:56:28.000000000 -0400
63493@@ -23,7 +23,7 @@ struct header_iter {
63494 static struct trace_array *mmio_trace_array;
63495 static bool overrun_detected;
63496 static unsigned long prev_overruns;
63497-static atomic_t dropped_count;
63498+static atomic_unchecked_t dropped_count;
63499
63500 static void mmio_reset_data(struct trace_array *tr)
63501 {
63502@@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter
63503
63504 static unsigned long count_overruns(struct trace_iterator *iter)
63505 {
63506- unsigned long cnt = atomic_xchg(&dropped_count, 0);
63507+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
63508 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
63509
63510 if (over > prev_overruns)
63511@@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct
63512 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
63513 sizeof(*entry), 0, pc);
63514 if (!event) {
63515- atomic_inc(&dropped_count);
63516+ atomic_inc_unchecked(&dropped_count);
63517 return;
63518 }
63519 entry = ring_buffer_event_data(event);
63520@@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct
63521 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
63522 sizeof(*entry), 0, pc);
63523 if (!event) {
63524- atomic_inc(&dropped_count);
63525+ atomic_inc_unchecked(&dropped_count);
63526 return;
63527 }
63528 entry = ring_buffer_event_data(event);
63529diff -urNp linux-2.6.32.43/kernel/trace/trace_output.c linux-2.6.32.43/kernel/trace/trace_output.c
63530--- linux-2.6.32.43/kernel/trace/trace_output.c 2011-03-27 14:31:47.000000000 -0400
63531+++ linux-2.6.32.43/kernel/trace/trace_output.c 2011-04-17 15:56:46.000000000 -0400
63532@@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s,
63533 return 0;
63534 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
63535 if (!IS_ERR(p)) {
63536- p = mangle_path(s->buffer + s->len, p, "\n");
63537+ p = mangle_path(s->buffer + s->len, p, "\n\\");
63538 if (p) {
63539 s->len = p - s->buffer;
63540 return 1;
63541diff -urNp linux-2.6.32.43/kernel/trace/trace_stack.c linux-2.6.32.43/kernel/trace/trace_stack.c
63542--- linux-2.6.32.43/kernel/trace/trace_stack.c 2011-03-27 14:31:47.000000000 -0400
63543+++ linux-2.6.32.43/kernel/trace/trace_stack.c 2011-04-17 15:56:46.000000000 -0400
63544@@ -50,7 +50,7 @@ static inline void check_stack(void)
63545 return;
63546
63547 /* we do not handle interrupt stacks yet */
63548- if (!object_is_on_stack(&this_size))
63549+ if (!object_starts_on_stack(&this_size))
63550 return;
63551
63552 local_irq_save(flags);
63553diff -urNp linux-2.6.32.43/kernel/trace/trace_workqueue.c linux-2.6.32.43/kernel/trace/trace_workqueue.c
63554--- linux-2.6.32.43/kernel/trace/trace_workqueue.c 2011-03-27 14:31:47.000000000 -0400
63555+++ linux-2.6.32.43/kernel/trace/trace_workqueue.c 2011-04-17 15:56:46.000000000 -0400
63556@@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
63557 int cpu;
63558 pid_t pid;
63559 /* Can be inserted from interrupt or user context, need to be atomic */
63560- atomic_t inserted;
63561+ atomic_unchecked_t inserted;
63562 /*
63563 * Don't need to be atomic, works are serialized in a single workqueue thread
63564 * on a single CPU.
63565@@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st
63566 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
63567 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
63568 if (node->pid == wq_thread->pid) {
63569- atomic_inc(&node->inserted);
63570+ atomic_inc_unchecked(&node->inserted);
63571 goto found;
63572 }
63573 }
63574@@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se
63575 tsk = get_pid_task(pid, PIDTYPE_PID);
63576 if (tsk) {
63577 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
63578- atomic_read(&cws->inserted), cws->executed,
63579+ atomic_read_unchecked(&cws->inserted), cws->executed,
63580 tsk->comm);
63581 put_task_struct(tsk);
63582 }
63583diff -urNp linux-2.6.32.43/kernel/user.c linux-2.6.32.43/kernel/user.c
63584--- linux-2.6.32.43/kernel/user.c 2011-03-27 14:31:47.000000000 -0400
63585+++ linux-2.6.32.43/kernel/user.c 2011-04-17 15:56:46.000000000 -0400
63586@@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct use
63587 spin_lock_irq(&uidhash_lock);
63588 up = uid_hash_find(uid, hashent);
63589 if (up) {
63590+ put_user_ns(ns);
63591 key_put(new->uid_keyring);
63592 key_put(new->session_keyring);
63593 kmem_cache_free(uid_cachep, new);
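The kernel/user.c hunk above plugs a reference leak in alloc_uid(): when the uid is found already hashed, the speculatively allocated user_struct is discarded, but the user-namespace reference taken for it earlier in the function was not dropped; the added put_user_ns(ns) releases it on that path. The toy allocate-or-find pattern below shows why the discard path needs its own put; the struct and helper names are illustrative.

#include <stdio.h>

/* Toy refcounted object standing in for struct user_namespace. */
struct ns { int refcount; };

static struct ns *get_ns(struct ns *n) { n->refcount++; return n; }
static void put_ns(struct ns *n)       { n->refcount--; }

/* Allocate-or-find: a reference is taken for the speculatively prepared
 * object; if an existing object is found instead, that reference must be
 * dropped on the discard path or it leaks (the bug fixed above). */
static void alloc_or_find(struct ns *n, int already_exists, int fixed)
{
    get_ns(n);                      /* reference for the new object */

    if (already_exists) {
        if (fixed)
            put_ns(n);              /* the put added by the patch */
        /* ... discard the speculative allocation ... */
    }
    /* else: the new object keeps the reference */
}

int main(void)
{
    struct ns n = { .refcount = 1 };

    alloc_or_find(&n, 1, 0);
    printf("without fix: refcount = %d (leaked)\n", n.refcount);
    n.refcount = 1;
    alloc_or_find(&n, 1, 1);
    printf("with fix:    refcount = %d\n", n.refcount);
    return 0;
}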
63594diff -urNp linux-2.6.32.43/lib/bug.c linux-2.6.32.43/lib/bug.c
63595--- linux-2.6.32.43/lib/bug.c 2011-03-27 14:31:47.000000000 -0400
63596+++ linux-2.6.32.43/lib/bug.c 2011-04-17 15:56:46.000000000 -0400
63597@@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
63598 return BUG_TRAP_TYPE_NONE;
63599
63600 bug = find_bug(bugaddr);
63601+ if (!bug)
63602+ return BUG_TRAP_TYPE_NONE;
63603
63604 printk(KERN_EMERG "------------[ cut here ]------------\n");
63605
63606diff -urNp linux-2.6.32.43/lib/debugobjects.c linux-2.6.32.43/lib/debugobjects.c
63607--- linux-2.6.32.43/lib/debugobjects.c 2011-07-13 17:23:04.000000000 -0400
63608+++ linux-2.6.32.43/lib/debugobjects.c 2011-07-13 17:23:19.000000000 -0400
63609@@ -277,7 +277,7 @@ static void debug_object_is_on_stack(voi
63610 if (limit > 4)
63611 return;
63612
63613- is_on_stack = object_is_on_stack(addr);
63614+ is_on_stack = object_starts_on_stack(addr);
63615 if (is_on_stack == onstack)
63616 return;
63617
63618diff -urNp linux-2.6.32.43/lib/dma-debug.c linux-2.6.32.43/lib/dma-debug.c
63619--- linux-2.6.32.43/lib/dma-debug.c 2011-03-27 14:31:47.000000000 -0400
63620+++ linux-2.6.32.43/lib/dma-debug.c 2011-04-17 15:56:46.000000000 -0400
63621@@ -861,7 +861,7 @@ out:
63622
63623 static void check_for_stack(struct device *dev, void *addr)
63624 {
63625- if (object_is_on_stack(addr))
63626+ if (object_starts_on_stack(addr))
63627 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
63628 "stack [addr=%p]\n", addr);
63629 }
63630diff -urNp linux-2.6.32.43/lib/idr.c linux-2.6.32.43/lib/idr.c
63631--- linux-2.6.32.43/lib/idr.c 2011-03-27 14:31:47.000000000 -0400
63632+++ linux-2.6.32.43/lib/idr.c 2011-04-17 15:56:46.000000000 -0400
63633@@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, in
63634 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
63635
63636 /* if already at the top layer, we need to grow */
63637- if (id >= 1 << (idp->layers * IDR_BITS)) {
63638+ if (id >= (1 << (idp->layers * IDR_BITS))) {
63639 *starting_id = id;
63640 return IDR_NEED_TO_GROW;
63641 }
63642diff -urNp linux-2.6.32.43/lib/inflate.c linux-2.6.32.43/lib/inflate.c
63643--- linux-2.6.32.43/lib/inflate.c 2011-03-27 14:31:47.000000000 -0400
63644+++ linux-2.6.32.43/lib/inflate.c 2011-04-17 15:56:46.000000000 -0400
63645@@ -266,7 +266,7 @@ static void free(void *where)
63646 malloc_ptr = free_mem_ptr;
63647 }
63648 #else
63649-#define malloc(a) kmalloc(a, GFP_KERNEL)
63650+#define malloc(a) kmalloc((a), GFP_KERNEL)
63651 #define free(a) kfree(a)
63652 #endif
63653
63654diff -urNp linux-2.6.32.43/lib/Kconfig.debug linux-2.6.32.43/lib/Kconfig.debug
63655--- linux-2.6.32.43/lib/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
63656+++ linux-2.6.32.43/lib/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
63657@@ -905,7 +905,7 @@ config LATENCYTOP
63658 select STACKTRACE
63659 select SCHEDSTATS
63660 select SCHED_DEBUG
63661- depends on HAVE_LATENCYTOP_SUPPORT
63662+ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
63663 help
63664 Enable this option if you want to use the LatencyTOP tool
63665 to find out which userspace is blocking on what kernel operations.
63666diff -urNp linux-2.6.32.43/lib/kobject.c linux-2.6.32.43/lib/kobject.c
63667--- linux-2.6.32.43/lib/kobject.c 2011-03-27 14:31:47.000000000 -0400
63668+++ linux-2.6.32.43/lib/kobject.c 2011-04-17 15:56:46.000000000 -0400
63669@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
63670 return ret;
63671 }
63672
63673-struct sysfs_ops kobj_sysfs_ops = {
63674+const struct sysfs_ops kobj_sysfs_ops = {
63675 .show = kobj_attr_show,
63676 .store = kobj_attr_store,
63677 };
63678@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
63679 * If the kset was not able to be created, NULL will be returned.
63680 */
63681 static struct kset *kset_create(const char *name,
63682- struct kset_uevent_ops *uevent_ops,
63683+ const struct kset_uevent_ops *uevent_ops,
63684 struct kobject *parent_kobj)
63685 {
63686 struct kset *kset;
63687@@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
63688 * If the kset was not able to be created, NULL will be returned.
63689 */
63690 struct kset *kset_create_and_add(const char *name,
63691- struct kset_uevent_ops *uevent_ops,
63692+ const struct kset_uevent_ops *uevent_ops,
63693 struct kobject *parent_kobj)
63694 {
63695 struct kset *kset;
63696diff -urNp linux-2.6.32.43/lib/kobject_uevent.c linux-2.6.32.43/lib/kobject_uevent.c
63697--- linux-2.6.32.43/lib/kobject_uevent.c 2011-03-27 14:31:47.000000000 -0400
63698+++ linux-2.6.32.43/lib/kobject_uevent.c 2011-04-17 15:56:46.000000000 -0400
63699@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
63700 const char *subsystem;
63701 struct kobject *top_kobj;
63702 struct kset *kset;
63703- struct kset_uevent_ops *uevent_ops;
63704+ const struct kset_uevent_ops *uevent_ops;
63705 u64 seq;
63706 int i = 0;
63707 int retval = 0;
63708diff -urNp linux-2.6.32.43/lib/kref.c linux-2.6.32.43/lib/kref.c
63709--- linux-2.6.32.43/lib/kref.c 2011-03-27 14:31:47.000000000 -0400
63710+++ linux-2.6.32.43/lib/kref.c 2011-04-17 15:56:46.000000000 -0400
63711@@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
63712 */
63713 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
63714 {
63715- WARN_ON(release == NULL);
63716+ BUG_ON(release == NULL);
63717 WARN_ON(release == (void (*)(struct kref *))kfree);
63718
63719 if (atomic_dec_and_test(&kref->refcount)) {
63720diff -urNp linux-2.6.32.43/lib/parser.c linux-2.6.32.43/lib/parser.c
63721--- linux-2.6.32.43/lib/parser.c 2011-03-27 14:31:47.000000000 -0400
63722+++ linux-2.6.32.43/lib/parser.c 2011-04-17 15:56:46.000000000 -0400
63723@@ -126,7 +126,7 @@ static int match_number(substring_t *s,
63724 char *buf;
63725 int ret;
63726
63727- buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
63728+ buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
63729 if (!buf)
63730 return -ENOMEM;
63731 memcpy(buf, s->from, s->to - s->from);
63732diff -urNp linux-2.6.32.43/lib/radix-tree.c linux-2.6.32.43/lib/radix-tree.c
63733--- linux-2.6.32.43/lib/radix-tree.c 2011-03-27 14:31:47.000000000 -0400
63734+++ linux-2.6.32.43/lib/radix-tree.c 2011-04-17 15:56:46.000000000 -0400
63735@@ -81,7 +81,7 @@ struct radix_tree_preload {
63736 int nr;
63737 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
63738 };
63739-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
63740+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
63741
63742 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
63743 {
63744diff -urNp linux-2.6.32.43/lib/random32.c linux-2.6.32.43/lib/random32.c
63745--- linux-2.6.32.43/lib/random32.c 2011-03-27 14:31:47.000000000 -0400
63746+++ linux-2.6.32.43/lib/random32.c 2011-04-17 15:56:46.000000000 -0400
63747@@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
63748 */
63749 static inline u32 __seed(u32 x, u32 m)
63750 {
63751- return (x < m) ? x + m : x;
63752+ return (x <= m) ? x + m + 1 : x;
63753 }
63754
63755 /**
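The lib/random32.c hunk above tightens the __seed() helper: the old form could return exactly m (for example when x is 0), while the new form returns a value strictly greater than m for any x <= m. The per-component state words of the Tausworthe generator have small forbidden low ranges, so keeping each seed strictly above the threshold passed by the caller is the safer invariant. A short comparison of the boundary behaviour of the two versions:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Original helper: the result can be exactly m (e.g. x == 0). */
static uint32_t seed_old(uint32_t x, uint32_t m)
{
    return (x < m) ? x + m : x;
}

/* Patched helper: the result is always strictly greater than m. */
static uint32_t seed_new(uint32_t x, uint32_t m)
{
    return (x <= m) ? x + m + 1 : x;
}

int main(void)
{
    /* boundary cases around a minimum state value m = 15 */
    printf("old(0, 15)  = %u\n", seed_old(0, 15));   /* 15: only == m  */
    printf("old(15, 15) = %u\n", seed_old(15, 15));  /* 15: unchanged  */
    printf("new(0, 15)  = %u\n", seed_new(0, 15));   /* 16: > m        */
    printf("new(15, 15) = %u\n", seed_new(15, 15));  /* 31: > m        */
    assert(seed_new(0, 15) > 15 && seed_new(15, 15) > 15);
    return 0;
}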
63756diff -urNp linux-2.6.32.43/lib/vsprintf.c linux-2.6.32.43/lib/vsprintf.c
63757--- linux-2.6.32.43/lib/vsprintf.c 2011-03-27 14:31:47.000000000 -0400
63758+++ linux-2.6.32.43/lib/vsprintf.c 2011-04-17 15:56:46.000000000 -0400
63759@@ -16,6 +16,9 @@
63760 * - scnprintf and vscnprintf
63761 */
63762
63763+#ifdef CONFIG_GRKERNSEC_HIDESYM
63764+#define __INCLUDED_BY_HIDESYM 1
63765+#endif
63766 #include <stdarg.h>
63767 #include <linux/module.h>
63768 #include <linux/types.h>
63769@@ -546,12 +549,12 @@ static char *number(char *buf, char *end
63770 return buf;
63771 }
63772
63773-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
63774+static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
63775 {
63776 int len, i;
63777
63778 if ((unsigned long)s < PAGE_SIZE)
63779- s = "<NULL>";
63780+ s = "(null)";
63781
63782 len = strnlen(s, spec.precision);
63783
63784@@ -581,7 +584,7 @@ static char *symbol_string(char *buf, ch
63785 unsigned long value = (unsigned long) ptr;
63786 #ifdef CONFIG_KALLSYMS
63787 char sym[KSYM_SYMBOL_LEN];
63788- if (ext != 'f' && ext != 's')
63789+ if (ext != 'f' && ext != 's' && ext != 'a')
63790 sprint_symbol(sym, value);
63791 else
63792 kallsyms_lookup(value, NULL, NULL, NULL, sym);
63793@@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf,
63794 * - 'f' For simple symbolic function names without offset
63795 * - 'S' For symbolic direct pointers with offset
63796 * - 's' For symbolic direct pointers without offset
63797+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
63798+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
63799 * - 'R' For a struct resource pointer, it prints the range of
63800 * addresses (not the name nor the flags)
63801 * - 'M' For a 6-byte MAC address, it prints the address in the
63802@@ -822,7 +827,7 @@ static char *pointer(const char *fmt, ch
63803 struct printf_spec spec)
63804 {
63805 if (!ptr)
63806- return string(buf, end, "(null)", spec);
63807+ return string(buf, end, "(nil)", spec);
63808
63809 switch (*fmt) {
63810 case 'F':
63811@@ -831,6 +836,14 @@ static char *pointer(const char *fmt, ch
63812 case 's':
63813 /* Fallthrough */
63814 case 'S':
63815+#ifdef CONFIG_GRKERNSEC_HIDESYM
63816+ break;
63817+#else
63818+ return symbol_string(buf, end, ptr, spec, *fmt);
63819+#endif
63820+ case 'a':
63821+ /* Fallthrough */
63822+ case 'A':
63823 return symbol_string(buf, end, ptr, spec, *fmt);
63824 case 'R':
63825 return resource_string(buf, end, ptr, spec);
63826@@ -1445,7 +1458,7 @@ do { \
63827 size_t len;
63828 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
63829 || (unsigned long)save_str < PAGE_SIZE)
63830- save_str = "<NULL>";
63831+ save_str = "(null)";
63832 len = strlen(save_str);
63833 if (str + len + 1 < end)
63834 memcpy(str, save_str, len + 1);
63835@@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size,
63836 typeof(type) value; \
63837 if (sizeof(type) == 8) { \
63838 args = PTR_ALIGN(args, sizeof(u32)); \
63839- *(u32 *)&value = *(u32 *)args; \
63840- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
63841+ *(u32 *)&value = *(const u32 *)args; \
63842+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
63843 } else { \
63844 args = PTR_ALIGN(args, sizeof(type)); \
63845- value = *(typeof(type) *)args; \
63846+ value = *(const typeof(type) *)args; \
63847 } \
63848 args += sizeof(type); \
63849 value; \
63850@@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size,
63851 const char *str_arg = args;
63852 size_t len = strlen(str_arg);
63853 args += len + 1;
63854- str = string(str, end, (char *)str_arg, spec);
63855+ str = string(str, end, str_arg, spec);
63856 break;
63857 }
63858
63859diff -urNp linux-2.6.32.43/localversion-grsec linux-2.6.32.43/localversion-grsec
63860--- linux-2.6.32.43/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
63861+++ linux-2.6.32.43/localversion-grsec 2011-04-17 15:56:46.000000000 -0400
63862@@ -0,0 +1 @@
63863+-grsec
63864diff -urNp linux-2.6.32.43/Makefile linux-2.6.32.43/Makefile
63865--- linux-2.6.32.43/Makefile 2011-07-13 17:23:04.000000000 -0400
63866+++ linux-2.6.32.43/Makefile 2011-07-13 17:23:18.000000000 -0400
63867@@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
63868
63869 HOSTCC = gcc
63870 HOSTCXX = g++
63871-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
63872-HOSTCXXFLAGS = -O2
63873+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
63874+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
63875+HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
63876
63877 # Decide whether to build built-in, modular, or both.
63878 # Normally, just do built-in.
63879@@ -342,10 +343,12 @@ LINUXINCLUDE := -Iinclude \
63880 KBUILD_CPPFLAGS := -D__KERNEL__
63881
63882 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
63883+ -W -Wno-unused-parameter -Wno-missing-field-initializers \
63884 -fno-strict-aliasing -fno-common \
63885 -Werror-implicit-function-declaration \
63886 -Wno-format-security \
63887 -fno-delete-null-pointer-checks
63888+KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
63889 KBUILD_AFLAGS := -D__ASSEMBLY__
63890
63891 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
63892@@ -376,8 +379,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
63893 # Rules shared between *config targets and build targets
63894
63895 # Basic helpers built in scripts/
63896-PHONY += scripts_basic
63897-scripts_basic:
63898+PHONY += scripts_basic pax-plugin
63899+scripts_basic: pax-plugin
63900 $(Q)$(MAKE) $(build)=scripts/basic
63901
63902 # To avoid any implicit rule to kick in, define an empty command.
63903@@ -403,7 +406,7 @@ endif
63904 # of make so .config is not included in this case either (for *config).
63905
63906 no-dot-config-targets := clean mrproper distclean \
63907- cscope TAGS tags help %docs check% \
63908+ cscope gtags TAGS tags help %docs check% \
63909 include/linux/version.h headers_% \
63910 kernelrelease kernelversion
63911
63912@@ -528,6 +531,18 @@ endif
63913
63914 include $(srctree)/arch/$(SRCARCH)/Makefile
63915
63916+ifeq ($(CONFIG_PAX_MEMORY_STACKLEAK),y)
63917+KBUILD_CFLAGS += $(call cc-ifversion, -ge, 0405, -fplugin=$(objtree)/tools/gcc/pax_plugin.so -fplugin-arg-pax_plugin-track-lowest-sp=100)
63918+endif
63919+pax-plugin:
63920+ifneq (,$(findstring pax_plugin, $(KBUILD_CFLAGS)))
63921+ $(Q)$(MAKE) $(build)=tools/gcc
63922+else
63923+ifeq ($(CONFIG_PAX_MEMORY_STACKLEAK),y)
63924+ $(Q)echo "warning, your gcc does not support plugins, PAX_MEMORY_STACKLEAK will be less secure"
63925+endif
63926+endif
63927+
63928 ifneq ($(CONFIG_FRAME_WARN),0)
63929 KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN})
63930 endif
63931@@ -644,7 +659,7 @@ export mod_strip_cmd
63932
63933
63934 ifeq ($(KBUILD_EXTMOD),)
63935-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
63936+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
63937
63938 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
63939 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
63940@@ -970,7 +985,7 @@ ifneq ($(KBUILD_SRC),)
63941 endif
63942
63943 # prepare2 creates a makefile if using a separate output directory
63944-prepare2: prepare3 outputmakefile
63945+prepare2: prepare3 outputmakefile pax-plugin
63946
63947 prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
63948 include/asm include/config/auto.conf
63949@@ -1198,7 +1213,7 @@ MRPROPER_FILES += .config .config.old in
63950 include/linux/autoconf.h include/linux/version.h \
63951 include/linux/utsrelease.h \
63952 include/linux/bounds.h include/asm*/asm-offsets.h \
63953- Module.symvers Module.markers tags TAGS cscope*
63954+ Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
63955
63956 # clean - Delete most, but leave enough to build external modules
63957 #
63958@@ -1289,6 +1304,7 @@ help:
63959 @echo ' modules_prepare - Set up for building external modules'
63960 @echo ' tags/TAGS - Generate tags file for editors'
63961 @echo ' cscope - Generate cscope index'
63962+ @echo ' gtags - Generate GNU GLOBAL index'
63963 @echo ' kernelrelease - Output the release version string'
63964 @echo ' kernelversion - Output the version stored in Makefile'
63965 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
63966@@ -1421,7 +1437,7 @@ clean: $(clean-dirs)
63967 $(call cmd,rmdirs)
63968 $(call cmd,rmfiles)
63969 @find $(KBUILD_EXTMOD) $(RCS_FIND_IGNORE) \
63970- \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
63971+ \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
63972 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
63973 -o -name '*.gcno' \) -type f -print | xargs rm -f
63974
63975@@ -1445,7 +1461,7 @@ endif # KBUILD_EXTMOD
63976 quiet_cmd_tags = GEN $@
63977 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
63978
63979-tags TAGS cscope: FORCE
63980+tags TAGS cscope gtags: FORCE
63981 $(call cmd,tags)
63982
63983 # Scripts to check various things for consistency
63984diff -urNp linux-2.6.32.43/mm/backing-dev.c linux-2.6.32.43/mm/backing-dev.c
63985--- linux-2.6.32.43/mm/backing-dev.c 2011-03-27 14:31:47.000000000 -0400
63986+++ linux-2.6.32.43/mm/backing-dev.c 2011-05-04 17:56:28.000000000 -0400
63987@@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rc
63988 * Add the default flusher task that gets created for any bdi
63989 * that has dirty data pending writeout
63990 */
63991-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
63992+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
63993 {
63994 if (!bdi_cap_writeback_dirty(bdi))
63995 return;
63996diff -urNp linux-2.6.32.43/mm/filemap.c linux-2.6.32.43/mm/filemap.c
63997--- linux-2.6.32.43/mm/filemap.c 2011-03-27 14:31:47.000000000 -0400
63998+++ linux-2.6.32.43/mm/filemap.c 2011-04-17 15:56:46.000000000 -0400
63999@@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file
64000 struct address_space *mapping = file->f_mapping;
64001
64002 if (!mapping->a_ops->readpage)
64003- return -ENOEXEC;
64004+ return -ENODEV;
64005 file_accessed(file);
64006 vma->vm_ops = &generic_file_vm_ops;
64007 vma->vm_flags |= VM_CAN_NONLINEAR;
64008@@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct f
64009 *pos = i_size_read(inode);
64010
64011 if (limit != RLIM_INFINITY) {
64012+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
64013 if (*pos >= limit) {
64014 send_sig(SIGXFSZ, current, 0);
64015 return -EFBIG;
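
gr_learn_resource() appears here and again in the mm/mlock.c and mm/mmap.c hunks below without its definition, which lives in the grsecurity/ directory that the Makefile hunk above adds to core-y. At these call sites its role is to record the resource value a task is about to need (here the write end position against RLIMIT_FSIZE) so that grsecurity's RBAC learning mode can propose limits. A stub with the signature implied by the call sites is sketched below; the parameter names are guesses and the exact meaning of the final flag is not visible from these hunks.

/* Prototype sketch inferred from the call sites in this patch; the real
 * implementation (and its gating on learning mode) is in grsecurity/. */
void gr_learn_resource(const struct task_struct *task, const int res,
		       const unsigned long wanted, const int gt)
{
	/* record (task, res, wanted) for RBAC learning; gt differs per call
	   site (0 here, 1 in the rlimit checks below) */
}
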
64016diff -urNp linux-2.6.32.43/mm/fremap.c linux-2.6.32.43/mm/fremap.c
64017--- linux-2.6.32.43/mm/fremap.c 2011-03-27 14:31:47.000000000 -0400
64018+++ linux-2.6.32.43/mm/fremap.c 2011-04-17 15:56:46.000000000 -0400
64019@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
64020 retry:
64021 vma = find_vma(mm, start);
64022
64023+#ifdef CONFIG_PAX_SEGMEXEC
64024+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
64025+ goto out;
64026+#endif
64027+
64028 /*
64029 * Make sure the vma is shared, that it supports prefaulting,
64030 * and that the remapped range is valid and fully within
64031@@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
64032 /*
64033 * drop PG_Mlocked flag for over-mapped range
64034 */
64035- unsigned int saved_flags = vma->vm_flags;
64036+ unsigned long saved_flags = vma->vm_flags;
64037 munlock_vma_pages_range(vma, start, start + size);
64038 vma->vm_flags = saved_flags;
64039 }
64040diff -urNp linux-2.6.32.43/mm/highmem.c linux-2.6.32.43/mm/highmem.c
64041--- linux-2.6.32.43/mm/highmem.c 2011-03-27 14:31:47.000000000 -0400
64042+++ linux-2.6.32.43/mm/highmem.c 2011-04-17 15:56:46.000000000 -0400
64043@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
64044 * So no dangers, even with speculative execution.
64045 */
64046 page = pte_page(pkmap_page_table[i]);
64047+ pax_open_kernel();
64048 pte_clear(&init_mm, (unsigned long)page_address(page),
64049 &pkmap_page_table[i]);
64050-
64051+ pax_close_kernel();
64052 set_page_address(page, NULL);
64053 need_flush = 1;
64054 }
64055@@ -177,9 +178,11 @@ start:
64056 }
64057 }
64058 vaddr = PKMAP_ADDR(last_pkmap_nr);
64059+
64060+ pax_open_kernel();
64061 set_pte_at(&init_mm, vaddr,
64062 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
64063-
64064+ pax_close_kernel();
64065 pkmap_count[last_pkmap_nr] = 1;
64066 set_page_address(page, (void *)vaddr);
64067
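
The pax_open_kernel()/pax_close_kernel() pairs wrapped around the pkmap page-table writes above come from PaX's KERNEXEC-style protection, under which kernel page tables and other sensitive data are kept read-only; the helpers briefly lift that protection for an intentional write. On x86 this typically amounts to toggling CR0.WP with preemption disabled, roughly as sketched below (kernel context assumed; the real helpers in the patch keep the state per-CPU and handle paravirt, which this sketch ignores).

/* Rough x86-flavoured sketch of the open/close pair, not the patch's code. */
static inline unsigned long pax_open_kernel_sketch(void)
{
	unsigned long cr0;

	preempt_disable();
	cr0 = read_cr0();
	write_cr0(cr0 & ~X86_CR0_WP);	/* let the kernel write through read-only mappings */
	return cr0;
}

static inline void pax_close_kernel_sketch(unsigned long cr0)
{
	write_cr0(cr0);			/* restore CR0.WP */
	preempt_enable();
}
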
64068diff -urNp linux-2.6.32.43/mm/hugetlb.c linux-2.6.32.43/mm/hugetlb.c
64069--- linux-2.6.32.43/mm/hugetlb.c 2011-07-13 17:23:04.000000000 -0400
64070+++ linux-2.6.32.43/mm/hugetlb.c 2011-07-13 17:23:19.000000000 -0400
64071@@ -1933,6 +1933,26 @@ static int unmap_ref_private(struct mm_s
64072 return 1;
64073 }
64074
64075+#ifdef CONFIG_PAX_SEGMEXEC
64076+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
64077+{
64078+ struct mm_struct *mm = vma->vm_mm;
64079+ struct vm_area_struct *vma_m;
64080+ unsigned long address_m;
64081+ pte_t *ptep_m;
64082+
64083+ vma_m = pax_find_mirror_vma(vma);
64084+ if (!vma_m)
64085+ return;
64086+
64087+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
64088+ address_m = address + SEGMEXEC_TASK_SIZE;
64089+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
64090+ get_page(page_m);
64091+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
64092+}
64093+#endif
64094+
64095 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
64096 unsigned long address, pte_t *ptep, pte_t pte,
64097 struct page *pagecache_page)
64098@@ -2004,6 +2024,11 @@ retry_avoidcopy:
64099 huge_ptep_clear_flush(vma, address, ptep);
64100 set_huge_pte_at(mm, address, ptep,
64101 make_huge_pte(vma, new_page, 1));
64102+
64103+#ifdef CONFIG_PAX_SEGMEXEC
64104+ pax_mirror_huge_pte(vma, address, new_page);
64105+#endif
64106+
64107 /* Make the old page be freed below */
64108 new_page = old_page;
64109 }
64110@@ -2135,6 +2160,10 @@ retry:
64111 && (vma->vm_flags & VM_SHARED)));
64112 set_huge_pte_at(mm, address, ptep, new_pte);
64113
64114+#ifdef CONFIG_PAX_SEGMEXEC
64115+ pax_mirror_huge_pte(vma, address, page);
64116+#endif
64117+
64118 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
64119 /* Optimization, do the COW without a second fault */
64120 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
64121@@ -2163,6 +2192,28 @@ int hugetlb_fault(struct mm_struct *mm,
64122 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
64123 struct hstate *h = hstate_vma(vma);
64124
64125+#ifdef CONFIG_PAX_SEGMEXEC
64126+ struct vm_area_struct *vma_m;
64127+
64128+ vma_m = pax_find_mirror_vma(vma);
64129+ if (vma_m) {
64130+ unsigned long address_m;
64131+
64132+ if (vma->vm_start > vma_m->vm_start) {
64133+ address_m = address;
64134+ address -= SEGMEXEC_TASK_SIZE;
64135+ vma = vma_m;
64136+ h = hstate_vma(vma);
64137+ } else
64138+ address_m = address + SEGMEXEC_TASK_SIZE;
64139+
64140+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
64141+ return VM_FAULT_OOM;
64142+ address_m &= HPAGE_MASK;
64143+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
64144+ }
64145+#endif
64146+
64147 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
64148 if (!ptep)
64149 return VM_FAULT_OOM;
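
The hugetlb additions above, like the mm/memory.c additions further down, maintain one SEGMEXEC invariant: every executable mapping in the lower half of the user address space has a mirror vma at the same offset plus SEGMEXEC_TASK_SIZE, and any (huge) PTE instantiated in one half is replicated into the other, which is why hugetlb_fault() first normalises the faulting address into the lower half and pre-allocates the mirror's page table. The arithmetic itself is tiny; a standalone illustration follows, where the 1.5 GiB constant is only an assumption for the example (the real value comes from the arch headers patched elsewhere).

/* Standalone illustration of the SEGMEXEC address mirroring used by
 * pax_mirror_huge_pte() and hugetlb_fault() above. Builds and runs as a
 * normal userspace program. */
#include <stdio.h>

#define SEGMEXEC_TASK_SIZE_EX 0x60000000UL	/* assumed i386-style split for the example */

static unsigned long mirror_of(unsigned long addr)
{
	return addr < SEGMEXEC_TASK_SIZE_EX ? addr + SEGMEXEC_TASK_SIZE_EX
					    : addr - SEGMEXEC_TASK_SIZE_EX;
}

int main(void)
{
	unsigned long fault = 0x08048000UL;	/* a typical i386 text address */

	printf("%#lx mirrors %#lx\n", fault, mirror_of(fault));
	return 0;
}
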
64150diff -urNp linux-2.6.32.43/mm/internal.h linux-2.6.32.43/mm/internal.h
64151--- linux-2.6.32.43/mm/internal.h 2011-03-27 14:31:47.000000000 -0400
64152+++ linux-2.6.32.43/mm/internal.h 2011-07-09 09:13:08.000000000 -0400
64153@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
64154 * in mm/page_alloc.c
64155 */
64156 extern void __free_pages_bootmem(struct page *page, unsigned int order);
64157+extern void free_compound_page(struct page *page);
64158 extern void prep_compound_page(struct page *page, unsigned long order);
64159
64160
64161diff -urNp linux-2.6.32.43/mm/Kconfig linux-2.6.32.43/mm/Kconfig
64162--- linux-2.6.32.43/mm/Kconfig 2011-03-27 14:31:47.000000000 -0400
64163+++ linux-2.6.32.43/mm/Kconfig 2011-04-17 15:56:46.000000000 -0400
64164@@ -228,7 +228,7 @@ config KSM
64165 config DEFAULT_MMAP_MIN_ADDR
64166 int "Low address space to protect from user allocation"
64167 depends on MMU
64168- default 4096
64169+ default 65536
64170 help
64171 This is the portion of low virtual memory which should be protected
64172 from userspace allocation. Keeping a user from writing to low pages
64173diff -urNp linux-2.6.32.43/mm/kmemleak.c linux-2.6.32.43/mm/kmemleak.c
64174--- linux-2.6.32.43/mm/kmemleak.c 2011-06-25 12:55:35.000000000 -0400
64175+++ linux-2.6.32.43/mm/kmemleak.c 2011-06-25 12:56:37.000000000 -0400
64176@@ -358,7 +358,7 @@ static void print_unreferenced(struct se
64177
64178 for (i = 0; i < object->trace_len; i++) {
64179 void *ptr = (void *)object->trace[i];
64180- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
64181+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
64182 }
64183 }
64184
64185diff -urNp linux-2.6.32.43/mm/maccess.c linux-2.6.32.43/mm/maccess.c
64186--- linux-2.6.32.43/mm/maccess.c 2011-03-27 14:31:47.000000000 -0400
64187+++ linux-2.6.32.43/mm/maccess.c 2011-04-17 15:56:46.000000000 -0400
64188@@ -14,7 +14,7 @@
64189 * Safely read from address @src to the buffer at @dst. If a kernel fault
64190 * happens, handle that and return -EFAULT.
64191 */
64192-long probe_kernel_read(void *dst, void *src, size_t size)
64193+long probe_kernel_read(void *dst, const void *src, size_t size)
64194 {
64195 long ret;
64196 mm_segment_t old_fs = get_fs();
64197@@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
64198 * Safely write to address @dst from the buffer at @src. If a kernel fault
64199 * happens, handle that and return -EFAULT.
64200 */
64201-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
64202+long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
64203 {
64204 long ret;
64205 mm_segment_t old_fs = get_fs();
64206diff -urNp linux-2.6.32.43/mm/madvise.c linux-2.6.32.43/mm/madvise.c
64207--- linux-2.6.32.43/mm/madvise.c 2011-03-27 14:31:47.000000000 -0400
64208+++ linux-2.6.32.43/mm/madvise.c 2011-04-17 15:56:46.000000000 -0400
64209@@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_a
64210 pgoff_t pgoff;
64211 unsigned long new_flags = vma->vm_flags;
64212
64213+#ifdef CONFIG_PAX_SEGMEXEC
64214+ struct vm_area_struct *vma_m;
64215+#endif
64216+
64217 switch (behavior) {
64218 case MADV_NORMAL:
64219 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
64220@@ -103,6 +107,13 @@ success:
64221 /*
64222 * vm_flags is protected by the mmap_sem held in write mode.
64223 */
64224+
64225+#ifdef CONFIG_PAX_SEGMEXEC
64226+ vma_m = pax_find_mirror_vma(vma);
64227+ if (vma_m)
64228+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
64229+#endif
64230+
64231 vma->vm_flags = new_flags;
64232
64233 out:
64234@@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_a
64235 struct vm_area_struct ** prev,
64236 unsigned long start, unsigned long end)
64237 {
64238+
64239+#ifdef CONFIG_PAX_SEGMEXEC
64240+ struct vm_area_struct *vma_m;
64241+#endif
64242+
64243 *prev = vma;
64244 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
64245 return -EINVAL;
64246@@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_a
64247 zap_page_range(vma, start, end - start, &details);
64248 } else
64249 zap_page_range(vma, start, end - start, NULL);
64250+
64251+#ifdef CONFIG_PAX_SEGMEXEC
64252+ vma_m = pax_find_mirror_vma(vma);
64253+ if (vma_m) {
64254+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
64255+ struct zap_details details = {
64256+ .nonlinear_vma = vma_m,
64257+ .last_index = ULONG_MAX,
64258+ };
64259+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
64260+ } else
64261+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
64262+ }
64263+#endif
64264+
64265 return 0;
64266 }
64267
64268@@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
64269 if (end < start)
64270 goto out;
64271
64272+#ifdef CONFIG_PAX_SEGMEXEC
64273+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
64274+ if (end > SEGMEXEC_TASK_SIZE)
64275+ goto out;
64276+ } else
64277+#endif
64278+
64279+ if (end > TASK_SIZE)
64280+ goto out;
64281+
64282 error = 0;
64283 if (end == start)
64284 goto out;
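
The range check added to sys_madvise() above caps end at SEGMEXEC_TASK_SIZE when the mm runs under SEGMEXEC and at TASK_SIZE otherwise; the same open-coded dangling-else #ifdef pattern reappears in do_mbind() and do_mlock() later in this section. Folded into a helper it is just the following; this helper does not exist in the patch and is shown only to make the recurring pattern explicit (pax_flags and MF_PAX_SEGMEXEC are the mm_struct additions made elsewhere in the patch).

/* Sketch of the recurring "effective task size" test (kernel context assumed). */
static inline unsigned long pax_effective_task_size(const struct mm_struct *mm)
{
#ifdef CONFIG_PAX_SEGMEXEC
	if (mm->pax_flags & MF_PAX_SEGMEXEC)
		return SEGMEXEC_TASK_SIZE;	/* only the lower half is usable */
#endif
	return TASK_SIZE;
}
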
64285diff -urNp linux-2.6.32.43/mm/memory.c linux-2.6.32.43/mm/memory.c
64286--- linux-2.6.32.43/mm/memory.c 2011-07-13 17:23:04.000000000 -0400
64287+++ linux-2.6.32.43/mm/memory.c 2011-07-13 17:23:23.000000000 -0400
64288@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
64289 return;
64290
64291 pmd = pmd_offset(pud, start);
64292+
64293+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
64294 pud_clear(pud);
64295 pmd_free_tlb(tlb, pmd, start);
64296+#endif
64297+
64298 }
64299
64300 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
64301@@ -219,9 +223,12 @@ static inline void free_pud_range(struct
64302 if (end - 1 > ceiling - 1)
64303 return;
64304
64305+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
64306 pud = pud_offset(pgd, start);
64307 pgd_clear(pgd);
64308 pud_free_tlb(tlb, pud, start);
64309+#endif
64310+
64311 }
64312
64313 /*
64314@@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct
64315 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
64316 i = 0;
64317
64318- do {
64319+ while (nr_pages) {
64320 struct vm_area_struct *vma;
64321
64322- vma = find_extend_vma(mm, start);
64323+ vma = find_vma(mm, start);
64324 if (!vma && in_gate_area(tsk, start)) {
64325 unsigned long pg = start & PAGE_MASK;
64326 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
64327@@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct
64328 continue;
64329 }
64330
64331- if (!vma ||
64332+ if (!vma || start < vma->vm_start ||
64333 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
64334 !(vm_flags & vma->vm_flags))
64335 return i ? : -EFAULT;
64336@@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct
64337 start += PAGE_SIZE;
64338 nr_pages--;
64339 } while (nr_pages && start < vma->vm_end);
64340- } while (nr_pages);
64341+ }
64342 return i;
64343 }
64344
64345@@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_st
64346 page_add_file_rmap(page);
64347 set_pte_at(mm, addr, pte, mk_pte(page, prot));
64348
64349+#ifdef CONFIG_PAX_SEGMEXEC
64350+ pax_mirror_file_pte(vma, addr, page, ptl);
64351+#endif
64352+
64353 retval = 0;
64354 pte_unmap_unlock(pte, ptl);
64355 return retval;
64356@@ -1560,10 +1571,22 @@ out:
64357 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
64358 struct page *page)
64359 {
64360+
64361+#ifdef CONFIG_PAX_SEGMEXEC
64362+ struct vm_area_struct *vma_m;
64363+#endif
64364+
64365 if (addr < vma->vm_start || addr >= vma->vm_end)
64366 return -EFAULT;
64367 if (!page_count(page))
64368 return -EINVAL;
64369+
64370+#ifdef CONFIG_PAX_SEGMEXEC
64371+ vma_m = pax_find_mirror_vma(vma);
64372+ if (vma_m)
64373+ vma_m->vm_flags |= VM_INSERTPAGE;
64374+#endif
64375+
64376 vma->vm_flags |= VM_INSERTPAGE;
64377 return insert_page(vma, addr, page, vma->vm_page_prot);
64378 }
64379@@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struc
64380 unsigned long pfn)
64381 {
64382 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
64383+ BUG_ON(vma->vm_mirror);
64384
64385 if (addr < vma->vm_start || addr >= vma->vm_end)
64386 return -EFAULT;
64387@@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct
64388 copy_user_highpage(dst, src, va, vma);
64389 }
64390
64391+#ifdef CONFIG_PAX_SEGMEXEC
64392+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
64393+{
64394+ struct mm_struct *mm = vma->vm_mm;
64395+ spinlock_t *ptl;
64396+ pte_t *pte, entry;
64397+
64398+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
64399+ entry = *pte;
64400+ if (!pte_present(entry)) {
64401+ if (!pte_none(entry)) {
64402+ BUG_ON(pte_file(entry));
64403+ free_swap_and_cache(pte_to_swp_entry(entry));
64404+ pte_clear_not_present_full(mm, address, pte, 0);
64405+ }
64406+ } else {
64407+ struct page *page;
64408+
64409+ flush_cache_page(vma, address, pte_pfn(entry));
64410+ entry = ptep_clear_flush(vma, address, pte);
64411+ BUG_ON(pte_dirty(entry));
64412+ page = vm_normal_page(vma, address, entry);
64413+ if (page) {
64414+ update_hiwater_rss(mm);
64415+ if (PageAnon(page))
64416+ dec_mm_counter(mm, anon_rss);
64417+ else
64418+ dec_mm_counter(mm, file_rss);
64419+ page_remove_rmap(page);
64420+ page_cache_release(page);
64421+ }
64422+ }
64423+ pte_unmap_unlock(pte, ptl);
64424+}
64425+
64426+/* PaX: if vma is mirrored, synchronize the mirror's PTE
64427+ *
64428+ * the ptl of the lower mapped page is held on entry and is not released on exit
64429+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
64430+ */
64431+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
64432+{
64433+ struct mm_struct *mm = vma->vm_mm;
64434+ unsigned long address_m;
64435+ spinlock_t *ptl_m;
64436+ struct vm_area_struct *vma_m;
64437+ pmd_t *pmd_m;
64438+ pte_t *pte_m, entry_m;
64439+
64440+ BUG_ON(!page_m || !PageAnon(page_m));
64441+
64442+ vma_m = pax_find_mirror_vma(vma);
64443+ if (!vma_m)
64444+ return;
64445+
64446+ BUG_ON(!PageLocked(page_m));
64447+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
64448+ address_m = address + SEGMEXEC_TASK_SIZE;
64449+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
64450+ pte_m = pte_offset_map_nested(pmd_m, address_m);
64451+ ptl_m = pte_lockptr(mm, pmd_m);
64452+ if (ptl != ptl_m) {
64453+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
64454+ if (!pte_none(*pte_m))
64455+ goto out;
64456+ }
64457+
64458+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
64459+ page_cache_get(page_m);
64460+ page_add_anon_rmap(page_m, vma_m, address_m);
64461+ inc_mm_counter(mm, anon_rss);
64462+ set_pte_at(mm, address_m, pte_m, entry_m);
64463+ update_mmu_cache(vma_m, address_m, entry_m);
64464+out:
64465+ if (ptl != ptl_m)
64466+ spin_unlock(ptl_m);
64467+ pte_unmap_nested(pte_m);
64468+ unlock_page(page_m);
64469+}
64470+
64471+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
64472+{
64473+ struct mm_struct *mm = vma->vm_mm;
64474+ unsigned long address_m;
64475+ spinlock_t *ptl_m;
64476+ struct vm_area_struct *vma_m;
64477+ pmd_t *pmd_m;
64478+ pte_t *pte_m, entry_m;
64479+
64480+ BUG_ON(!page_m || PageAnon(page_m));
64481+
64482+ vma_m = pax_find_mirror_vma(vma);
64483+ if (!vma_m)
64484+ return;
64485+
64486+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
64487+ address_m = address + SEGMEXEC_TASK_SIZE;
64488+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
64489+ pte_m = pte_offset_map_nested(pmd_m, address_m);
64490+ ptl_m = pte_lockptr(mm, pmd_m);
64491+ if (ptl != ptl_m) {
64492+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
64493+ if (!pte_none(*pte_m))
64494+ goto out;
64495+ }
64496+
64497+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
64498+ page_cache_get(page_m);
64499+ page_add_file_rmap(page_m);
64500+ inc_mm_counter(mm, file_rss);
64501+ set_pte_at(mm, address_m, pte_m, entry_m);
64502+ update_mmu_cache(vma_m, address_m, entry_m);
64503+out:
64504+ if (ptl != ptl_m)
64505+ spin_unlock(ptl_m);
64506+ pte_unmap_nested(pte_m);
64507+}
64508+
64509+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
64510+{
64511+ struct mm_struct *mm = vma->vm_mm;
64512+ unsigned long address_m;
64513+ spinlock_t *ptl_m;
64514+ struct vm_area_struct *vma_m;
64515+ pmd_t *pmd_m;
64516+ pte_t *pte_m, entry_m;
64517+
64518+ vma_m = pax_find_mirror_vma(vma);
64519+ if (!vma_m)
64520+ return;
64521+
64522+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
64523+ address_m = address + SEGMEXEC_TASK_SIZE;
64524+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
64525+ pte_m = pte_offset_map_nested(pmd_m, address_m);
64526+ ptl_m = pte_lockptr(mm, pmd_m);
64527+ if (ptl != ptl_m) {
64528+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
64529+ if (!pte_none(*pte_m))
64530+ goto out;
64531+ }
64532+
64533+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
64534+ set_pte_at(mm, address_m, pte_m, entry_m);
64535+out:
64536+ if (ptl != ptl_m)
64537+ spin_unlock(ptl_m);
64538+ pte_unmap_nested(pte_m);
64539+}
64540+
64541+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
64542+{
64543+ struct page *page_m;
64544+ pte_t entry;
64545+
64546+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
64547+ goto out;
64548+
64549+ entry = *pte;
64550+ page_m = vm_normal_page(vma, address, entry);
64551+ if (!page_m)
64552+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
64553+ else if (PageAnon(page_m)) {
64554+ if (pax_find_mirror_vma(vma)) {
64555+ pte_unmap_unlock(pte, ptl);
64556+ lock_page(page_m);
64557+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
64558+ if (pte_same(entry, *pte))
64559+ pax_mirror_anon_pte(vma, address, page_m, ptl);
64560+ else
64561+ unlock_page(page_m);
64562+ }
64563+ } else
64564+ pax_mirror_file_pte(vma, address, page_m, ptl);
64565+
64566+out:
64567+ pte_unmap_unlock(pte, ptl);
64568+}
64569+#endif
64570+
64571 /*
64572 * This routine handles present pages, when users try to write
64573 * to a shared page. It is done by copying the page to a new address
64574@@ -2156,6 +2360,12 @@ gotten:
64575 */
64576 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
64577 if (likely(pte_same(*page_table, orig_pte))) {
64578+
64579+#ifdef CONFIG_PAX_SEGMEXEC
64580+ if (pax_find_mirror_vma(vma))
64581+ BUG_ON(!trylock_page(new_page));
64582+#endif
64583+
64584 if (old_page) {
64585 if (!PageAnon(old_page)) {
64586 dec_mm_counter(mm, file_rss);
64587@@ -2207,6 +2417,10 @@ gotten:
64588 page_remove_rmap(old_page);
64589 }
64590
64591+#ifdef CONFIG_PAX_SEGMEXEC
64592+ pax_mirror_anon_pte(vma, address, new_page, ptl);
64593+#endif
64594+
64595 /* Free the old page.. */
64596 new_page = old_page;
64597 ret |= VM_FAULT_WRITE;
64598@@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct
64599 swap_free(entry);
64600 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
64601 try_to_free_swap(page);
64602+
64603+#ifdef CONFIG_PAX_SEGMEXEC
64604+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
64605+#endif
64606+
64607 unlock_page(page);
64608
64609 if (flags & FAULT_FLAG_WRITE) {
64610@@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct
64611
64612 /* No need to invalidate - it was non-present before */
64613 update_mmu_cache(vma, address, pte);
64614+
64615+#ifdef CONFIG_PAX_SEGMEXEC
64616+ pax_mirror_anon_pte(vma, address, page, ptl);
64617+#endif
64618+
64619 unlock:
64620 pte_unmap_unlock(page_table, ptl);
64621 out:
64622@@ -2632,40 +2856,6 @@ out_release:
64623 }
64624
64625 /*
64626- * This is like a special single-page "expand_{down|up}wards()",
64627- * except we must first make sure that 'address{-|+}PAGE_SIZE'
64628- * doesn't hit another vma.
64629- */
64630-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
64631-{
64632- address &= PAGE_MASK;
64633- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
64634- struct vm_area_struct *prev = vma->vm_prev;
64635-
64636- /*
64637- * Is there a mapping abutting this one below?
64638- *
64639- * That's only ok if it's the same stack mapping
64640- * that has gotten split..
64641- */
64642- if (prev && prev->vm_end == address)
64643- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
64644-
64645- expand_stack(vma, address - PAGE_SIZE);
64646- }
64647- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
64648- struct vm_area_struct *next = vma->vm_next;
64649-
64650- /* As VM_GROWSDOWN but s/below/above/ */
64651- if (next && next->vm_start == address + PAGE_SIZE)
64652- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
64653-
64654- expand_upwards(vma, address + PAGE_SIZE);
64655- }
64656- return 0;
64657-}
64658-
64659-/*
64660 * We enter with non-exclusive mmap_sem (to exclude vma changes,
64661 * but allow concurrent faults), and pte mapped but not yet locked.
64662 * We return with mmap_sem still held, but pte unmapped and unlocked.
64663@@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_s
64664 unsigned long address, pte_t *page_table, pmd_t *pmd,
64665 unsigned int flags)
64666 {
64667- struct page *page;
64668+ struct page *page = NULL;
64669 spinlock_t *ptl;
64670 pte_t entry;
64671
64672- pte_unmap(page_table);
64673-
64674- /* Check if we need to add a guard page to the stack */
64675- if (check_stack_guard_page(vma, address) < 0)
64676- return VM_FAULT_SIGBUS;
64677-
64678- /* Use the zero-page for reads */
64679 if (!(flags & FAULT_FLAG_WRITE)) {
64680 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
64681 vma->vm_page_prot));
64682- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
64683+ ptl = pte_lockptr(mm, pmd);
64684+ spin_lock(ptl);
64685 if (!pte_none(*page_table))
64686 goto unlock;
64687 goto setpte;
64688 }
64689
64690 /* Allocate our own private page. */
64691+ pte_unmap(page_table);
64692+
64693 if (unlikely(anon_vma_prepare(vma)))
64694 goto oom;
64695 page = alloc_zeroed_user_highpage_movable(vma, address);
64696@@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_s
64697 if (!pte_none(*page_table))
64698 goto release;
64699
64700+#ifdef CONFIG_PAX_SEGMEXEC
64701+ if (pax_find_mirror_vma(vma))
64702+ BUG_ON(!trylock_page(page));
64703+#endif
64704+
64705 inc_mm_counter(mm, anon_rss);
64706 page_add_new_anon_rmap(page, vma, address);
64707 setpte:
64708@@ -2720,6 +2911,12 @@ setpte:
64709
64710 /* No need to invalidate - it was non-present before */
64711 update_mmu_cache(vma, address, entry);
64712+
64713+#ifdef CONFIG_PAX_SEGMEXEC
64714+ if (page)
64715+ pax_mirror_anon_pte(vma, address, page, ptl);
64716+#endif
64717+
64718 unlock:
64719 pte_unmap_unlock(page_table, ptl);
64720 return 0;
64721@@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *
64722 */
64723 /* Only go through if we didn't race with anybody else... */
64724 if (likely(pte_same(*page_table, orig_pte))) {
64725+
64726+#ifdef CONFIG_PAX_SEGMEXEC
64727+ if (anon && pax_find_mirror_vma(vma))
64728+ BUG_ON(!trylock_page(page));
64729+#endif
64730+
64731 flush_icache_page(vma, page);
64732 entry = mk_pte(page, vma->vm_page_prot);
64733 if (flags & FAULT_FLAG_WRITE)
64734@@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *
64735
64736 /* no need to invalidate: a not-present page won't be cached */
64737 update_mmu_cache(vma, address, entry);
64738+
64739+#ifdef CONFIG_PAX_SEGMEXEC
64740+ if (anon)
64741+ pax_mirror_anon_pte(vma, address, page, ptl);
64742+ else
64743+ pax_mirror_file_pte(vma, address, page, ptl);
64744+#endif
64745+
64746 } else {
64747 if (charged)
64748 mem_cgroup_uncharge_page(page);
64749@@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struc
64750 if (flags & FAULT_FLAG_WRITE)
64751 flush_tlb_page(vma, address);
64752 }
64753+
64754+#ifdef CONFIG_PAX_SEGMEXEC
64755+ pax_mirror_pte(vma, address, pte, pmd, ptl);
64756+ return 0;
64757+#endif
64758+
64759 unlock:
64760 pte_unmap_unlock(pte, ptl);
64761 return 0;
64762@@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm
64763 pmd_t *pmd;
64764 pte_t *pte;
64765
64766+#ifdef CONFIG_PAX_SEGMEXEC
64767+ struct vm_area_struct *vma_m;
64768+#endif
64769+
64770 __set_current_state(TASK_RUNNING);
64771
64772 count_vm_event(PGFAULT);
64773@@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm
64774 if (unlikely(is_vm_hugetlb_page(vma)))
64775 return hugetlb_fault(mm, vma, address, flags);
64776
64777+#ifdef CONFIG_PAX_SEGMEXEC
64778+ vma_m = pax_find_mirror_vma(vma);
64779+ if (vma_m) {
64780+ unsigned long address_m;
64781+ pgd_t *pgd_m;
64782+ pud_t *pud_m;
64783+ pmd_t *pmd_m;
64784+
64785+ if (vma->vm_start > vma_m->vm_start) {
64786+ address_m = address;
64787+ address -= SEGMEXEC_TASK_SIZE;
64788+ vma = vma_m;
64789+ } else
64790+ address_m = address + SEGMEXEC_TASK_SIZE;
64791+
64792+ pgd_m = pgd_offset(mm, address_m);
64793+ pud_m = pud_alloc(mm, pgd_m, address_m);
64794+ if (!pud_m)
64795+ return VM_FAULT_OOM;
64796+ pmd_m = pmd_alloc(mm, pud_m, address_m);
64797+ if (!pmd_m)
64798+ return VM_FAULT_OOM;
64799+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
64800+ return VM_FAULT_OOM;
64801+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
64802+ }
64803+#endif
64804+
64805 pgd = pgd_offset(mm, address);
64806 pud = pud_alloc(mm, pgd, address);
64807 if (!pud)
64808@@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
64809 gate_vma.vm_start = FIXADDR_USER_START;
64810 gate_vma.vm_end = FIXADDR_USER_END;
64811 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
64812- gate_vma.vm_page_prot = __P101;
64813+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
64814 /*
64815 * Make sure the vDSO gets into every core dump.
64816 * Dumping its contents makes post-mortem fully interpretable later
64817diff -urNp linux-2.6.32.43/mm/memory-failure.c linux-2.6.32.43/mm/memory-failure.c
64818--- linux-2.6.32.43/mm/memory-failure.c 2011-03-27 14:31:47.000000000 -0400
64819+++ linux-2.6.32.43/mm/memory-failure.c 2011-04-17 15:56:46.000000000 -0400
64820@@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r
64821
64822 int sysctl_memory_failure_recovery __read_mostly = 1;
64823
64824-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64825+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
64826
64827 /*
64828 * Send all the processes who have the page mapped an ``action optional''
64829@@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn,
64830 return 0;
64831 }
64832
64833- atomic_long_add(1, &mce_bad_pages);
64834+ atomic_long_add_unchecked(1, &mce_bad_pages);
64835
64836 /*
64837 * We need/can do nothing about count=0 pages.
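
mce_bad_pages switches to atomic_long_unchecked_t because, with the patch's PAX_REFCOUNT hardening, the ordinary atomic operations are made to detect signed overflow (to stop reference-count wraps); counters that are merely statistics opt out via the *_unchecked variants. A minimal single-threaded model of the distinction follows; the real types and the overflow handling live in the arch atomic headers patched elsewhere, and atomicity itself is ignored here.

/* Minimal model, not the kernel's definitions. */
#include <limits.h>

typedef struct { long counter; } atomic_long_model_t;
typedef struct { long counter; } atomic_long_unchecked_model_t;

static void atomic_long_add_model(long i, atomic_long_model_t *v)
{
	/* model of the checked variant: refuse an increment that would wrap
	   (the real PAX_REFCOUNT code raises an overflow event instead) */
	if (i > 0 && v->counter > LONG_MAX - i)
		return;
	v->counter += i;
}

static void atomic_long_add_unchecked_model(long i, atomic_long_unchecked_model_t *v)
{
	v->counter += i;	/* plain add, no overflow detection */
}
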
64838diff -urNp linux-2.6.32.43/mm/mempolicy.c linux-2.6.32.43/mm/mempolicy.c
64839--- linux-2.6.32.43/mm/mempolicy.c 2011-03-27 14:31:47.000000000 -0400
64840+++ linux-2.6.32.43/mm/mempolicy.c 2011-04-17 15:56:46.000000000 -0400
64841@@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
64842 struct vm_area_struct *next;
64843 int err;
64844
64845+#ifdef CONFIG_PAX_SEGMEXEC
64846+ struct vm_area_struct *vma_m;
64847+#endif
64848+
64849 err = 0;
64850 for (; vma && vma->vm_start < end; vma = next) {
64851 next = vma->vm_next;
64852@@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
64853 err = policy_vma(vma, new);
64854 if (err)
64855 break;
64856+
64857+#ifdef CONFIG_PAX_SEGMEXEC
64858+ vma_m = pax_find_mirror_vma(vma);
64859+ if (vma_m) {
64860+ err = policy_vma(vma_m, new);
64861+ if (err)
64862+ break;
64863+ }
64864+#endif
64865+
64866 }
64867 return err;
64868 }
64869@@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
64870
64871 if (end < start)
64872 return -EINVAL;
64873+
64874+#ifdef CONFIG_PAX_SEGMEXEC
64875+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
64876+ if (end > SEGMEXEC_TASK_SIZE)
64877+ return -EINVAL;
64878+ } else
64879+#endif
64880+
64881+ if (end > TASK_SIZE)
64882+ return -EINVAL;
64883+
64884 if (end == start)
64885 return 0;
64886
64887@@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64888 if (!mm)
64889 return -EINVAL;
64890
64891+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64892+ if (mm != current->mm &&
64893+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64894+ err = -EPERM;
64895+ goto out;
64896+ }
64897+#endif
64898+
64899 /*
64900 * Check if this process has the right to modify the specified
64901 * process. The right exists if the process has administrative
64902@@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
64903 rcu_read_lock();
64904 tcred = __task_cred(task);
64905 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
64906- cred->uid != tcred->suid && cred->uid != tcred->uid &&
64907- !capable(CAP_SYS_NICE)) {
64908+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
64909 rcu_read_unlock();
64910 err = -EPERM;
64911 goto out;
64912@@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, vo
64913
64914 if (file) {
64915 seq_printf(m, " file=");
64916- seq_path(m, &file->f_path, "\n\t= ");
64917+ seq_path(m, &file->f_path, "\n\t\\= ");
64918 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
64919 seq_printf(m, " heap");
64920 } else if (vma->vm_start <= mm->start_stack &&
64921diff -urNp linux-2.6.32.43/mm/migrate.c linux-2.6.32.43/mm/migrate.c
64922--- linux-2.6.32.43/mm/migrate.c 2011-07-13 17:23:04.000000000 -0400
64923+++ linux-2.6.32.43/mm/migrate.c 2011-07-13 17:23:23.000000000 -0400
64924@@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struc
64925 unsigned long chunk_start;
64926 int err;
64927
64928+ pax_track_stack();
64929+
64930 task_nodes = cpuset_mems_allowed(task);
64931
64932 err = -ENOMEM;
64933@@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
64934 if (!mm)
64935 return -EINVAL;
64936
64937+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
64938+ if (mm != current->mm &&
64939+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
64940+ err = -EPERM;
64941+ goto out;
64942+ }
64943+#endif
64944+
64945 /*
64946 * Check if this process has the right to modify the specified
64947 * process. The right exists if the process has administrative
64948@@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
64949 rcu_read_lock();
64950 tcred = __task_cred(task);
64951 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
64952- cred->uid != tcred->suid && cred->uid != tcred->uid &&
64953- !capable(CAP_SYS_NICE)) {
64954+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
64955 rcu_read_unlock();
64956 err = -EPERM;
64957 goto out;
64958diff -urNp linux-2.6.32.43/mm/mlock.c linux-2.6.32.43/mm/mlock.c
64959--- linux-2.6.32.43/mm/mlock.c 2011-03-27 14:31:47.000000000 -0400
64960+++ linux-2.6.32.43/mm/mlock.c 2011-04-17 15:56:46.000000000 -0400
64961@@ -13,6 +13,7 @@
64962 #include <linux/pagemap.h>
64963 #include <linux/mempolicy.h>
64964 #include <linux/syscalls.h>
64965+#include <linux/security.h>
64966 #include <linux/sched.h>
64967 #include <linux/module.h>
64968 #include <linux/rmap.h>
64969@@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
64970 }
64971 }
64972
64973-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
64974-{
64975- return (vma->vm_flags & VM_GROWSDOWN) &&
64976- (vma->vm_start == addr) &&
64977- !vma_stack_continue(vma->vm_prev, addr);
64978-}
64979-
64980 /**
64981 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
64982 * @vma: target vma
64983@@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(stru
64984 if (vma->vm_flags & VM_WRITE)
64985 gup_flags |= FOLL_WRITE;
64986
64987- /* We don't try to access the guard page of a stack vma */
64988- if (stack_guard_page(vma, start)) {
64989- addr += PAGE_SIZE;
64990- nr_pages--;
64991- }
64992-
64993 while (nr_pages > 0) {
64994 int i;
64995
64996@@ -440,7 +428,7 @@ static int do_mlock(unsigned long start,
64997 {
64998 unsigned long nstart, end, tmp;
64999 struct vm_area_struct * vma, * prev;
65000- int error;
65001+ int error = -EINVAL;
65002
65003 len = PAGE_ALIGN(len);
65004 end = start + len;
65005@@ -448,6 +436,9 @@ static int do_mlock(unsigned long start,
65006 return -EINVAL;
65007 if (end == start)
65008 return 0;
65009+ if (end > TASK_SIZE)
65010+ return -EINVAL;
65011+
65012 vma = find_vma_prev(current->mm, start, &prev);
65013 if (!vma || vma->vm_start > start)
65014 return -ENOMEM;
65015@@ -458,6 +449,11 @@ static int do_mlock(unsigned long start,
65016 for (nstart = start ; ; ) {
65017 unsigned int newflags;
65018
65019+#ifdef CONFIG_PAX_SEGMEXEC
65020+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
65021+ break;
65022+#endif
65023+
65024 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
65025
65026 newflags = vma->vm_flags | VM_LOCKED;
65027@@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
65028 lock_limit >>= PAGE_SHIFT;
65029
65030 /* check against resource limits */
65031+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
65032 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
65033 error = do_mlock(start, len, 1);
65034 up_write(&current->mm->mmap_sem);
65035@@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
65036 static int do_mlockall(int flags)
65037 {
65038 struct vm_area_struct * vma, * prev = NULL;
65039- unsigned int def_flags = 0;
65040
65041 if (flags & MCL_FUTURE)
65042- def_flags = VM_LOCKED;
65043- current->mm->def_flags = def_flags;
65044+ current->mm->def_flags |= VM_LOCKED;
65045+ else
65046+ current->mm->def_flags &= ~VM_LOCKED;
65047 if (flags == MCL_FUTURE)
65048 goto out;
65049
65050 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
65051- unsigned int newflags;
65052+ unsigned long newflags;
65053+
65054+#ifdef CONFIG_PAX_SEGMEXEC
65055+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
65056+ break;
65057+#endif
65058
65059+ BUG_ON(vma->vm_end > TASK_SIZE);
65060 newflags = vma->vm_flags | VM_LOCKED;
65061 if (!(flags & MCL_CURRENT))
65062 newflags &= ~VM_LOCKED;
65063@@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
65064 lock_limit >>= PAGE_SHIFT;
65065
65066 ret = -ENOMEM;
65067+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
65068 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
65069 capable(CAP_IPC_LOCK))
65070 ret = do_mlockall(flags);
65071diff -urNp linux-2.6.32.43/mm/mmap.c linux-2.6.32.43/mm/mmap.c
65072--- linux-2.6.32.43/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
65073+++ linux-2.6.32.43/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
65074@@ -45,6 +45,16 @@
65075 #define arch_rebalance_pgtables(addr, len) (addr)
65076 #endif
65077
65078+static inline void verify_mm_writelocked(struct mm_struct *mm)
65079+{
65080+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
65081+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
65082+ up_read(&mm->mmap_sem);
65083+ BUG();
65084+ }
65085+#endif
65086+}
65087+
65088 static void unmap_region(struct mm_struct *mm,
65089 struct vm_area_struct *vma, struct vm_area_struct *prev,
65090 unsigned long start, unsigned long end);
65091@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
65092 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
65093 *
65094 */
65095-pgprot_t protection_map[16] = {
65096+pgprot_t protection_map[16] __read_only = {
65097 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
65098 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
65099 };
65100
65101 pgprot_t vm_get_page_prot(unsigned long vm_flags)
65102 {
65103- return __pgprot(pgprot_val(protection_map[vm_flags &
65104+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
65105 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
65106 pgprot_val(arch_vm_get_page_prot(vm_flags)));
65107+
65108+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
65109+ if (!nx_enabled &&
65110+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
65111+ (vm_flags & (VM_READ | VM_WRITE)))
65112+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
65113+#endif
65114+
65115+ return prot;
65116 }
65117 EXPORT_SYMBOL(vm_get_page_prot);
65118
65119 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
65120 int sysctl_overcommit_ratio = 50; /* default is 50% */
65121 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
65122+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
65123 struct percpu_counter vm_committed_as;
65124
65125 /*
65126@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
65127 struct vm_area_struct *next = vma->vm_next;
65128
65129 might_sleep();
65130+ BUG_ON(vma->vm_mirror);
65131 if (vma->vm_ops && vma->vm_ops->close)
65132 vma->vm_ops->close(vma);
65133 if (vma->vm_file) {
65134@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
65135 * not page aligned -Ram Gupta
65136 */
65137 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
65138+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
65139 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
65140 (mm->end_data - mm->start_data) > rlim)
65141 goto out;
65142@@ -704,6 +726,12 @@ static int
65143 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
65144 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
65145 {
65146+
65147+#ifdef CONFIG_PAX_SEGMEXEC
65148+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
65149+ return 0;
65150+#endif
65151+
65152 if (is_mergeable_vma(vma, file, vm_flags) &&
65153 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
65154 if (vma->vm_pgoff == vm_pgoff)
65155@@ -723,6 +751,12 @@ static int
65156 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
65157 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
65158 {
65159+
65160+#ifdef CONFIG_PAX_SEGMEXEC
65161+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
65162+ return 0;
65163+#endif
65164+
65165 if (is_mergeable_vma(vma, file, vm_flags) &&
65166 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
65167 pgoff_t vm_pglen;
65168@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
65169 struct vm_area_struct *vma_merge(struct mm_struct *mm,
65170 struct vm_area_struct *prev, unsigned long addr,
65171 unsigned long end, unsigned long vm_flags,
65172- struct anon_vma *anon_vma, struct file *file,
65173+ struct anon_vma *anon_vma, struct file *file,
65174 pgoff_t pgoff, struct mempolicy *policy)
65175 {
65176 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
65177 struct vm_area_struct *area, *next;
65178
65179+#ifdef CONFIG_PAX_SEGMEXEC
65180+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
65181+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
65182+
65183+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
65184+#endif
65185+
65186 /*
65187 * We later require that vma->vm_flags == vm_flags,
65188 * so this tests vma->vm_flags & VM_SPECIAL, too.
65189@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
65190 if (next && next->vm_end == end) /* cases 6, 7, 8 */
65191 next = next->vm_next;
65192
65193+#ifdef CONFIG_PAX_SEGMEXEC
65194+ if (prev)
65195+ prev_m = pax_find_mirror_vma(prev);
65196+ if (area)
65197+ area_m = pax_find_mirror_vma(area);
65198+ if (next)
65199+ next_m = pax_find_mirror_vma(next);
65200+#endif
65201+
65202 /*
65203 * Can it merge with the predecessor?
65204 */
65205@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
65206 /* cases 1, 6 */
65207 vma_adjust(prev, prev->vm_start,
65208 next->vm_end, prev->vm_pgoff, NULL);
65209- } else /* cases 2, 5, 7 */
65210+
65211+#ifdef CONFIG_PAX_SEGMEXEC
65212+ if (prev_m)
65213+ vma_adjust(prev_m, prev_m->vm_start,
65214+ next_m->vm_end, prev_m->vm_pgoff, NULL);
65215+#endif
65216+
65217+ } else { /* cases 2, 5, 7 */
65218 vma_adjust(prev, prev->vm_start,
65219 end, prev->vm_pgoff, NULL);
65220+
65221+#ifdef CONFIG_PAX_SEGMEXEC
65222+ if (prev_m)
65223+ vma_adjust(prev_m, prev_m->vm_start,
65224+ end_m, prev_m->vm_pgoff, NULL);
65225+#endif
65226+
65227+ }
65228 return prev;
65229 }
65230
65231@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
65232 mpol_equal(policy, vma_policy(next)) &&
65233 can_vma_merge_before(next, vm_flags,
65234 anon_vma, file, pgoff+pglen)) {
65235- if (prev && addr < prev->vm_end) /* case 4 */
65236+ if (prev && addr < prev->vm_end) { /* case 4 */
65237 vma_adjust(prev, prev->vm_start,
65238 addr, prev->vm_pgoff, NULL);
65239- else /* cases 3, 8 */
65240+
65241+#ifdef CONFIG_PAX_SEGMEXEC
65242+ if (prev_m)
65243+ vma_adjust(prev_m, prev_m->vm_start,
65244+ addr_m, prev_m->vm_pgoff, NULL);
65245+#endif
65246+
65247+ } else { /* cases 3, 8 */
65248 vma_adjust(area, addr, next->vm_end,
65249 next->vm_pgoff - pglen, NULL);
65250+
65251+#ifdef CONFIG_PAX_SEGMEXEC
65252+ if (area_m)
65253+ vma_adjust(area_m, addr_m, next_m->vm_end,
65254+ next_m->vm_pgoff - pglen, NULL);
65255+#endif
65256+
65257+ }
65258 return area;
65259 }
65260
65261@@ -898,14 +978,11 @@ none:
65262 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
65263 struct file *file, long pages)
65264 {
65265- const unsigned long stack_flags
65266- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
65267-
65268 if (file) {
65269 mm->shared_vm += pages;
65270 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
65271 mm->exec_vm += pages;
65272- } else if (flags & stack_flags)
65273+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
65274 mm->stack_vm += pages;
65275 if (flags & (VM_RESERVED|VM_IO))
65276 mm->reserved_vm += pages;
65277@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
65278 * (the exception is when the underlying filesystem is noexec
65279 * mounted, in which case we dont add PROT_EXEC.)
65280 */
65281- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
65282+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
65283 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
65284 prot |= PROT_EXEC;
65285
65286@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
65287 /* Obtain the address to map to. we verify (or select) it and ensure
65288 * that it represents a valid section of the address space.
65289 */
65290- addr = get_unmapped_area(file, addr, len, pgoff, flags);
65291+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
65292 if (addr & ~PAGE_MASK)
65293 return addr;
65294
65295@@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file
65296 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
65297 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
65298
65299+#ifdef CONFIG_PAX_MPROTECT
65300+ if (mm->pax_flags & MF_PAX_MPROTECT) {
65301+#ifndef CONFIG_PAX_MPROTECT_COMPAT
65302+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
65303+ gr_log_rwxmmap(file);
65304+
65305+#ifdef CONFIG_PAX_EMUPLT
65306+ vm_flags &= ~VM_EXEC;
65307+#else
65308+ return -EPERM;
65309+#endif
65310+
65311+ }
65312+
65313+ if (!(vm_flags & VM_EXEC))
65314+ vm_flags &= ~VM_MAYEXEC;
65315+#else
65316+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
65317+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
65318+#endif
65319+ else
65320+ vm_flags &= ~VM_MAYWRITE;
65321+ }
65322+#endif
65323+
65324+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
65325+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
65326+ vm_flags &= ~VM_PAGEEXEC;
65327+#endif
65328+
65329 if (flags & MAP_LOCKED)
65330 if (!can_do_mlock())
65331 return -EPERM;
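
The PAX_MPROTECT block added to do_mmap_pgoff() above decides at map time whether a mapping may ever be, or later become, writable or executable: a request for both VM_WRITE and VM_EXEC is logged via gr_log_rwxmmap() and refused (or stripped of VM_EXEC when PAX_EMUPLT is on), and the VM_MAY* bits are trimmed so a later mprotect() cannot add the missing permission. A small userspace model of the non-COMPAT, non-EMUPLT branch follows; the flag values are made up for the example.

/* Userspace model of the policy in the hunk above, not kernel code. */
#include <stdio.h>

#define M_WRITE    0x1
#define M_EXEC     0x2
#define M_MAYWRITE 0x4
#define M_MAYEXEC  0x8

static int mprotect_policy(int vm_flags)
{
	if ((vm_flags & (M_WRITE | M_EXEC)) == (M_WRITE | M_EXEC))
		return -1;			/* refused: writable and executable at once */
	if (!(vm_flags & M_EXEC))
		vm_flags &= ~M_MAYEXEC;		/* can never be made executable later */
	else
		vm_flags &= ~M_MAYWRITE;	/* can never be made writable later */
	return vm_flags;
}

int main(void)
{
	printf("%d\n", mprotect_policy(M_WRITE | M_MAYWRITE | M_MAYEXEC));		/* 5: MAYEXEC dropped */
	printf("%d\n", mprotect_policy(M_WRITE | M_EXEC | M_MAYWRITE | M_MAYEXEC));	/* -1: refused */
	return 0;
}
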
65332@@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file
65333 locked += mm->locked_vm;
65334 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
65335 lock_limit >>= PAGE_SHIFT;
65336+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
65337 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
65338 return -EAGAIN;
65339 }
65340@@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file
65341 if (error)
65342 return error;
65343
65344+ if (!gr_acl_handle_mmap(file, prot))
65345+ return -EACCES;
65346+
65347 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
65348 }
65349 EXPORT_SYMBOL(do_mmap_pgoff);
65350@@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
65351 */
65352 int vma_wants_writenotify(struct vm_area_struct *vma)
65353 {
65354- unsigned int vm_flags = vma->vm_flags;
65355+ unsigned long vm_flags = vma->vm_flags;
65356
65357 /* If it was private or non-writable, the write bit is already clear */
65358- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
65359+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
65360 return 0;
65361
65362 /* The backer wishes to know when pages are first written to? */
65363@@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *f
65364 unsigned long charged = 0;
65365 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
65366
65367+#ifdef CONFIG_PAX_SEGMEXEC
65368+ struct vm_area_struct *vma_m = NULL;
65369+#endif
65370+
65371+ /*
65372+ * mm->mmap_sem is required to protect against another thread
65373+ * changing the mappings in case we sleep.
65374+ */
65375+ verify_mm_writelocked(mm);
65376+
65377 /* Clear old maps */
65378 error = -ENOMEM;
65379-munmap_back:
65380 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65381 if (vma && vma->vm_start < addr + len) {
65382 if (do_munmap(mm, addr, len))
65383 return -ENOMEM;
65384- goto munmap_back;
65385+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
65386+ BUG_ON(vma && vma->vm_start < addr + len);
65387 }
65388
65389 /* Check against address space limit. */
65390@@ -1173,6 +1294,16 @@ munmap_back:
65391 goto unacct_error;
65392 }
65393
65394+#ifdef CONFIG_PAX_SEGMEXEC
65395+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
65396+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
65397+ if (!vma_m) {
65398+ error = -ENOMEM;
65399+ goto free_vma;
65400+ }
65401+ }
65402+#endif
65403+
65404 vma->vm_mm = mm;
65405 vma->vm_start = addr;
65406 vma->vm_end = addr + len;
65407@@ -1195,6 +1326,19 @@ munmap_back:
65408 error = file->f_op->mmap(file, vma);
65409 if (error)
65410 goto unmap_and_free_vma;
65411+
65412+#ifdef CONFIG_PAX_SEGMEXEC
65413+ if (vma_m && (vm_flags & VM_EXECUTABLE))
65414+ added_exe_file_vma(mm);
65415+#endif
65416+
65417+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
65418+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
65419+ vma->vm_flags |= VM_PAGEEXEC;
65420+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
65421+ }
65422+#endif
65423+
65424 if (vm_flags & VM_EXECUTABLE)
65425 added_exe_file_vma(mm);
65426
65427@@ -1218,6 +1362,11 @@ munmap_back:
65428 vma_link(mm, vma, prev, rb_link, rb_parent);
65429 file = vma->vm_file;
65430
65431+#ifdef CONFIG_PAX_SEGMEXEC
65432+ if (vma_m)
65433+ pax_mirror_vma(vma_m, vma);
65434+#endif
65435+
65436 /* Once vma denies write, undo our temporary denial count */
65437 if (correct_wcount)
65438 atomic_inc(&inode->i_writecount);
65439@@ -1226,6 +1375,7 @@ out:
65440
65441 mm->total_vm += len >> PAGE_SHIFT;
65442 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
65443+ track_exec_limit(mm, addr, addr + len, vm_flags);
65444 if (vm_flags & VM_LOCKED) {
65445 /*
65446 * makes pages present; downgrades, drops, reacquires mmap_sem
65447@@ -1248,6 +1398,12 @@ unmap_and_free_vma:
65448 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
65449 charged = 0;
65450 free_vma:
65451+
65452+#ifdef CONFIG_PAX_SEGMEXEC
65453+ if (vma_m)
65454+ kmem_cache_free(vm_area_cachep, vma_m);
65455+#endif
65456+
65457 kmem_cache_free(vm_area_cachep, vma);
65458 unacct_error:
65459 if (charged)
65460@@ -1255,6 +1411,44 @@ unacct_error:
65461 return error;
65462 }
65463
65464+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
65465+{
65466+ if (!vma) {
65467+#ifdef CONFIG_STACK_GROWSUP
65468+ if (addr > sysctl_heap_stack_gap)
65469+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
65470+ else
65471+ vma = find_vma(current->mm, 0);
65472+ if (vma && (vma->vm_flags & VM_GROWSUP))
65473+ return false;
65474+#endif
65475+ return true;
65476+ }
65477+
65478+ if (addr + len > vma->vm_start)
65479+ return false;
65480+
65481+ if (vma->vm_flags & VM_GROWSDOWN)
65482+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
65483+#ifdef CONFIG_STACK_GROWSUP
65484+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
65485+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
65486+#endif
65487+
65488+ return true;
65489+}
65490+
65491+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
65492+{
65493+ if (vma->vm_start < len)
65494+ return -ENOMEM;
65495+ if (!(vma->vm_flags & VM_GROWSDOWN))
65496+ return vma->vm_start - len;
65497+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
65498+ return vma->vm_start - len - sysctl_heap_stack_gap;
65499+ return -ENOMEM;
65500+}
65501+
65502 /* Get an address range which is currently unmapped.
65503 * For shmat() with addr=0.
65504 *
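
check_heap_stack_gap() and skip_heap_stack_gap(), added in the hunk above and used throughout the get_unmapped_area rewrites below, implement the sysctl_heap_stack_gap policy (default 64 KiB, set near the top of this file's hunks): a candidate mapping is rejected if it would end closer than the configured gap to the start of a grows-down stack vma. A standalone model of the grows-down case with concrete, made-up addresses:

/* Userspace model of the grows-down case of check_heap_stack_gap(). */
#include <stdbool.h>
#include <stdio.h>

static bool gap_ok(unsigned long addr, unsigned long len,
		   unsigned long stack_start, unsigned long gap)
{
	if (addr + len > stack_start)		/* would overlap the stack vma itself */
		return false;
	return stack_start - addr - len >= gap;	/* leave at least `gap` bytes below the stack */
}

int main(void)
{
	unsigned long stack_start = 0xbf800000UL;	/* bottom of a grows-down stack vma */
	unsigned long gap = 64 * 1024;			/* default sysctl_heap_stack_gap */

	printf("%d\n", gap_ok(0xbf700000UL, 0xe0000, stack_start, gap));	/* 1: 128 KiB of gap remains */
	printf("%d\n", gap_ok(0xbf7f8000UL, 0x4000, stack_start, gap));		/* 0: only 16 KiB would remain */
	return 0;
}
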
65505@@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp
65506 if (flags & MAP_FIXED)
65507 return addr;
65508
65509+#ifdef CONFIG_PAX_RANDMMAP
65510+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
65511+#endif
65512+
65513 if (addr) {
65514 addr = PAGE_ALIGN(addr);
65515- vma = find_vma(mm, addr);
65516- if (TASK_SIZE - len >= addr &&
65517- (!vma || addr + len <= vma->vm_start))
65518- return addr;
65519+ if (TASK_SIZE - len >= addr) {
65520+ vma = find_vma(mm, addr);
65521+ if (check_heap_stack_gap(vma, addr, len))
65522+ return addr;
65523+ }
65524 }
65525 if (len > mm->cached_hole_size) {
65526- start_addr = addr = mm->free_area_cache;
65527+ start_addr = addr = mm->free_area_cache;
65528 } else {
65529- start_addr = addr = TASK_UNMAPPED_BASE;
65530- mm->cached_hole_size = 0;
65531+ start_addr = addr = mm->mmap_base;
65532+ mm->cached_hole_size = 0;
65533 }
65534
65535 full_search:
65536@@ -1303,34 +1502,40 @@ full_search:
65537 * Start a new search - just in case we missed
65538 * some holes.
65539 */
65540- if (start_addr != TASK_UNMAPPED_BASE) {
65541- addr = TASK_UNMAPPED_BASE;
65542- start_addr = addr;
65543+ if (start_addr != mm->mmap_base) {
65544+ start_addr = addr = mm->mmap_base;
65545 mm->cached_hole_size = 0;
65546 goto full_search;
65547 }
65548 return -ENOMEM;
65549 }
65550- if (!vma || addr + len <= vma->vm_start) {
65551- /*
65552- * Remember the place where we stopped the search:
65553- */
65554- mm->free_area_cache = addr + len;
65555- return addr;
65556- }
65557+ if (check_heap_stack_gap(vma, addr, len))
65558+ break;
65559 if (addr + mm->cached_hole_size < vma->vm_start)
65560 mm->cached_hole_size = vma->vm_start - addr;
65561 addr = vma->vm_end;
65562 }
65563+
65564+ /*
65565+ * Remember the place where we stopped the search:
65566+ */
65567+ mm->free_area_cache = addr + len;
65568+ return addr;
65569 }
65570 #endif
65571
65572 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
65573 {
65574+
65575+#ifdef CONFIG_PAX_SEGMEXEC
65576+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
65577+ return;
65578+#endif
65579+
65580 /*
65581 * Is this a new hole at the lowest possible address?
65582 */
65583- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
65584+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
65585 mm->free_area_cache = addr;
65586 mm->cached_hole_size = ~0UL;
65587 }
65588@@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct fi
65589 {
65590 struct vm_area_struct *vma;
65591 struct mm_struct *mm = current->mm;
65592- unsigned long addr = addr0;
65593+ unsigned long base = mm->mmap_base, addr = addr0;
65594
65595 /* requested length too big for entire address space */
65596 if (len > TASK_SIZE)
65597@@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct fi
65598 if (flags & MAP_FIXED)
65599 return addr;
65600
65601+#ifdef CONFIG_PAX_RANDMMAP
65602+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
65603+#endif
65604+
65605 /* requesting a specific address */
65606 if (addr) {
65607 addr = PAGE_ALIGN(addr);
65608- vma = find_vma(mm, addr);
65609- if (TASK_SIZE - len >= addr &&
65610- (!vma || addr + len <= vma->vm_start))
65611- return addr;
65612+ if (TASK_SIZE - len >= addr) {
65613+ vma = find_vma(mm, addr);
65614+ if (check_heap_stack_gap(vma, addr, len))
65615+ return addr;
65616+ }
65617 }
65618
65619 /* check if free_area_cache is useful for us */
65620@@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct fi
65621 /* make sure it can fit in the remaining address space */
65622 if (addr > len) {
65623 vma = find_vma(mm, addr-len);
65624- if (!vma || addr <= vma->vm_start)
65625+ if (check_heap_stack_gap(vma, addr - len, len))
65626 /* remember the address as a hint for next time */
65627 return (mm->free_area_cache = addr-len);
65628 }
65629@@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct fi
65630 * return with success:
65631 */
65632 vma = find_vma(mm, addr);
65633- if (!vma || addr+len <= vma->vm_start)
65634+ if (check_heap_stack_gap(vma, addr, len))
65635 /* remember the address as a hint for next time */
65636 return (mm->free_area_cache = addr);
65637
65638@@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct fi
65639 mm->cached_hole_size = vma->vm_start - addr;
65640
65641 /* try just below the current vma->vm_start */
65642- addr = vma->vm_start-len;
65643- } while (len < vma->vm_start);
65644+ addr = skip_heap_stack_gap(vma, len);
65645+ } while (!IS_ERR_VALUE(addr));
65646
65647 bottomup:
65648 /*
65649@@ -1414,13 +1624,21 @@ bottomup:
65650 * can happen with large stack limits and large mmap()
65651 * allocations.
65652 */
65653+ mm->mmap_base = TASK_UNMAPPED_BASE;
65654+
65655+#ifdef CONFIG_PAX_RANDMMAP
65656+ if (mm->pax_flags & MF_PAX_RANDMMAP)
65657+ mm->mmap_base += mm->delta_mmap;
65658+#endif
65659+
65660+ mm->free_area_cache = mm->mmap_base;
65661 mm->cached_hole_size = ~0UL;
65662- mm->free_area_cache = TASK_UNMAPPED_BASE;
65663 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
65664 /*
65665 * Restore the topdown base:
65666 */
65667- mm->free_area_cache = mm->mmap_base;
65668+ mm->mmap_base = base;
65669+ mm->free_area_cache = base;
65670 mm->cached_hole_size = ~0UL;
65671
65672 return addr;
65673@@ -1429,6 +1647,12 @@ bottomup:
65674
65675 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
65676 {
65677+
65678+#ifdef CONFIG_PAX_SEGMEXEC
65679+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
65680+ return;
65681+#endif
65682+
65683 /*
65684 * Is this a new hole at the highest possible address?
65685 */
65686@@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_s
65687 mm->free_area_cache = addr;
65688
65689 /* dont allow allocations above current base */
65690- if (mm->free_area_cache > mm->mmap_base)
65691+ if (mm->free_area_cache > mm->mmap_base) {
65692 mm->free_area_cache = mm->mmap_base;
65693+ mm->cached_hole_size = ~0UL;
65694+ }
65695 }
65696
65697 unsigned long
65698@@ -1545,6 +1771,27 @@ out:
65699 return prev ? prev->vm_next : vma;
65700 }
65701
65702+#ifdef CONFIG_PAX_SEGMEXEC
65703+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
65704+{
65705+ struct vm_area_struct *vma_m;
65706+
65707+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
65708+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
65709+ BUG_ON(vma->vm_mirror);
65710+ return NULL;
65711+ }
65712+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
65713+ vma_m = vma->vm_mirror;
65714+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
65715+ BUG_ON(vma->vm_file != vma_m->vm_file);
65716+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
65717+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
65718+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
65719+ return vma_m;
65720+}
65721+#endif
65722+
65723 /*
65724 * Verify that the stack growth is acceptable and
65725 * update accounting. This is shared with both the
65726@@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_a
65727 return -ENOMEM;
65728
65729 /* Stack limit test */
65730+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
65731 if (size > rlim[RLIMIT_STACK].rlim_cur)
65732 return -ENOMEM;
65733
65734@@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_a
65735 unsigned long limit;
65736 locked = mm->locked_vm + grow;
65737 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
65738+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
65739 if (locked > limit && !capable(CAP_IPC_LOCK))
65740 return -ENOMEM;
65741 }
65742@@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_a
65743 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
65744 * vma is the last one with address > vma->vm_end. Have to extend vma.
65745 */
65746+#ifndef CONFIG_IA64
65747+static
65748+#endif
65749 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
65750 {
65751 int error;
65752+ bool locknext;
65753
65754 if (!(vma->vm_flags & VM_GROWSUP))
65755 return -EFAULT;
65756
65757+ /* Also guard against wrapping around to address 0. */
65758+ if (address < PAGE_ALIGN(address+1))
65759+ address = PAGE_ALIGN(address+1);
65760+ else
65761+ return -ENOMEM;
65762+
65763 /*
65764 * We must make sure the anon_vma is allocated
65765 * so that the anon_vma locking is not a noop.
65766 */
65767 if (unlikely(anon_vma_prepare(vma)))
65768 return -ENOMEM;
65769+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
65770+ if (locknext && anon_vma_prepare(vma->vm_next))
65771+ return -ENOMEM;
65772 anon_vma_lock(vma);
65773+ if (locknext)
65774+ anon_vma_lock(vma->vm_next);
65775
65776 /*
65777 * vma->vm_start/vm_end cannot change under us because the caller
65778 * is required to hold the mmap_sem in read mode. We need the
65779- * anon_vma lock to serialize against concurrent expand_stacks.
65780- * Also guard against wrapping around to address 0.
65781+ * anon_vma locks to serialize against concurrent expand_stacks
65782+ * and expand_upwards.
65783 */
65784- if (address < PAGE_ALIGN(address+4))
65785- address = PAGE_ALIGN(address+4);
65786- else {
65787- anon_vma_unlock(vma);
65788- return -ENOMEM;
65789- }
65790 error = 0;
65791
65792 /* Somebody else might have raced and expanded it already */
65793- if (address > vma->vm_end) {
65794+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
65795+ error = -ENOMEM;
65796+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
65797 unsigned long size, grow;
65798
65799 size = address - vma->vm_start;
65800@@ -1640,6 +1900,8 @@ int expand_upwards(struct vm_area_struct
65801 if (!error)
65802 vma->vm_end = address;
65803 }
65804+ if (locknext)
65805+ anon_vma_unlock(vma->vm_next);
65806 anon_vma_unlock(vma);
65807 return error;
65808 }
65809@@ -1652,6 +1914,8 @@ static int expand_downwards(struct vm_ar
65810 unsigned long address)
65811 {
65812 int error;
65813+ bool lockprev = false;
65814+ struct vm_area_struct *prev;
65815
65816 /*
65817 * We must make sure the anon_vma is allocated
65818@@ -1665,6 +1929,15 @@ static int expand_downwards(struct vm_ar
65819 if (error)
65820 return error;
65821
65822+ prev = vma->vm_prev;
65823+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
65824+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
65825+#endif
65826+ if (lockprev && anon_vma_prepare(prev))
65827+ return -ENOMEM;
65828+ if (lockprev)
65829+ anon_vma_lock(prev);
65830+
65831 anon_vma_lock(vma);
65832
65833 /*
65834@@ -1674,9 +1947,17 @@ static int expand_downwards(struct vm_ar
65835 */
65836
65837 /* Somebody else might have raced and expanded it already */
65838- if (address < vma->vm_start) {
65839+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
65840+ error = -ENOMEM;
65841+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
65842 unsigned long size, grow;
65843
65844+#ifdef CONFIG_PAX_SEGMEXEC
65845+ struct vm_area_struct *vma_m;
65846+
65847+ vma_m = pax_find_mirror_vma(vma);
65848+#endif
65849+
65850 size = vma->vm_end - address;
65851 grow = (vma->vm_start - address) >> PAGE_SHIFT;
65852
65853@@ -1684,9 +1965,20 @@ static int expand_downwards(struct vm_ar
65854 if (!error) {
65855 vma->vm_start = address;
65856 vma->vm_pgoff -= grow;
65857+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
65858+
65859+#ifdef CONFIG_PAX_SEGMEXEC
65860+ if (vma_m) {
65861+ vma_m->vm_start -= grow << PAGE_SHIFT;
65862+ vma_m->vm_pgoff -= grow;
65863+ }
65864+#endif
65865+
65866 }
65867 }
65868 anon_vma_unlock(vma);
65869+ if (lockprev)
65870+ anon_vma_unlock(prev);
65871 return error;
65872 }
65873
65874@@ -1762,6 +2054,13 @@ static void remove_vma_list(struct mm_st
65875 do {
65876 long nrpages = vma_pages(vma);
65877
65878+#ifdef CONFIG_PAX_SEGMEXEC
65879+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
65880+ vma = remove_vma(vma);
65881+ continue;
65882+ }
65883+#endif
65884+
65885 mm->total_vm -= nrpages;
65886 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
65887 vma = remove_vma(vma);
65888@@ -1807,6 +2106,16 @@ detach_vmas_to_be_unmapped(struct mm_str
65889 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
65890 vma->vm_prev = NULL;
65891 do {
65892+
65893+#ifdef CONFIG_PAX_SEGMEXEC
65894+ if (vma->vm_mirror) {
65895+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
65896+ vma->vm_mirror->vm_mirror = NULL;
65897+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
65898+ vma->vm_mirror = NULL;
65899+ }
65900+#endif
65901+
65902 rb_erase(&vma->vm_rb, &mm->mm_rb);
65903 mm->map_count--;
65904 tail_vma = vma;
65905@@ -1834,10 +2143,25 @@ int split_vma(struct mm_struct * mm, str
65906 struct mempolicy *pol;
65907 struct vm_area_struct *new;
65908
65909+#ifdef CONFIG_PAX_SEGMEXEC
65910+ struct vm_area_struct *vma_m, *new_m = NULL;
65911+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
65912+#endif
65913+
65914 if (is_vm_hugetlb_page(vma) && (addr &
65915 ~(huge_page_mask(hstate_vma(vma)))))
65916 return -EINVAL;
65917
65918+#ifdef CONFIG_PAX_SEGMEXEC
65919+ vma_m = pax_find_mirror_vma(vma);
65920+
65921+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
65922+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
65923+ if (mm->map_count >= sysctl_max_map_count-1)
65924+ return -ENOMEM;
65925+ } else
65926+#endif
65927+
65928 if (mm->map_count >= sysctl_max_map_count)
65929 return -ENOMEM;
65930
65931@@ -1845,6 +2169,16 @@ int split_vma(struct mm_struct * mm, str
65932 if (!new)
65933 return -ENOMEM;
65934
65935+#ifdef CONFIG_PAX_SEGMEXEC
65936+ if (vma_m) {
65937+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
65938+ if (!new_m) {
65939+ kmem_cache_free(vm_area_cachep, new);
65940+ return -ENOMEM;
65941+ }
65942+ }
65943+#endif
65944+
65945 /* most fields are the same, copy all, and then fixup */
65946 *new = *vma;
65947
65948@@ -1855,8 +2189,29 @@ int split_vma(struct mm_struct * mm, str
65949 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
65950 }
65951
65952+#ifdef CONFIG_PAX_SEGMEXEC
65953+ if (vma_m) {
65954+ *new_m = *vma_m;
65955+ new_m->vm_mirror = new;
65956+ new->vm_mirror = new_m;
65957+
65958+ if (new_below)
65959+ new_m->vm_end = addr_m;
65960+ else {
65961+ new_m->vm_start = addr_m;
65962+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
65963+ }
65964+ }
65965+#endif
65966+
65967 pol = mpol_dup(vma_policy(vma));
65968 if (IS_ERR(pol)) {
65969+
65970+#ifdef CONFIG_PAX_SEGMEXEC
65971+ if (new_m)
65972+ kmem_cache_free(vm_area_cachep, new_m);
65973+#endif
65974+
65975 kmem_cache_free(vm_area_cachep, new);
65976 return PTR_ERR(pol);
65977 }
65978@@ -1877,6 +2232,28 @@ int split_vma(struct mm_struct * mm, str
65979 else
65980 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
65981
65982+#ifdef CONFIG_PAX_SEGMEXEC
65983+ if (vma_m) {
65984+ mpol_get(pol);
65985+ vma_set_policy(new_m, pol);
65986+
65987+ if (new_m->vm_file) {
65988+ get_file(new_m->vm_file);
65989+ if (vma_m->vm_flags & VM_EXECUTABLE)
65990+ added_exe_file_vma(mm);
65991+ }
65992+
65993+ if (new_m->vm_ops && new_m->vm_ops->open)
65994+ new_m->vm_ops->open(new_m);
65995+
65996+ if (new_below)
65997+ vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
65998+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
65999+ else
66000+ vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
66001+ }
66002+#endif
66003+
66004 return 0;
66005 }
66006
66007@@ -1885,11 +2262,30 @@ int split_vma(struct mm_struct * mm, str
66008 * work. This now handles partial unmappings.
66009 * Jeremy Fitzhardinge <jeremy@goop.org>
66010 */
66011+#ifdef CONFIG_PAX_SEGMEXEC
66012+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
66013+{
66014+ int ret = __do_munmap(mm, start, len);
66015+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
66016+ return ret;
66017+
66018+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
66019+}
66020+
66021+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
66022+#else
66023 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
66024+#endif
66025 {
66026 unsigned long end;
66027 struct vm_area_struct *vma, *prev, *last;
66028
66029+ /*
66030+ * mm->mmap_sem is required to protect against another thread
66031+ * changing the mappings in case we sleep.
66032+ */
66033+ verify_mm_writelocked(mm);
66034+
66035 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
66036 return -EINVAL;
66037
66038@@ -1953,6 +2349,8 @@ int do_munmap(struct mm_struct *mm, unsi
66039 /* Fix up all other VM information */
66040 remove_vma_list(mm, vma);
66041
66042+ track_exec_limit(mm, start, end, 0UL);
66043+
66044 return 0;
66045 }
66046
66047@@ -1965,22 +2363,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
66048
66049 profile_munmap(addr);
66050
66051+#ifdef CONFIG_PAX_SEGMEXEC
66052+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
66053+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
66054+ return -EINVAL;
66055+#endif
66056+
66057 down_write(&mm->mmap_sem);
66058 ret = do_munmap(mm, addr, len);
66059 up_write(&mm->mmap_sem);
66060 return ret;
66061 }
66062
66063-static inline void verify_mm_writelocked(struct mm_struct *mm)
66064-{
66065-#ifdef CONFIG_DEBUG_VM
66066- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
66067- WARN_ON(1);
66068- up_read(&mm->mmap_sem);
66069- }
66070-#endif
66071-}
66072-
66073 /*
66074 * this is really a simplified "do_mmap". it only handles
66075 * anonymous maps. eventually we may be able to do some
66076@@ -1994,6 +2388,7 @@ unsigned long do_brk(unsigned long addr,
66077 struct rb_node ** rb_link, * rb_parent;
66078 pgoff_t pgoff = addr >> PAGE_SHIFT;
66079 int error;
66080+ unsigned long charged;
66081
66082 len = PAGE_ALIGN(len);
66083 if (!len)
66084@@ -2005,16 +2400,30 @@ unsigned long do_brk(unsigned long addr,
66085
66086 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
66087
66088+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
66089+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
66090+ flags &= ~VM_EXEC;
66091+
66092+#ifdef CONFIG_PAX_MPROTECT
66093+ if (mm->pax_flags & MF_PAX_MPROTECT)
66094+ flags &= ~VM_MAYEXEC;
66095+#endif
66096+
66097+ }
66098+#endif
66099+
66100 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
66101 if (error & ~PAGE_MASK)
66102 return error;
66103
66104+ charged = len >> PAGE_SHIFT;
66105+
66106 /*
66107 * mlock MCL_FUTURE?
66108 */
66109 if (mm->def_flags & VM_LOCKED) {
66110 unsigned long locked, lock_limit;
66111- locked = len >> PAGE_SHIFT;
66112+ locked = charged;
66113 locked += mm->locked_vm;
66114 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
66115 lock_limit >>= PAGE_SHIFT;
66116@@ -2031,22 +2440,22 @@ unsigned long do_brk(unsigned long addr,
66117 /*
66118 * Clear old maps. this also does some error checking for us
66119 */
66120- munmap_back:
66121 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
66122 if (vma && vma->vm_start < addr + len) {
66123 if (do_munmap(mm, addr, len))
66124 return -ENOMEM;
66125- goto munmap_back;
66126+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
66127+ BUG_ON(vma && vma->vm_start < addr + len);
66128 }
66129
66130 /* Check against address space limits *after* clearing old maps... */
66131- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
66132+ if (!may_expand_vm(mm, charged))
66133 return -ENOMEM;
66134
66135 if (mm->map_count > sysctl_max_map_count)
66136 return -ENOMEM;
66137
66138- if (security_vm_enough_memory(len >> PAGE_SHIFT))
66139+ if (security_vm_enough_memory(charged))
66140 return -ENOMEM;
66141
66142 /* Can we just expand an old private anonymous mapping? */
66143@@ -2060,7 +2469,7 @@ unsigned long do_brk(unsigned long addr,
66144 */
66145 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
66146 if (!vma) {
66147- vm_unacct_memory(len >> PAGE_SHIFT);
66148+ vm_unacct_memory(charged);
66149 return -ENOMEM;
66150 }
66151
66152@@ -2072,11 +2481,12 @@ unsigned long do_brk(unsigned long addr,
66153 vma->vm_page_prot = vm_get_page_prot(flags);
66154 vma_link(mm, vma, prev, rb_link, rb_parent);
66155 out:
66156- mm->total_vm += len >> PAGE_SHIFT;
66157+ mm->total_vm += charged;
66158 if (flags & VM_LOCKED) {
66159 if (!mlock_vma_pages_range(vma, addr, addr + len))
66160- mm->locked_vm += (len >> PAGE_SHIFT);
66161+ mm->locked_vm += charged;
66162 }
66163+ track_exec_limit(mm, addr, addr + len, flags);
66164 return addr;
66165 }
66166
66167@@ -2123,8 +2533,10 @@ void exit_mmap(struct mm_struct *mm)
66168 * Walk the list again, actually closing and freeing it,
66169 * with preemption enabled, without holding any MM locks.
66170 */
66171- while (vma)
66172+ while (vma) {
66173+ vma->vm_mirror = NULL;
66174 vma = remove_vma(vma);
66175+ }
66176
66177 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
66178 }
66179@@ -2138,6 +2550,10 @@ int insert_vm_struct(struct mm_struct *
66180 struct vm_area_struct * __vma, * prev;
66181 struct rb_node ** rb_link, * rb_parent;
66182
66183+#ifdef CONFIG_PAX_SEGMEXEC
66184+ struct vm_area_struct *vma_m = NULL;
66185+#endif
66186+
66187 /*
66188 * The vm_pgoff of a purely anonymous vma should be irrelevant
66189 * until its first write fault, when page's anon_vma and index
66190@@ -2160,7 +2576,22 @@ int insert_vm_struct(struct mm_struct *
66191 if ((vma->vm_flags & VM_ACCOUNT) &&
66192 security_vm_enough_memory_mm(mm, vma_pages(vma)))
66193 return -ENOMEM;
66194+
66195+#ifdef CONFIG_PAX_SEGMEXEC
66196+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
66197+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
66198+ if (!vma_m)
66199+ return -ENOMEM;
66200+ }
66201+#endif
66202+
66203 vma_link(mm, vma, prev, rb_link, rb_parent);
66204+
66205+#ifdef CONFIG_PAX_SEGMEXEC
66206+ if (vma_m)
66207+ pax_mirror_vma(vma_m, vma);
66208+#endif
66209+
66210 return 0;
66211 }
66212
66213@@ -2178,6 +2609,8 @@ struct vm_area_struct *copy_vma(struct v
66214 struct rb_node **rb_link, *rb_parent;
66215 struct mempolicy *pol;
66216
66217+ BUG_ON(vma->vm_mirror);
66218+
66219 /*
66220 * If anonymous vma has not yet been faulted, update new pgoff
66221 * to match new location, to increase its chance of merging.
66222@@ -2221,6 +2654,35 @@ struct vm_area_struct *copy_vma(struct v
66223 return new_vma;
66224 }
66225
66226+#ifdef CONFIG_PAX_SEGMEXEC
66227+void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
66228+{
66229+ struct vm_area_struct *prev_m;
66230+ struct rb_node **rb_link_m, *rb_parent_m;
66231+ struct mempolicy *pol_m;
66232+
66233+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
66234+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
66235+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
66236+ *vma_m = *vma;
66237+ pol_m = vma_policy(vma_m);
66238+ mpol_get(pol_m);
66239+ vma_set_policy(vma_m, pol_m);
66240+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
66241+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
66242+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
66243+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
66244+ if (vma_m->vm_file)
66245+ get_file(vma_m->vm_file);
66246+ if (vma_m->vm_ops && vma_m->vm_ops->open)
66247+ vma_m->vm_ops->open(vma_m);
66248+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
66249+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
66250+ vma_m->vm_mirror = vma;
66251+ vma->vm_mirror = vma_m;
66252+}
66253+#endif
66254+
66255 /*
66256 * Return true if the calling process may expand its vm space by the passed
66257 * number of pages
66258@@ -2231,7 +2693,7 @@ int may_expand_vm(struct mm_struct *mm,
66259 unsigned long lim;
66260
66261 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
66262-
66263+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
66264 if (cur + npages > lim)
66265 return 0;
66266 return 1;
66267@@ -2301,6 +2763,22 @@ int install_special_mapping(struct mm_st
66268 vma->vm_start = addr;
66269 vma->vm_end = addr + len;
66270
66271+#ifdef CONFIG_PAX_MPROTECT
66272+ if (mm->pax_flags & MF_PAX_MPROTECT) {
66273+#ifndef CONFIG_PAX_MPROTECT_COMPAT
66274+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
66275+ return -EPERM;
66276+ if (!(vm_flags & VM_EXEC))
66277+ vm_flags &= ~VM_MAYEXEC;
66278+#else
66279+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
66280+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
66281+#endif
66282+ else
66283+ vm_flags &= ~VM_MAYWRITE;
66284+ }
66285+#endif
66286+
66287 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
66288 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
66289
66290diff -urNp linux-2.6.32.43/mm/mprotect.c linux-2.6.32.43/mm/mprotect.c
66291--- linux-2.6.32.43/mm/mprotect.c 2011-03-27 14:31:47.000000000 -0400
66292+++ linux-2.6.32.43/mm/mprotect.c 2011-04-17 15:56:46.000000000 -0400
66293@@ -24,10 +24,16 @@
66294 #include <linux/mmu_notifier.h>
66295 #include <linux/migrate.h>
66296 #include <linux/perf_event.h>
66297+
66298+#ifdef CONFIG_PAX_MPROTECT
66299+#include <linux/elf.h>
66300+#endif
66301+
66302 #include <asm/uaccess.h>
66303 #include <asm/pgtable.h>
66304 #include <asm/cacheflush.h>
66305 #include <asm/tlbflush.h>
66306+#include <asm/mmu_context.h>
66307
66308 #ifndef pgprot_modify
66309 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
66310@@ -132,6 +138,48 @@ static void change_protection(struct vm_
66311 flush_tlb_range(vma, start, end);
66312 }
66313
66314+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66315+/* called while holding the mmap semaphore for writing, except for stack expansion */
66316+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
66317+{
66318+ unsigned long oldlimit, newlimit = 0UL;
66319+
66320+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
66321+ return;
66322+
66323+ spin_lock(&mm->page_table_lock);
66324+ oldlimit = mm->context.user_cs_limit;
66325+ if ((prot & VM_EXEC) && oldlimit < end)
66326+ /* USER_CS limit moved up */
66327+ newlimit = end;
66328+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
66329+ /* USER_CS limit moved down */
66330+ newlimit = start;
66331+
66332+ if (newlimit) {
66333+ mm->context.user_cs_limit = newlimit;
66334+
66335+#ifdef CONFIG_SMP
66336+ wmb();
66337+ cpus_clear(mm->context.cpu_user_cs_mask);
66338+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
66339+#endif
66340+
66341+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
66342+ }
66343+ spin_unlock(&mm->page_table_lock);
66344+ if (newlimit == end) {
66345+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
66346+
66347+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
66348+ if (is_vm_hugetlb_page(vma))
66349+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
66350+ else
66351+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
66352+ }
66353+}
66354+#endif
66355+
66356 int
66357 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
66358 unsigned long start, unsigned long end, unsigned long newflags)
66359@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
66360 int error;
66361 int dirty_accountable = 0;
66362
66363+#ifdef CONFIG_PAX_SEGMEXEC
66364+ struct vm_area_struct *vma_m = NULL;
66365+ unsigned long start_m, end_m;
66366+
66367+ start_m = start + SEGMEXEC_TASK_SIZE;
66368+ end_m = end + SEGMEXEC_TASK_SIZE;
66369+#endif
66370+
66371 if (newflags == oldflags) {
66372 *pprev = vma;
66373 return 0;
66374 }
66375
66376+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
66377+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
66378+
66379+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
66380+ return -ENOMEM;
66381+
66382+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
66383+ return -ENOMEM;
66384+ }
66385+
66386 /*
66387 * If we make a private mapping writable we increase our commit;
66388 * but (without finer accounting) cannot reduce our commit if we
66389@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
66390 }
66391 }
66392
66393+#ifdef CONFIG_PAX_SEGMEXEC
66394+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
66395+ if (start != vma->vm_start) {
66396+ error = split_vma(mm, vma, start, 1);
66397+ if (error)
66398+ goto fail;
66399+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
66400+ *pprev = (*pprev)->vm_next;
66401+ }
66402+
66403+ if (end != vma->vm_end) {
66404+ error = split_vma(mm, vma, end, 0);
66405+ if (error)
66406+ goto fail;
66407+ }
66408+
66409+ if (pax_find_mirror_vma(vma)) {
66410+ error = __do_munmap(mm, start_m, end_m - start_m);
66411+ if (error)
66412+ goto fail;
66413+ } else {
66414+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
66415+ if (!vma_m) {
66416+ error = -ENOMEM;
66417+ goto fail;
66418+ }
66419+ vma->vm_flags = newflags;
66420+ pax_mirror_vma(vma_m, vma);
66421+ }
66422+ }
66423+#endif
66424+
66425 /*
66426 * First try to merge with previous and/or next vma.
66427 */
66428@@ -195,9 +293,21 @@ success:
66429 * vm_flags and vm_page_prot are protected by the mmap_sem
66430 * held in write mode.
66431 */
66432+
66433+#ifdef CONFIG_PAX_SEGMEXEC
66434+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
66435+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
66436+#endif
66437+
66438 vma->vm_flags = newflags;
66439+
66440+#ifdef CONFIG_PAX_MPROTECT
66441+ if (mm->binfmt && mm->binfmt->handle_mprotect)
66442+ mm->binfmt->handle_mprotect(vma, newflags);
66443+#endif
66444+
66445 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
66446- vm_get_page_prot(newflags));
66447+ vm_get_page_prot(vma->vm_flags));
66448
66449 if (vma_wants_writenotify(vma)) {
66450 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
66451@@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66452 end = start + len;
66453 if (end <= start)
66454 return -ENOMEM;
66455+
66456+#ifdef CONFIG_PAX_SEGMEXEC
66457+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
66458+ if (end > SEGMEXEC_TASK_SIZE)
66459+ return -EINVAL;
66460+ } else
66461+#endif
66462+
66463+ if (end > TASK_SIZE)
66464+ return -EINVAL;
66465+
66466 if (!arch_validate_prot(prot))
66467 return -EINVAL;
66468
66469@@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66470 /*
66471 * Does the application expect PROT_READ to imply PROT_EXEC:
66472 */
66473- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
66474+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
66475 prot |= PROT_EXEC;
66476
66477 vm_flags = calc_vm_prot_bits(prot);
66478@@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66479 if (start > vma->vm_start)
66480 prev = vma;
66481
66482+#ifdef CONFIG_PAX_MPROTECT
66483+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
66484+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
66485+#endif
66486+
66487 for (nstart = start ; ; ) {
66488 unsigned long newflags;
66489
66490@@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66491
66492 /* newflags >> 4 shift VM_MAY% in place of VM_% */
66493 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
66494+ if (prot & (PROT_WRITE | PROT_EXEC))
66495+ gr_log_rwxmprotect(vma->vm_file);
66496+
66497+ error = -EACCES;
66498+ goto out;
66499+ }
66500+
66501+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
66502 error = -EACCES;
66503 goto out;
66504 }
66505@@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
66506 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
66507 if (error)
66508 goto out;
66509+
66510+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
66511+
66512 nstart = tmp;
66513
66514 if (nstart < prev->vm_end)
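
The mprotect() hunk above widens the READ_IMPLIES_EXEC personality test from PROT_READ to PROT_READ | PROT_WRITE. The small user-space probe below shows the mechanism being widened: with that personality bit set, a mapping requested with PROT_READ alone typically comes back executable. This is an illustration only; the exact result depends on the architecture and on whether a patched kernel is running.

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <sys/personality.h>

static void show_perms(const void *addr)
{
	char line[256];
	unsigned long target = (unsigned long)addr;
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return;
	while (fgets(line, sizeof(line), f)) {
		unsigned long start, end;

		if (sscanf(line, "%lx-%lx", &start, &end) == 2 &&
		    start <= target && target < end) {
			fputs(line, stdout);	/* second column shows rwxp */
			break;
		}
	}
	fclose(f);
}

int main(void)
{
	void *p;

	/* turn on the legacy "readable implies executable" personality bit */
	personality(personality(0xffffffff) | READ_IMPLIES_EXEC);

	p = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	show_perms(p);	/* commonly prints "r-xp" for this mapping */
	munmap(p, 4096);
	return 0;
}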
66515diff -urNp linux-2.6.32.43/mm/mremap.c linux-2.6.32.43/mm/mremap.c
66516--- linux-2.6.32.43/mm/mremap.c 2011-04-17 17:00:52.000000000 -0400
66517+++ linux-2.6.32.43/mm/mremap.c 2011-04-17 17:03:58.000000000 -0400
66518@@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_str
66519 continue;
66520 pte = ptep_clear_flush(vma, old_addr, old_pte);
66521 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
66522+
66523+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
66524+ if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
66525+ pte = pte_exprotect(pte);
66526+#endif
66527+
66528 set_pte_at(mm, new_addr, new_pte, pte);
66529 }
66530
66531@@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_res
66532 if (is_vm_hugetlb_page(vma))
66533 goto Einval;
66534
66535+#ifdef CONFIG_PAX_SEGMEXEC
66536+ if (pax_find_mirror_vma(vma))
66537+ goto Einval;
66538+#endif
66539+
66540 /* We can't remap across vm area boundaries */
66541 if (old_len > vma->vm_end - addr)
66542 goto Efault;
66543@@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned
66544 unsigned long ret = -EINVAL;
66545 unsigned long charged = 0;
66546 unsigned long map_flags;
66547+ unsigned long pax_task_size = TASK_SIZE;
66548
66549 if (new_addr & ~PAGE_MASK)
66550 goto out;
66551
66552- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
66553+#ifdef CONFIG_PAX_SEGMEXEC
66554+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
66555+ pax_task_size = SEGMEXEC_TASK_SIZE;
66556+#endif
66557+
66558+ pax_task_size -= PAGE_SIZE;
66559+
66560+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
66561 goto out;
66562
66563 /* Check if the location we're moving into overlaps the
66564 * old location at all, and fail if it does.
66565 */
66566- if ((new_addr <= addr) && (new_addr+new_len) > addr)
66567- goto out;
66568-
66569- if ((addr <= new_addr) && (addr+old_len) > new_addr)
66570+ if (addr + old_len > new_addr && new_addr + new_len > addr)
66571 goto out;
66572
66573 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
66574@@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long ad
66575 struct vm_area_struct *vma;
66576 unsigned long ret = -EINVAL;
66577 unsigned long charged = 0;
66578+ unsigned long pax_task_size = TASK_SIZE;
66579
66580 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
66581 goto out;
66582@@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long ad
66583 if (!new_len)
66584 goto out;
66585
66586+#ifdef CONFIG_PAX_SEGMEXEC
66587+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
66588+ pax_task_size = SEGMEXEC_TASK_SIZE;
66589+#endif
66590+
66591+ pax_task_size -= PAGE_SIZE;
66592+
66593+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
66594+ old_len > pax_task_size || addr > pax_task_size-old_len)
66595+ goto out;
66596+
66597 if (flags & MREMAP_FIXED) {
66598 if (flags & MREMAP_MAYMOVE)
66599 ret = mremap_to(addr, old_len, new_addr, new_len);
66600@@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long ad
66601 addr + new_len);
66602 }
66603 ret = addr;
66604+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
66605 goto out;
66606 }
66607 }
66608@@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long ad
66609 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
66610 if (ret)
66611 goto out;
66612+
66613+ map_flags = vma->vm_flags;
66614 ret = move_vma(vma, addr, old_len, new_len, new_addr);
66615+ if (!(ret & ~PAGE_MASK)) {
66616+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
66617+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
66618+ }
66619 }
66620 out:
66621 if (ret & ~PAGE_MASK)
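
mremap_to() above collapses the two overlap tests into the single condition addr + old_len > new_addr && new_addr + new_len > addr. For non-empty ranges the forms are equivalent, which the brute-force check below (illustration only) confirms over a small grid of fixed-size ranges.

#include <assert.h>
#include <stdbool.h>

static bool old_test(unsigned long addr, unsigned long old_len,
		     unsigned long new_addr, unsigned long new_len)
{
	/* the two conditions removed by the hunk above */
	if (new_addr <= addr && new_addr + new_len > addr)
		return true;
	if (addr <= new_addr && addr + old_len > new_addr)
		return true;
	return false;
}

static bool new_test(unsigned long addr, unsigned long old_len,
		     unsigned long new_addr, unsigned long new_len)
{
	/* the single condition that replaces them */
	return addr + old_len > new_addr && new_addr + new_len > addr;
}

int main(void)
{
	unsigned long a, b;

	for (a = 0; a < 16; a++)
		for (b = 0; b < 16; b++)
			assert(old_test(a, 4, b, 4) == new_test(a, 4, b, 4));
	return 0;
}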
66622diff -urNp linux-2.6.32.43/mm/nommu.c linux-2.6.32.43/mm/nommu.c
66623--- linux-2.6.32.43/mm/nommu.c 2011-03-27 14:31:47.000000000 -0400
66624+++ linux-2.6.32.43/mm/nommu.c 2011-04-17 15:56:46.000000000 -0400
66625@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
66626 int sysctl_overcommit_ratio = 50; /* default is 50% */
66627 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
66628 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
66629-int heap_stack_gap = 0;
66630
66631 atomic_long_t mmap_pages_allocated;
66632
66633@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
66634 EXPORT_SYMBOL(find_vma);
66635
66636 /*
66637- * find a VMA
66638- * - we don't extend stack VMAs under NOMMU conditions
66639- */
66640-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
66641-{
66642- return find_vma(mm, addr);
66643-}
66644-
66645-/*
66646 * expand a stack to a given address
66647 * - not supported under NOMMU conditions
66648 */
66649diff -urNp linux-2.6.32.43/mm/page_alloc.c linux-2.6.32.43/mm/page_alloc.c
66650--- linux-2.6.32.43/mm/page_alloc.c 2011-06-25 12:55:35.000000000 -0400
66651+++ linux-2.6.32.43/mm/page_alloc.c 2011-07-09 09:13:08.000000000 -0400
66652@@ -289,7 +289,7 @@ out:
66653 * This usage means that zero-order pages may not be compound.
66654 */
66655
66656-static void free_compound_page(struct page *page)
66657+void free_compound_page(struct page *page)
66658 {
66659 __free_pages_ok(page, compound_order(page));
66660 }
66661@@ -587,6 +587,10 @@ static void __free_pages_ok(struct page
66662 int bad = 0;
66663 int wasMlocked = __TestClearPageMlocked(page);
66664
66665+#ifdef CONFIG_PAX_MEMORY_SANITIZE
66666+ unsigned long index = 1UL << order;
66667+#endif
66668+
66669 kmemcheck_free_shadow(page, order);
66670
66671 for (i = 0 ; i < (1 << order) ; ++i)
66672@@ -599,6 +603,12 @@ static void __free_pages_ok(struct page
66673 debug_check_no_obj_freed(page_address(page),
66674 PAGE_SIZE << order);
66675 }
66676+
66677+#ifdef CONFIG_PAX_MEMORY_SANITIZE
66678+ for (; index; --index)
66679+ sanitize_highpage(page + index - 1);
66680+#endif
66681+
66682 arch_free_page(page, order);
66683 kernel_map_pages(page, 1 << order, 0);
66684
66685@@ -702,8 +712,10 @@ static int prep_new_page(struct page *pa
66686 arch_alloc_page(page, order);
66687 kernel_map_pages(page, 1 << order, 1);
66688
66689+#ifndef CONFIG_PAX_MEMORY_SANITIZE
66690 if (gfp_flags & __GFP_ZERO)
66691 prep_zero_page(page, order, gfp_flags);
66692+#endif
66693
66694 if (order && (gfp_flags & __GFP_COMP))
66695 prep_compound_page(page, order);
66696@@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct pa
66697 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
66698 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
66699 }
66700+
66701+#ifdef CONFIG_PAX_MEMORY_SANITIZE
66702+ sanitize_highpage(page);
66703+#endif
66704+
66705 arch_free_page(page, 0);
66706 kernel_map_pages(page, 1, 0);
66707
66708@@ -2179,6 +2196,8 @@ void show_free_areas(void)
66709 int cpu;
66710 struct zone *zone;
66711
66712+ pax_track_stack();
66713+
66714 for_each_populated_zone(zone) {
66715 show_node(zone);
66716 printk("%s per-cpu:\n", zone->name);
66717@@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct p
66718 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
66719 }
66720 #else
66721-static void inline setup_usemap(struct pglist_data *pgdat,
66722+static inline void setup_usemap(struct pglist_data *pgdat,
66723 struct zone *zone, unsigned long zonesize) {}
66724 #endif /* CONFIG_SPARSEMEM */
66725
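
The page_alloc.c hunks wipe page contents at free time (sanitize_highpage() per page) and, in exchange, skip prep_zero_page() on the allocation path, since sanitized pages are already clear. The fragment below is a rough user-space analogue of that scrub-on-free idea, not kernel code; the type and function names are invented for the example.

#include <stdlib.h>
#include <string.h>

struct scrub_buf {
	size_t len;
	unsigned char data[];
};

static struct scrub_buf *scrub_alloc(size_t len)
{
	struct scrub_buf *b = calloc(1, sizeof(*b) + len);	/* handed out pre-zeroed */

	if (b)
		b->len = len;
	return b;
}

static void scrub_free(struct scrub_buf *b)
{
	if (!b)
		return;
	/* a hardened version would use explicit_bzero() or a volatile write loop
	 * so the compiler cannot drop this wipe as a dead store */
	memset(b, 0, sizeof(*b) + b->len);
	free(b);
}

int main(void)
{
	struct scrub_buf *b = scrub_alloc(256);

	if (b)
		memset(b->data, 0xaa, b->len);	/* pretend to use the buffer */
	scrub_free(b);				/* contents are gone before release */
	return 0;
}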
66726diff -urNp linux-2.6.32.43/mm/percpu.c linux-2.6.32.43/mm/percpu.c
66727--- linux-2.6.32.43/mm/percpu.c 2011-03-27 14:31:47.000000000 -0400
66728+++ linux-2.6.32.43/mm/percpu.c 2011-04-17 15:56:46.000000000 -0400
66729@@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
66730 static unsigned int pcpu_last_unit_cpu __read_mostly;
66731
66732 /* the address of the first chunk which starts with the kernel static area */
66733-void *pcpu_base_addr __read_mostly;
66734+void *pcpu_base_addr __read_only;
66735 EXPORT_SYMBOL_GPL(pcpu_base_addr);
66736
66737 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
66738diff -urNp linux-2.6.32.43/mm/rmap.c linux-2.6.32.43/mm/rmap.c
66739--- linux-2.6.32.43/mm/rmap.c 2011-03-27 14:31:47.000000000 -0400
66740+++ linux-2.6.32.43/mm/rmap.c 2011-04-17 15:56:46.000000000 -0400
66741@@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_stru
66742 /* page_table_lock to protect against threads */
66743 spin_lock(&mm->page_table_lock);
66744 if (likely(!vma->anon_vma)) {
66745+
66746+#ifdef CONFIG_PAX_SEGMEXEC
66747+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
66748+
66749+ if (vma_m) {
66750+ BUG_ON(vma_m->anon_vma);
66751+ vma_m->anon_vma = anon_vma;
66752+ list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
66753+ }
66754+#endif
66755+
66756 vma->anon_vma = anon_vma;
66757 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
66758 allocated = NULL;
66759diff -urNp linux-2.6.32.43/mm/shmem.c linux-2.6.32.43/mm/shmem.c
66760--- linux-2.6.32.43/mm/shmem.c 2011-03-27 14:31:47.000000000 -0400
66761+++ linux-2.6.32.43/mm/shmem.c 2011-05-18 20:09:37.000000000 -0400
66762@@ -31,7 +31,7 @@
66763 #include <linux/swap.h>
66764 #include <linux/ima.h>
66765
66766-static struct vfsmount *shm_mnt;
66767+struct vfsmount *shm_mnt;
66768
66769 #ifdef CONFIG_SHMEM
66770 /*
66771@@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *
66772 goto unlock;
66773 }
66774 entry = shmem_swp_entry(info, index, NULL);
66775+ if (!entry)
66776+ goto unlock;
66777 if (entry->val) {
66778 /*
66779 * The more uptodate page coming down from a stacked
66780@@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_ent
66781 struct vm_area_struct pvma;
66782 struct page *page;
66783
66784+ pax_track_stack();
66785+
66786 spol = mpol_cond_copy(&mpol,
66787 mpol_shared_policy_lookup(&info->policy, idx));
66788
66789@@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *d
66790
66791 info = SHMEM_I(inode);
66792 inode->i_size = len-1;
66793- if (len <= (char *)inode - (char *)info) {
66794+ if (len <= (char *)inode - (char *)info && len <= 64) {
66795 /* do it inline */
66796 memcpy(info, symname, len);
66797 inode->i_op = &shmem_symlink_inline_operations;
66798@@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block
66799 int err = -ENOMEM;
66800
66801 /* Round up to L1_CACHE_BYTES to resist false sharing */
66802- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
66803- L1_CACHE_BYTES), GFP_KERNEL);
66804+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
66805 if (!sbinfo)
66806 return -ENOMEM;
66807
66808diff -urNp linux-2.6.32.43/mm/slab.c linux-2.6.32.43/mm/slab.c
66809--- linux-2.6.32.43/mm/slab.c 2011-03-27 14:31:47.000000000 -0400
66810+++ linux-2.6.32.43/mm/slab.c 2011-05-04 17:56:20.000000000 -0400
66811@@ -174,7 +174,7 @@
66812
66813 /* Legal flag mask for kmem_cache_create(). */
66814 #if DEBUG
66815-# define CREATE_MASK (SLAB_RED_ZONE | \
66816+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
66817 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
66818 SLAB_CACHE_DMA | \
66819 SLAB_STORE_USER | \
66820@@ -182,7 +182,7 @@
66821 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
66822 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
66823 #else
66824-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
66825+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
66826 SLAB_CACHE_DMA | \
66827 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
66828 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
66829@@ -308,7 +308,7 @@ struct kmem_list3 {
66830 * Need this for bootstrapping a per node allocator.
66831 */
66832 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
66833-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
66834+struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
66835 #define CACHE_CACHE 0
66836 #define SIZE_AC MAX_NUMNODES
66837 #define SIZE_L3 (2 * MAX_NUMNODES)
66838@@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_
66839 if ((x)->max_freeable < i) \
66840 (x)->max_freeable = i; \
66841 } while (0)
66842-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
66843-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
66844-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
66845-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
66846+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
66847+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
66848+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
66849+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
66850 #else
66851 #define STATS_INC_ACTIVE(x) do { } while (0)
66852 #define STATS_DEC_ACTIVE(x) do { } while (0)
66853@@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
66854 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
66855 */
66856 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
66857- const struct slab *slab, void *obj)
66858+ const struct slab *slab, const void *obj)
66859 {
66860 u32 offset = (obj - slab->s_mem);
66861 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
66862@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
66863 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
66864 sizes[INDEX_AC].cs_size,
66865 ARCH_KMALLOC_MINALIGN,
66866- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66867+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66868 NULL);
66869
66870 if (INDEX_AC != INDEX_L3) {
66871@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
66872 kmem_cache_create(names[INDEX_L3].name,
66873 sizes[INDEX_L3].cs_size,
66874 ARCH_KMALLOC_MINALIGN,
66875- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66876+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66877 NULL);
66878 }
66879
66880@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
66881 sizes->cs_cachep = kmem_cache_create(names->name,
66882 sizes->cs_size,
66883 ARCH_KMALLOC_MINALIGN,
66884- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
66885+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
66886 NULL);
66887 }
66888 #ifdef CONFIG_ZONE_DMA
66889@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo
66890 }
66891 /* cpu stats */
66892 {
66893- unsigned long allochit = atomic_read(&cachep->allochit);
66894- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
66895- unsigned long freehit = atomic_read(&cachep->freehit);
66896- unsigned long freemiss = atomic_read(&cachep->freemiss);
66897+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
66898+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
66899+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
66900+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
66901
66902 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
66903 allochit, allocmiss, freehit, freemiss);
66904@@ -4471,15 +4471,66 @@ static const struct file_operations proc
66905
66906 static int __init slab_proc_init(void)
66907 {
66908- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
66909+ mode_t gr_mode = S_IRUGO;
66910+
66911+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66912+ gr_mode = S_IRUSR;
66913+#endif
66914+
66915+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
66916 #ifdef CONFIG_DEBUG_SLAB_LEAK
66917- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
66918+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
66919 #endif
66920 return 0;
66921 }
66922 module_init(slab_proc_init);
66923 #endif
66924
66925+void check_object_size(const void *ptr, unsigned long n, bool to)
66926+{
66927+
66928+#ifdef CONFIG_PAX_USERCOPY
66929+ struct page *page;
66930+ struct kmem_cache *cachep = NULL;
66931+ struct slab *slabp;
66932+ unsigned int objnr;
66933+ unsigned long offset;
66934+
66935+ if (!n)
66936+ return;
66937+
66938+ if (ZERO_OR_NULL_PTR(ptr))
66939+ goto report;
66940+
66941+ if (!virt_addr_valid(ptr))
66942+ return;
66943+
66944+ page = virt_to_head_page(ptr);
66945+
66946+ if (!PageSlab(page)) {
66947+ if (object_is_on_stack(ptr, n) == -1)
66948+ goto report;
66949+ return;
66950+ }
66951+
66952+ cachep = page_get_cache(page);
66953+ if (!(cachep->flags & SLAB_USERCOPY))
66954+ goto report;
66955+
66956+ slabp = page_get_slab(page);
66957+ objnr = obj_to_index(cachep, slabp, ptr);
66958+ BUG_ON(objnr >= cachep->num);
66959+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
66960+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
66961+ return;
66962+
66963+report:
66964+ pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
66965+#endif
66966+
66967+}
66968+EXPORT_SYMBOL(check_object_size);
66969+
66970 /**
66971 * ksize - get the actual amount of memory allocated for a given object
66972 * @objp: Pointer to the object
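
check_object_size() above validates a user copy against the containing slab object with offset <= obj_size(cachep) && n <= obj_size(cachep) - offset rather than the naive offset + n <= size, which can wrap around for huge n. The stand-alone program below (illustration only, user-space names) shows why the rearranged form is the safe one.

#include <assert.h>
#include <limits.h>
#include <stdbool.h>

static bool naive_fits(unsigned long offset, unsigned long n, unsigned long size)
{
	return offset + n <= size;		/* offset + n can wrap around */
}

static bool safe_fits(unsigned long offset, unsigned long n, unsigned long size)
{
	return offset <= size && n <= size - offset;	/* no intermediate overflow */
}

int main(void)
{
	/* a copy that clearly does not fit in a 128-byte object */
	unsigned long offset = 64, n = ULONG_MAX, size = 128;

	assert(!safe_fits(offset, n, size));	/* correctly rejected */
	assert(naive_fits(offset, n, size));	/* wrong: 64 + ULONG_MAX wraps to 63 */
	return 0;
}

The slub.c and slob.c versions of check_object_size() later in this patch apply the same pattern to their own size bookkeeping.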
66973diff -urNp linux-2.6.32.43/mm/slob.c linux-2.6.32.43/mm/slob.c
66974--- linux-2.6.32.43/mm/slob.c 2011-03-27 14:31:47.000000000 -0400
66975+++ linux-2.6.32.43/mm/slob.c 2011-07-06 19:53:33.000000000 -0400
66976@@ -29,7 +29,7 @@
66977 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
66978 * alloc_pages() directly, allocating compound pages so the page order
66979 * does not have to be separately tracked, and also stores the exact
66980- * allocation size in page->private so that it can be used to accurately
66981+ * allocation size in slob_page->size so that it can be used to accurately
66982 * provide ksize(). These objects are detected in kfree() because slob_page()
66983 * is false for them.
66984 *
66985@@ -58,6 +58,7 @@
66986 */
66987
66988 #include <linux/kernel.h>
66989+#include <linux/sched.h>
66990 #include <linux/slab.h>
66991 #include <linux/mm.h>
66992 #include <linux/swap.h> /* struct reclaim_state */
66993@@ -100,7 +101,8 @@ struct slob_page {
66994 unsigned long flags; /* mandatory */
66995 atomic_t _count; /* mandatory */
66996 slobidx_t units; /* free units left in page */
66997- unsigned long pad[2];
66998+ unsigned long pad[1];
66999+ unsigned long size; /* size when >=PAGE_SIZE */
67000 slob_t *free; /* first free slob_t in page */
67001 struct list_head list; /* linked list of free pages */
67002 };
67003@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
67004 */
67005 static inline int is_slob_page(struct slob_page *sp)
67006 {
67007- return PageSlab((struct page *)sp);
67008+ return PageSlab((struct page *)sp) && !sp->size;
67009 }
67010
67011 static inline void set_slob_page(struct slob_page *sp)
67012@@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
67013
67014 static inline struct slob_page *slob_page(const void *addr)
67015 {
67016- return (struct slob_page *)virt_to_page(addr);
67017+ return (struct slob_page *)virt_to_head_page(addr);
67018 }
67019
67020 /*
67021@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
67022 /*
67023 * Return the size of a slob block.
67024 */
67025-static slobidx_t slob_units(slob_t *s)
67026+static slobidx_t slob_units(const slob_t *s)
67027 {
67028 if (s->units > 0)
67029 return s->units;
67030@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
67031 /*
67032 * Return the next free slob block pointer after this one.
67033 */
67034-static slob_t *slob_next(slob_t *s)
67035+static slob_t *slob_next(const slob_t *s)
67036 {
67037 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
67038 slobidx_t next;
67039@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
67040 /*
67041 * Returns true if s is the last free block in its page.
67042 */
67043-static int slob_last(slob_t *s)
67044+static int slob_last(const slob_t *s)
67045 {
67046 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
67047 }
67048@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
67049 if (!page)
67050 return NULL;
67051
67052+ set_slob_page(page);
67053 return page_address(page);
67054 }
67055
67056@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
67057 if (!b)
67058 return NULL;
67059 sp = slob_page(b);
67060- set_slob_page(sp);
67061
67062 spin_lock_irqsave(&slob_lock, flags);
67063 sp->units = SLOB_UNITS(PAGE_SIZE);
67064 sp->free = b;
67065+ sp->size = 0;
67066 INIT_LIST_HEAD(&sp->list);
67067 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
67068 set_slob_page_free(sp, slob_list);
67069@@ -475,10 +478,9 @@ out:
67070 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
67071 #endif
67072
67073-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
67074+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
67075 {
67076- unsigned int *m;
67077- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
67078+ slob_t *m;
67079 void *ret;
67080
67081 lockdep_trace_alloc(gfp);
67082@@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
67083
67084 if (!m)
67085 return NULL;
67086- *m = size;
67087+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
67088+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
67089+ m[0].units = size;
67090+ m[1].units = align;
67091 ret = (void *)m + align;
67092
67093 trace_kmalloc_node(_RET_IP_, ret,
67094@@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t
67095
67096 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
67097 if (ret) {
67098- struct page *page;
67099- page = virt_to_page(ret);
67100- page->private = size;
67101+ struct slob_page *sp;
67102+ sp = slob_page(ret);
67103+ sp->size = size;
67104 }
67105
67106 trace_kmalloc_node(_RET_IP_, ret,
67107 size, PAGE_SIZE << order, gfp, node);
67108 }
67109
67110- kmemleak_alloc(ret, size, 1, gfp);
67111+ return ret;
67112+}
67113+
67114+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
67115+{
67116+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
67117+ void *ret = __kmalloc_node_align(size, gfp, node, align);
67118+
67119+ if (!ZERO_OR_NULL_PTR(ret))
67120+ kmemleak_alloc(ret, size, 1, gfp);
67121 return ret;
67122 }
67123 EXPORT_SYMBOL(__kmalloc_node);
67124@@ -528,13 +542,88 @@ void kfree(const void *block)
67125 sp = slob_page(block);
67126 if (is_slob_page(sp)) {
67127 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
67128- unsigned int *m = (unsigned int *)(block - align);
67129- slob_free(m, *m + align);
67130- } else
67131+ slob_t *m = (slob_t *)(block - align);
67132+ slob_free(m, m[0].units + align);
67133+ } else {
67134+ clear_slob_page(sp);
67135+ free_slob_page(sp);
67136+ sp->size = 0;
67137 put_page(&sp->page);
67138+ }
67139 }
67140 EXPORT_SYMBOL(kfree);
67141
67142+void check_object_size(const void *ptr, unsigned long n, bool to)
67143+{
67144+
67145+#ifdef CONFIG_PAX_USERCOPY
67146+ struct slob_page *sp;
67147+ const slob_t *free;
67148+ const void *base;
67149+ unsigned long flags;
67150+
67151+ if (!n)
67152+ return;
67153+
67154+ if (ZERO_OR_NULL_PTR(ptr))
67155+ goto report;
67156+
67157+ if (!virt_addr_valid(ptr))
67158+ return;
67159+
67160+ sp = slob_page(ptr);
67161+ if (!PageSlab((struct page*)sp)) {
67162+ if (object_is_on_stack(ptr, n) == -1)
67163+ goto report;
67164+ return;
67165+ }
67166+
67167+ if (sp->size) {
67168+ base = page_address(&sp->page);
67169+ if (base <= ptr && n <= sp->size - (ptr - base))
67170+ return;
67171+ goto report;
67172+ }
67173+
67174+ /* some tricky double walking to find the chunk */
67175+ spin_lock_irqsave(&slob_lock, flags);
67176+ base = (void *)((unsigned long)ptr & PAGE_MASK);
67177+ free = sp->free;
67178+
67179+ while (!slob_last(free) && (void *)free <= ptr) {
67180+ base = free + slob_units(free);
67181+ free = slob_next(free);
67182+ }
67183+
67184+ while (base < (void *)free) {
67185+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
67186+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
67187+ int offset;
67188+
67189+ if (ptr < base + align)
67190+ break;
67191+
67192+ offset = ptr - base - align;
67193+ if (offset >= m) {
67194+ base += size;
67195+ continue;
67196+ }
67197+
67198+ if (n > m - offset)
67199+ break;
67200+
67201+ spin_unlock_irqrestore(&slob_lock, flags);
67202+ return;
67203+ }
67204+
67205+ spin_unlock_irqrestore(&slob_lock, flags);
67206+report:
67207+ pax_report_usercopy(ptr, n, to, NULL);
67208+#endif
67209+
67210+}
67211+EXPORT_SYMBOL(check_object_size);
67212+
67213 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
67214 size_t ksize(const void *block)
67215 {
67216@@ -547,10 +636,10 @@ size_t ksize(const void *block)
67217 sp = slob_page(block);
67218 if (is_slob_page(sp)) {
67219 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
67220- unsigned int *m = (unsigned int *)(block - align);
67221- return SLOB_UNITS(*m) * SLOB_UNIT;
67222+ slob_t *m = (slob_t *)(block - align);
67223+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
67224 } else
67225- return sp->page.private;
67226+ return sp->size;
67227 }
67228 EXPORT_SYMBOL(ksize);
67229
67230@@ -566,8 +655,13 @@ struct kmem_cache *kmem_cache_create(con
67231 {
67232 struct kmem_cache *c;
67233
67234+#ifdef CONFIG_PAX_USERCOPY
67235+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
67236+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
67237+#else
67238 c = slob_alloc(sizeof(struct kmem_cache),
67239 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
67240+#endif
67241
67242 if (c) {
67243 c->name = name;
67244@@ -605,17 +699,25 @@ void *kmem_cache_alloc_node(struct kmem_
67245 {
67246 void *b;
67247
67248+#ifdef CONFIG_PAX_USERCOPY
67249+ b = __kmalloc_node_align(c->size, flags, node, c->align);
67250+#else
67251 if (c->size < PAGE_SIZE) {
67252 b = slob_alloc(c->size, flags, c->align, node);
67253 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
67254 SLOB_UNITS(c->size) * SLOB_UNIT,
67255 flags, node);
67256 } else {
67257+ struct slob_page *sp;
67258+
67259 b = slob_new_pages(flags, get_order(c->size), node);
67260+ sp = slob_page(b);
67261+ sp->size = c->size;
67262 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
67263 PAGE_SIZE << get_order(c->size),
67264 flags, node);
67265 }
67266+#endif
67267
67268 if (c->ctor)
67269 c->ctor(b);
67270@@ -627,10 +729,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
67271
67272 static void __kmem_cache_free(void *b, int size)
67273 {
67274- if (size < PAGE_SIZE)
67275+ struct slob_page *sp = slob_page(b);
67276+
67277+ if (is_slob_page(sp))
67278 slob_free(b, size);
67279- else
67280+ else {
67281+ clear_slob_page(sp);
67282+ free_slob_page(sp);
67283+ sp->size = 0;
67284 slob_free_pages(b, get_order(size));
67285+ }
67286 }
67287
67288 static void kmem_rcu_free(struct rcu_head *head)
67289@@ -643,18 +751,32 @@ static void kmem_rcu_free(struct rcu_hea
67290
67291 void kmem_cache_free(struct kmem_cache *c, void *b)
67292 {
67293+ int size = c->size;
67294+
67295+#ifdef CONFIG_PAX_USERCOPY
67296+ if (size + c->align < PAGE_SIZE) {
67297+ size += c->align;
67298+ b -= c->align;
67299+ }
67300+#endif
67301+
67302 kmemleak_free_recursive(b, c->flags);
67303 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
67304 struct slob_rcu *slob_rcu;
67305- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
67306+ slob_rcu = b + (size - sizeof(struct slob_rcu));
67307 INIT_RCU_HEAD(&slob_rcu->head);
67308- slob_rcu->size = c->size;
67309+ slob_rcu->size = size;
67310 call_rcu(&slob_rcu->head, kmem_rcu_free);
67311 } else {
67312- __kmem_cache_free(b, c->size);
67313+ __kmem_cache_free(b, size);
67314 }
67315
67316+#ifdef CONFIG_PAX_USERCOPY
67317+ trace_kfree(_RET_IP_, b);
67318+#else
67319 trace_kmem_cache_free(_RET_IP_, b);
67320+#endif
67321+
67322 }
67323 EXPORT_SYMBOL(kmem_cache_free);
67324
67325diff -urNp linux-2.6.32.43/mm/slub.c linux-2.6.32.43/mm/slub.c
67326--- linux-2.6.32.43/mm/slub.c 2011-03-27 14:31:47.000000000 -0400
67327+++ linux-2.6.32.43/mm/slub.c 2011-04-17 15:56:46.000000000 -0400
67328@@ -410,7 +410,7 @@ static void print_track(const char *s, s
67329 if (!t->addr)
67330 return;
67331
67332- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
67333+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
67334 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
67335 }
67336
67337@@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *
67338
67339 page = virt_to_head_page(x);
67340
67341+ BUG_ON(!PageSlab(page));
67342+
67343 slab_free(s, page, x, _RET_IP_);
67344
67345 trace_kmem_cache_free(_RET_IP_, x);
67346@@ -1937,7 +1939,7 @@ static int slub_min_objects;
67347 * Merge control. If this is set then no merging of slab caches will occur.
67348 * (Could be removed. This was introduced to pacify the merge skeptics.)
67349 */
67350-static int slub_nomerge;
67351+static int slub_nomerge = 1;
67352
67353 /*
67354 * Calculate the order of allocation given an slab object size.
67355@@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_c
67356 * list to avoid pounding the page allocator excessively.
67357 */
67358 set_min_partial(s, ilog2(s->size));
67359- s->refcount = 1;
67360+ atomic_set(&s->refcount, 1);
67361 #ifdef CONFIG_NUMA
67362 s->remote_node_defrag_ratio = 1000;
67363 #endif
67364@@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struc
67365 void kmem_cache_destroy(struct kmem_cache *s)
67366 {
67367 down_write(&slub_lock);
67368- s->refcount--;
67369- if (!s->refcount) {
67370+ if (atomic_dec_and_test(&s->refcount)) {
67371 list_del(&s->list);
67372 up_write(&slub_lock);
67373 if (kmem_cache_close(s)) {
67374@@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(cha
67375 __setup("slub_nomerge", setup_slub_nomerge);
67376
67377 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
67378- const char *name, int size, gfp_t gfp_flags)
67379+ const char *name, int size, gfp_t gfp_flags, unsigned int flags)
67380 {
67381- unsigned int flags = 0;
67382-
67383 if (gfp_flags & SLUB_DMA)
67384- flags = SLAB_CACHE_DMA;
67385+ flags |= SLAB_CACHE_DMA;
67386
67387 /*
67388 * This function is called with IRQs disabled during early-boot on
67389@@ -2915,6 +2914,46 @@ void *__kmalloc_node(size_t size, gfp_t
67390 EXPORT_SYMBOL(__kmalloc_node);
67391 #endif
67392
67393+void check_object_size(const void *ptr, unsigned long n, bool to)
67394+{
67395+
67396+#ifdef CONFIG_PAX_USERCOPY
67397+ struct page *page;
67398+ struct kmem_cache *s = NULL;
67399+ unsigned long offset;
67400+
67401+ if (!n)
67402+ return;
67403+
67404+ if (ZERO_OR_NULL_PTR(ptr))
67405+ goto report;
67406+
67407+ if (!virt_addr_valid(ptr))
67408+ return;
67409+
67410+ page = get_object_page(ptr);
67411+
67412+ if (!page) {
67413+ if (object_is_on_stack(ptr, n) == -1)
67414+ goto report;
67415+ return;
67416+ }
67417+
67418+ s = page->slab;
67419+ if (!(s->flags & SLAB_USERCOPY))
67420+ goto report;
67421+
67422+ offset = (ptr - page_address(page)) % s->size;
67423+ if (offset <= s->objsize && n <= s->objsize - offset)
67424+ return;
67425+
67426+report:
67427+ pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
67428+#endif
67429+
67430+}
67431+EXPORT_SYMBOL(check_object_size);
67432+
67433 size_t ksize(const void *object)
67434 {
67435 struct page *page;
67436@@ -3185,8 +3224,8 @@ void __init kmem_cache_init(void)
67437 * kmem_cache_open for slab_state == DOWN.
67438 */
67439 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
67440- sizeof(struct kmem_cache_node), GFP_NOWAIT);
67441- kmalloc_caches[0].refcount = -1;
67442+ sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
67443+ atomic_set(&kmalloc_caches[0].refcount, -1);
67444 caches++;
67445
67446 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
67447@@ -3198,18 +3237,18 @@ void __init kmem_cache_init(void)
67448 /* Caches that are not of the two-to-the-power-of size */
67449 if (KMALLOC_MIN_SIZE <= 32) {
67450 create_kmalloc_cache(&kmalloc_caches[1],
67451- "kmalloc-96", 96, GFP_NOWAIT);
67452+ "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
67453 caches++;
67454 }
67455 if (KMALLOC_MIN_SIZE <= 64) {
67456 create_kmalloc_cache(&kmalloc_caches[2],
67457- "kmalloc-192", 192, GFP_NOWAIT);
67458+ "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
67459 caches++;
67460 }
67461
67462 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
67463 create_kmalloc_cache(&kmalloc_caches[i],
67464- "kmalloc", 1 << i, GFP_NOWAIT);
67465+ "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
67466 caches++;
67467 }
67468
67469@@ -3293,7 +3332,7 @@ static int slab_unmergeable(struct kmem_
67470 /*
67471 * We may have set a slab to be unmergeable during bootstrap.
67472 */
67473- if (s->refcount < 0)
67474+ if (atomic_read(&s->refcount) < 0)
67475 return 1;
67476
67477 return 0;
67478@@ -3353,7 +3392,7 @@ struct kmem_cache *kmem_cache_create(con
67479 if (s) {
67480 int cpu;
67481
67482- s->refcount++;
67483+ atomic_inc(&s->refcount);
67484 /*
67485 * Adjust the object sizes so that we clear
67486 * the complete object on kzalloc.
67487@@ -3372,7 +3411,7 @@ struct kmem_cache *kmem_cache_create(con
67488
67489 if (sysfs_slab_alias(s, name)) {
67490 down_write(&slub_lock);
67491- s->refcount--;
67492+ atomic_dec(&s->refcount);
67493 up_write(&slub_lock);
67494 goto err;
67495 }
67496@@ -4101,7 +4140,7 @@ SLAB_ATTR_RO(ctor);
67497
67498 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
67499 {
67500- return sprintf(buf, "%d\n", s->refcount - 1);
67501+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
67502 }
67503 SLAB_ATTR_RO(aliases);
67504
67505@@ -4503,7 +4542,7 @@ static void kmem_cache_release(struct ko
67506 kfree(s);
67507 }
67508
67509-static struct sysfs_ops slab_sysfs_ops = {
67510+static const struct sysfs_ops slab_sysfs_ops = {
67511 .show = slab_attr_show,
67512 .store = slab_attr_store,
67513 };
67514@@ -4522,7 +4561,7 @@ static int uevent_filter(struct kset *ks
67515 return 0;
67516 }
67517
67518-static struct kset_uevent_ops slab_uevent_ops = {
67519+static const struct kset_uevent_ops slab_uevent_ops = {
67520 .filter = uevent_filter,
67521 };
67522
67523@@ -4785,7 +4824,13 @@ static const struct file_operations proc
67524
67525 static int __init slab_proc_init(void)
67526 {
67527- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
67528+ mode_t gr_mode = S_IRUGO;
67529+
67530+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67531+ gr_mode = S_IRUSR;
67532+#endif
67533+
67534+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
67535 return 0;
67536 }
67537 module_init(slab_proc_init);
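The new check_object_size() in the SLUB hunk above rejects user copies that would run past the slab object backing the pointer. A rough userspace model of the arithmetic it performs; the obj struct stands in for page_address()/s->objsize, the `<=` bound on the offset matches the hunk, and all names here are illustrative only:

#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

/* Model of one slab object: base address and usable size. */
struct obj {
        uintptr_t base;
        size_t    size;
};

/* True if copying n bytes at ptr stays inside the object. */
static bool copy_is_bounded(const struct obj *o, uintptr_t ptr, size_t n)
{
        size_t offset;

        if (ptr < o->base)
                return false;
        offset = ptr - o->base;
        return offset <= o->size && n <= o->size - offset;
}

int main(void)
{
        struct obj o = { .base = 0x1000, .size = 192 };

        printf("%d\n", copy_is_bounded(&o, 0x1000 + 64, 128));  /* 1: fits     */
        printf("%d\n", copy_is_bounded(&o, 0x1000 + 64, 129));  /* 0: overruns */
        return 0;
}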
67538diff -urNp linux-2.6.32.43/mm/swap.c linux-2.6.32.43/mm/swap.c
67539--- linux-2.6.32.43/mm/swap.c 2011-03-27 14:31:47.000000000 -0400
67540+++ linux-2.6.32.43/mm/swap.c 2011-07-09 09:15:19.000000000 -0400
67541@@ -30,6 +30,7 @@
67542 #include <linux/notifier.h>
67543 #include <linux/backing-dev.h>
67544 #include <linux/memcontrol.h>
67545+#include <linux/hugetlb.h>
67546
67547 #include "internal.h"
67548
67549@@ -65,6 +66,8 @@ static void put_compound_page(struct pag
67550 compound_page_dtor *dtor;
67551
67552 dtor = get_compound_page_dtor(page);
67553+ if (!PageHuge(page))
67554+ BUG_ON(dtor != free_compound_page);
67555 (*dtor)(page);
67556 }
67557 }
67558diff -urNp linux-2.6.32.43/mm/util.c linux-2.6.32.43/mm/util.c
67559--- linux-2.6.32.43/mm/util.c 2011-03-27 14:31:47.000000000 -0400
67560+++ linux-2.6.32.43/mm/util.c 2011-04-17 15:56:46.000000000 -0400
67561@@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
67562 void arch_pick_mmap_layout(struct mm_struct *mm)
67563 {
67564 mm->mmap_base = TASK_UNMAPPED_BASE;
67565+
67566+#ifdef CONFIG_PAX_RANDMMAP
67567+ if (mm->pax_flags & MF_PAX_RANDMMAP)
67568+ mm->mmap_base += mm->delta_mmap;
67569+#endif
67570+
67571 mm->get_unmapped_area = arch_get_unmapped_area;
67572 mm->unmap_area = arch_unmap_area;
67573 }
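The mm/util.c hunk above shifts the mmap base by a per-process delta when PAX_RANDMMAP is active. A sketch of that kind of base randomization, assuming the delta is a page-aligned random value chosen once per exec; the constants and the rand() source are invented for illustration, not the PaX values:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT          12
#define TASK_UNMAPPED_BASE  0x40000000UL   /* illustrative */
#define RAND_BITS           16             /* illustrative entropy */

/* Pick a page-aligned random delta once, as an exec-time decision would. */
static unsigned long pick_delta_mmap(void)
{
        unsigned long r = (unsigned long)rand() & ((1UL << RAND_BITS) - 1);
        return r << PAGE_SHIFT;
}

int main(void)
{
        srand((unsigned)time(NULL));
        unsigned long delta = pick_delta_mmap();
        unsigned long base  = TASK_UNMAPPED_BASE + delta;

        printf("mmap base: %#lx (delta %#lx)\n", base, delta);
        return 0;
}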
67574diff -urNp linux-2.6.32.43/mm/vmalloc.c linux-2.6.32.43/mm/vmalloc.c
67575--- linux-2.6.32.43/mm/vmalloc.c 2011-03-27 14:31:47.000000000 -0400
67576+++ linux-2.6.32.43/mm/vmalloc.c 2011-04-17 15:56:46.000000000 -0400
67577@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
67578
67579 pte = pte_offset_kernel(pmd, addr);
67580 do {
67581- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
67582- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
67583+
67584+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67585+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
67586+ BUG_ON(!pte_exec(*pte));
67587+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
67588+ continue;
67589+ }
67590+#endif
67591+
67592+ {
67593+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
67594+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
67595+ }
67596 } while (pte++, addr += PAGE_SIZE, addr != end);
67597 }
67598
67599@@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
67600 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
67601 {
67602 pte_t *pte;
67603+ int ret = -ENOMEM;
67604
67605 /*
67606 * nr is a running index into the array which helps higher level
67607@@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
67608 pte = pte_alloc_kernel(pmd, addr);
67609 if (!pte)
67610 return -ENOMEM;
67611+
67612+ pax_open_kernel();
67613 do {
67614 struct page *page = pages[*nr];
67615
67616- if (WARN_ON(!pte_none(*pte)))
67617- return -EBUSY;
67618- if (WARN_ON(!page))
67619- return -ENOMEM;
67620+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67621+ if (!(pgprot_val(prot) & _PAGE_NX))
67622+ BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
67623+ else
67624+#endif
67625+
67626+ if (WARN_ON(!pte_none(*pte))) {
67627+ ret = -EBUSY;
67628+ goto out;
67629+ }
67630+ if (WARN_ON(!page)) {
67631+ ret = -ENOMEM;
67632+ goto out;
67633+ }
67634 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
67635 (*nr)++;
67636 } while (pte++, addr += PAGE_SIZE, addr != end);
67637- return 0;
67638+ ret = 0;
67639+out:
67640+ pax_close_kernel();
67641+ return ret;
67642 }
67643
67644 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
67645@@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
67646 * and fall back on vmalloc() if that fails. Others
67647 * just put it in the vmalloc space.
67648 */
67649-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
67650+#ifdef CONFIG_MODULES
67651+#ifdef MODULES_VADDR
67652 unsigned long addr = (unsigned long)x;
67653 if (addr >= MODULES_VADDR && addr < MODULES_END)
67654 return 1;
67655 #endif
67656+
67657+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
67658+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
67659+ return 1;
67660+#endif
67661+
67662+#endif
67663+
67664 return is_vmalloc_addr(x);
67665 }
67666
67667@@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void
67668
67669 if (!pgd_none(*pgd)) {
67670 pud_t *pud = pud_offset(pgd, addr);
67671+#ifdef CONFIG_X86
67672+ if (!pud_large(*pud))
67673+#endif
67674 if (!pud_none(*pud)) {
67675 pmd_t *pmd = pmd_offset(pud, addr);
67676+#ifdef CONFIG_X86
67677+ if (!pmd_large(*pmd))
67678+#endif
67679 if (!pmd_none(*pmd)) {
67680 pte_t *ptep, pte;
67681
67682@@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vm
67683 struct rb_node *tmp;
67684
67685 while (*p) {
67686- struct vmap_area *tmp;
67687+ struct vmap_area *varea;
67688
67689 parent = *p;
67690- tmp = rb_entry(parent, struct vmap_area, rb_node);
67691- if (va->va_start < tmp->va_end)
67692+ varea = rb_entry(parent, struct vmap_area, rb_node);
67693+ if (va->va_start < varea->va_end)
67694 p = &(*p)->rb_left;
67695- else if (va->va_end > tmp->va_start)
67696+ else if (va->va_end > varea->va_start)
67697 p = &(*p)->rb_right;
67698 else
67699 BUG();
67700@@ -1232,6 +1274,16 @@ static struct vm_struct *__get_vm_area_n
67701 struct vm_struct *area;
67702
67703 BUG_ON(in_interrupt());
67704+
67705+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67706+ if (flags & VM_KERNEXEC) {
67707+ if (start != VMALLOC_START || end != VMALLOC_END)
67708+ return NULL;
67709+ start = (unsigned long)MODULES_EXEC_VADDR;
67710+ end = (unsigned long)MODULES_EXEC_END;
67711+ }
67712+#endif
67713+
67714 if (flags & VM_IOREMAP) {
67715 int bit = fls(size);
67716
67717@@ -1457,6 +1509,11 @@ void *vmap(struct page **pages, unsigned
67718 if (count > totalram_pages)
67719 return NULL;
67720
67721+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67722+ if (!(pgprot_val(prot) & _PAGE_NX))
67723+ flags |= VM_KERNEXEC;
67724+#endif
67725+
67726 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
67727 __builtin_return_address(0));
67728 if (!area)
67729@@ -1567,6 +1624,13 @@ static void *__vmalloc_node(unsigned lon
67730 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
67731 return NULL;
67732
67733+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
67734+ if (!(pgprot_val(prot) & _PAGE_NX))
67735+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
67736+ node, gfp_mask, caller);
67737+ else
67738+#endif
67739+
67740 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
67741 VMALLOC_END, node, gfp_mask, caller);
67742
67743@@ -1585,6 +1649,7 @@ static void *__vmalloc_node(unsigned lon
67744 return addr;
67745 }
67746
67747+#undef __vmalloc
67748 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
67749 {
67750 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
67751@@ -1601,6 +1666,7 @@ EXPORT_SYMBOL(__vmalloc);
67752 * For tight control over page level allocator and protection flags
67753 * use __vmalloc() instead.
67754 */
67755+#undef vmalloc
67756 void *vmalloc(unsigned long size)
67757 {
67758 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
67759@@ -1615,6 +1681,7 @@ EXPORT_SYMBOL(vmalloc);
67760 * The resulting memory area is zeroed so it can be mapped to userspace
67761 * without leaking data.
67762 */
67763+#undef vmalloc_user
67764 void *vmalloc_user(unsigned long size)
67765 {
67766 struct vm_struct *area;
67767@@ -1642,6 +1709,7 @@ EXPORT_SYMBOL(vmalloc_user);
67768 * For tight control over page level allocator and protection flags
67769 * use __vmalloc() instead.
67770 */
67771+#undef vmalloc_node
67772 void *vmalloc_node(unsigned long size, int node)
67773 {
67774 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
67775@@ -1664,10 +1732,10 @@ EXPORT_SYMBOL(vmalloc_node);
67776 * For tight control over page level allocator and protection flags
67777 * use __vmalloc() instead.
67778 */
67779-
67780+#undef vmalloc_exec
67781 void *vmalloc_exec(unsigned long size)
67782 {
67783- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
67784+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
67785 -1, __builtin_return_address(0));
67786 }
67787
67788@@ -1686,6 +1754,7 @@ void *vmalloc_exec(unsigned long size)
67789 * Allocate enough 32bit PA addressable pages to cover @size from the
67790 * page level allocator and map them into contiguous kernel virtual space.
67791 */
67792+#undef vmalloc_32
67793 void *vmalloc_32(unsigned long size)
67794 {
67795 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
67796@@ -1700,6 +1769,7 @@ EXPORT_SYMBOL(vmalloc_32);
67797 * The resulting memory area is 32bit addressable and zeroed so it can be
67798 * mapped to userspace without leaking data.
67799 */
67800+#undef vmalloc_32_user
67801 void *vmalloc_32_user(unsigned long size)
67802 {
67803 struct vm_struct *area;
67804@@ -1964,6 +2034,8 @@ int remap_vmalloc_range(struct vm_area_s
67805 unsigned long uaddr = vma->vm_start;
67806 unsigned long usize = vma->vm_end - vma->vm_start;
67807
67808+ BUG_ON(vma->vm_mirror);
67809+
67810 if ((PAGE_SIZE-1) & (unsigned long)addr)
67811 return -EINVAL;
67812
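The vmalloc.c hunks above tag executable allocations (prot without _PAGE_NX) with VM_KERNEXEC and steer them into the module mapping range, so executable mappings end up inside the window that KERNEXEC manages. A toy model of that range selection with invented placeholder addresses; it is not the kernel API:

#include <stdio.h>
#include <stdbool.h>

/* Illustrative address windows, not real layout values. */
#define VMALLOC_START       0xc0000000UL
#define VMALLOC_END         0xf0000000UL
#define MODULES_EXEC_VADDR  0xf0000000UL
#define MODULES_EXEC_END    0xff000000UL

struct range { unsigned long start, end; };

/* Executable requests get the module window; everything else stays
 * in the regular vmalloc window, as the patched lookup does. */
static struct range pick_range(bool executable)
{
        if (executable)
                return (struct range){ MODULES_EXEC_VADDR, MODULES_EXEC_END };
        return (struct range){ VMALLOC_START, VMALLOC_END };
}

int main(void)
{
        struct range r = pick_range(true);
        printf("exec range: %#lx-%#lx\n", r.start, r.end);
        r = pick_range(false);
        printf("data range: %#lx-%#lx\n", r.start, r.end);
        return 0;
}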
67813diff -urNp linux-2.6.32.43/mm/vmstat.c linux-2.6.32.43/mm/vmstat.c
67814--- linux-2.6.32.43/mm/vmstat.c 2011-03-27 14:31:47.000000000 -0400
67815+++ linux-2.6.32.43/mm/vmstat.c 2011-04-17 15:56:46.000000000 -0400
67816@@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
67817 *
67818 * vm_stat contains the global counters
67819 */
67820-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67821+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
67822 EXPORT_SYMBOL(vm_stat);
67823
67824 #ifdef CONFIG_SMP
67825@@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
67826 v = p->vm_stat_diff[i];
67827 p->vm_stat_diff[i] = 0;
67828 local_irq_restore(flags);
67829- atomic_long_add(v, &zone->vm_stat[i]);
67830+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
67831 global_diff[i] += v;
67832 #ifdef CONFIG_NUMA
67833 /* 3 seconds idle till flush */
67834@@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
67835
67836 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
67837 if (global_diff[i])
67838- atomic_long_add(global_diff[i], &vm_stat[i]);
67839+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
67840 }
67841
67842 #endif
67843@@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
67844 start_cpu_timer(cpu);
67845 #endif
67846 #ifdef CONFIG_PROC_FS
67847- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
67848- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
67849- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
67850- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
67851+ {
67852+ mode_t gr_mode = S_IRUGO;
67853+#ifdef CONFIG_GRKERNSEC_PROC_ADD
67854+ gr_mode = S_IRUSR;
67855+#endif
67856+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
67857+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
67858+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
67859+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
67860+#else
67861+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
67862+#endif
67863+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
67864+ }
67865 #endif
67866 return 0;
67867 }
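Several hunks in this patch (slabinfo earlier, the vmstat files above) downgrade /proc entries from world-readable to root-only when GRKERNSEC_PROC_ADD is set, with an optional group exception under GRKERNSEC_PROC_USERGROUP. A compile-time sketch of that mode selection; the config macro names come from the patch, everything else is illustrative:

#include <stdio.h>
#include <sys/stat.h>

/* Toggle these to model the kernel config options from the patch. */
#define CONFIG_GRKERNSEC_PROC_ADD 1
/* #define CONFIG_GRKERNSEC_PROC_USERGROUP 1 */

static mode_t proc_mode(int group_readable)
{
        mode_t mode = S_IRUSR | S_IRGRP | S_IROTH;   /* S_IRUGO */

#ifdef CONFIG_GRKERNSEC_PROC_ADD
        mode = S_IRUSR;                              /* root only */
#endif
#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
        if (group_readable)
                mode |= S_IRGRP;                     /* allow the proc group */
#endif
        return mode;
}

int main(void)
{
        printf("vmstat mode:   %04o\n", (unsigned)proc_mode(1));
        printf("zoneinfo mode: %04o\n", (unsigned)proc_mode(0));
        return 0;
}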
67868diff -urNp linux-2.6.32.43/net/8021q/vlan.c linux-2.6.32.43/net/8021q/vlan.c
67869--- linux-2.6.32.43/net/8021q/vlan.c 2011-03-27 14:31:47.000000000 -0400
67870+++ linux-2.6.32.43/net/8021q/vlan.c 2011-04-17 15:56:46.000000000 -0400
67871@@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net
67872 err = -EPERM;
67873 if (!capable(CAP_NET_ADMIN))
67874 break;
67875- if ((args.u.name_type >= 0) &&
67876- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
67877+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
67878 struct vlan_net *vn;
67879
67880 vn = net_generic(net, vlan_net_id);
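The vlan.c hunk above drops a `name_type >= 0` test; if the field is unsigned that comparison is always true, so only the upper bound carries any meaning. A two-line demonstration of why such a check is dead code (the variable name is just for illustration):

#include <stdio.h>

int main(void)
{
        unsigned int name_type = 0;

        /* For any unsigned value this is 1; compilers typically warn
         * about it ("comparison is always true"). */
        printf("%d\n", name_type >= 0);
        return 0;
}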
67881diff -urNp linux-2.6.32.43/net/atm/atm_misc.c linux-2.6.32.43/net/atm/atm_misc.c
67882--- linux-2.6.32.43/net/atm/atm_misc.c 2011-03-27 14:31:47.000000000 -0400
67883+++ linux-2.6.32.43/net/atm/atm_misc.c 2011-04-17 15:56:46.000000000 -0400
67884@@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
67885 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
67886 return 1;
67887 atm_return(vcc,truesize);
67888- atomic_inc(&vcc->stats->rx_drop);
67889+ atomic_inc_unchecked(&vcc->stats->rx_drop);
67890 return 0;
67891 }
67892
67893@@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
67894 }
67895 }
67896 atm_return(vcc,guess);
67897- atomic_inc(&vcc->stats->rx_drop);
67898+ atomic_inc_unchecked(&vcc->stats->rx_drop);
67899 return NULL;
67900 }
67901
67902@@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
67903
67904 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
67905 {
67906-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
67907+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
67908 __SONET_ITEMS
67909 #undef __HANDLE_ITEM
67910 }
67911@@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
67912
67913 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
67914 {
67915-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
67916+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
67917 __SONET_ITEMS
67918 #undef __HANDLE_ITEM
67919 }
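The atm statistics hunks above switch pure counters to the *_unchecked atomic variants. In PaX's REFCOUNT scheme the ordinary atomics are instrumented against overflow to stop reference-count wraps, while the unchecked variants opt plain statistics out of that check, so the patch is marking these counters as allowed to wrap. A userspace sketch of the distinction with C11 atomics, where refusing to pass INT_MAX is an invented stand-in for the real overflow response:

#include <stdatomic.h>
#include <limits.h>
#include <stdio.h>

/* "Checked" increment: refuse to wrap past INT_MAX. */
static int atomic_inc_checked(atomic_int *v)
{
        int old = atomic_load(v);
        while (old != INT_MAX) {
                if (atomic_compare_exchange_weak(v, &old, old + 1))
                        return 0;
        }
        return -1;    /* would have overflowed */
}

/* "Unchecked" increment: plain add, wrapping is acceptable (statistics). */
static void atomic_inc_unchecked(atomic_int *v)
{
        atomic_fetch_add(v, 1);
}

int main(void)
{
        atomic_int refcnt  = ATOMIC_VAR_INIT(INT_MAX);
        atomic_int rx_drop = ATOMIC_VAR_INIT(0);

        printf("checked inc at INT_MAX -> %d\n", atomic_inc_checked(&refcnt));
        atomic_inc_unchecked(&rx_drop);
        printf("rx_drop = %d\n", atomic_load(&rx_drop));
        return 0;
}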
67920diff -urNp linux-2.6.32.43/net/atm/mpoa_caches.c linux-2.6.32.43/net/atm/mpoa_caches.c
67921--- linux-2.6.32.43/net/atm/mpoa_caches.c 2011-03-27 14:31:47.000000000 -0400
67922+++ linux-2.6.32.43/net/atm/mpoa_caches.c 2011-05-16 21:46:57.000000000 -0400
67923@@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_cl
67924 struct timeval now;
67925 struct k_message msg;
67926
67927+ pax_track_stack();
67928+
67929 do_gettimeofday(&now);
67930
67931 write_lock_irq(&client->egress_lock);
67932diff -urNp linux-2.6.32.43/net/atm/proc.c linux-2.6.32.43/net/atm/proc.c
67933--- linux-2.6.32.43/net/atm/proc.c 2011-03-27 14:31:47.000000000 -0400
67934+++ linux-2.6.32.43/net/atm/proc.c 2011-04-17 15:56:46.000000000 -0400
67935@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
67936 const struct k_atm_aal_stats *stats)
67937 {
67938 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
67939- atomic_read(&stats->tx),atomic_read(&stats->tx_err),
67940- atomic_read(&stats->rx),atomic_read(&stats->rx_err),
67941- atomic_read(&stats->rx_drop));
67942+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
67943+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
67944+ atomic_read_unchecked(&stats->rx_drop));
67945 }
67946
67947 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
67948@@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *se
67949 {
67950 struct sock *sk = sk_atm(vcc);
67951
67952+#ifdef CONFIG_GRKERNSEC_HIDESYM
67953+ seq_printf(seq, "%p ", NULL);
67954+#else
67955 seq_printf(seq, "%p ", vcc);
67956+#endif
67957+
67958 if (!vcc->dev)
67959 seq_printf(seq, "Unassigned ");
67960 else
67961@@ -214,7 +219,11 @@ static void svc_info(struct seq_file *se
67962 {
67963 if (!vcc->dev)
67964 seq_printf(seq, sizeof(void *) == 4 ?
67965+#ifdef CONFIG_GRKERNSEC_HIDESYM
67966+ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
67967+#else
67968 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
67969+#endif
67970 else
67971 seq_printf(seq, "%3d %3d %5d ",
67972 vcc->dev->number, vcc->vpi, vcc->vci);
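The /proc/net/atm hunks above (and several later ones in this patch) print NULL instead of real kernel addresses when GRKERNSEC_HIDESYM is enabled, so seq files stop handing unprivileged readers pointer values that would help an attacker. A small sketch of that redaction as a helper; the config macro is the patch's, everything else is invented:

#include <stdio.h>

#define CONFIG_GRKERNSEC_HIDESYM 1   /* undefine to see real pointers */

/* Return the value that is safe to show to unprivileged readers. */
static const void *printable_ptr(const void *p)
{
#ifdef CONFIG_GRKERNSEC_HIDESYM
        (void)p;
        return NULL;
#else
        return p;
#endif
}

int main(void)
{
        int secret;
        printf("vcc %p\n", printable_ptr(&secret));
        return 0;
}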
67973diff -urNp linux-2.6.32.43/net/atm/resources.c linux-2.6.32.43/net/atm/resources.c
67974--- linux-2.6.32.43/net/atm/resources.c 2011-03-27 14:31:47.000000000 -0400
67975+++ linux-2.6.32.43/net/atm/resources.c 2011-04-17 15:56:46.000000000 -0400
67976@@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
67977 static void copy_aal_stats(struct k_atm_aal_stats *from,
67978 struct atm_aal_stats *to)
67979 {
67980-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
67981+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
67982 __AAL_STAT_ITEMS
67983 #undef __HANDLE_ITEM
67984 }
67985@@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
67986 static void subtract_aal_stats(struct k_atm_aal_stats *from,
67987 struct atm_aal_stats *to)
67988 {
67989-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
67990+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
67991 __AAL_STAT_ITEMS
67992 #undef __HANDLE_ITEM
67993 }
67994diff -urNp linux-2.6.32.43/net/bluetooth/l2cap.c linux-2.6.32.43/net/bluetooth/l2cap.c
67995--- linux-2.6.32.43/net/bluetooth/l2cap.c 2011-03-27 14:31:47.000000000 -0400
67996+++ linux-2.6.32.43/net/bluetooth/l2cap.c 2011-06-25 14:36:21.000000000 -0400
67997@@ -1885,7 +1885,7 @@ static int l2cap_sock_getsockopt_old(str
67998 err = -ENOTCONN;
67999 break;
68000 }
68001-
68002+ memset(&cinfo, 0, sizeof(cinfo));
68003 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
68004 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
68005
68006@@ -2719,7 +2719,7 @@ static inline int l2cap_config_req(struc
68007
68008 /* Reject if config buffer is too small. */
68009 len = cmd_len - sizeof(*req);
68010- if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
68011+ if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
68012 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
68013 l2cap_build_conf_rsp(sk, rsp,
68014 L2CAP_CONF_REJECT, flags), rsp);
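The l2cap hunk above (and the rfcomm one that follows) memsets the local cinfo structure before filling it and copying it out. Without that, compiler-inserted padding and any fields the code never writes would carry stale kernel stack bytes to userspace. A userspace illustration of the padding issue with an invented struct; the exact padding layout depends on the ABI:

#include <stdio.h>
#include <string.h>

/* On common ABIs this struct has 2 padding bytes after 'handle'. */
struct conninfo {
        unsigned short handle;
        unsigned int   dev_class;
};

static void fill(struct conninfo *ci, int scrub)
{
        if (scrub)
                memset(ci, 0, sizeof(*ci));   /* what the patch adds */
        ci->handle = 0x1234;
        ci->dev_class = 0xabcdef;
}

int main(void)
{
        unsigned char buf[sizeof(struct conninfo)];
        struct conninfo ci;

        /* Leave some garbage where the struct lives, then build it. */
        memset(&ci, 0xAA, sizeof(ci));
        fill(&ci, 0);
        memcpy(buf, &ci, sizeof(ci));
        printf("without memset, padding byte = %02x\n", buf[2]);

        fill(&ci, 1);
        memcpy(buf, &ci, sizeof(ci));
        printf("with memset,    padding byte = %02x\n", buf[2]);
        return 0;
}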
68015diff -urNp linux-2.6.32.43/net/bluetooth/rfcomm/sock.c linux-2.6.32.43/net/bluetooth/rfcomm/sock.c
68016--- linux-2.6.32.43/net/bluetooth/rfcomm/sock.c 2011-03-27 14:31:47.000000000 -0400
68017+++ linux-2.6.32.43/net/bluetooth/rfcomm/sock.c 2011-06-12 06:35:00.000000000 -0400
68018@@ -878,6 +878,7 @@ static int rfcomm_sock_getsockopt_old(st
68019
68020 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
68021
68022+ memset(&cinfo, 0, sizeof(cinfo));
68023 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
68024 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
68025
68026diff -urNp linux-2.6.32.43/net/bridge/br_private.h linux-2.6.32.43/net/bridge/br_private.h
68027--- linux-2.6.32.43/net/bridge/br_private.h 2011-03-27 14:31:47.000000000 -0400
68028+++ linux-2.6.32.43/net/bridge/br_private.h 2011-04-17 15:56:46.000000000 -0400
68029@@ -254,7 +254,7 @@ extern void br_ifinfo_notify(int event,
68030
68031 #ifdef CONFIG_SYSFS
68032 /* br_sysfs_if.c */
68033-extern struct sysfs_ops brport_sysfs_ops;
68034+extern const struct sysfs_ops brport_sysfs_ops;
68035 extern int br_sysfs_addif(struct net_bridge_port *p);
68036
68037 /* br_sysfs_br.c */
68038diff -urNp linux-2.6.32.43/net/bridge/br_stp_if.c linux-2.6.32.43/net/bridge/br_stp_if.c
68039--- linux-2.6.32.43/net/bridge/br_stp_if.c 2011-03-27 14:31:47.000000000 -0400
68040+++ linux-2.6.32.43/net/bridge/br_stp_if.c 2011-04-17 15:56:46.000000000 -0400
68041@@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
68042 char *envp[] = { NULL };
68043
68044 if (br->stp_enabled == BR_USER_STP) {
68045- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
68046+ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
68047 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
68048 br->dev->name, r);
68049
68050diff -urNp linux-2.6.32.43/net/bridge/br_sysfs_if.c linux-2.6.32.43/net/bridge/br_sysfs_if.c
68051--- linux-2.6.32.43/net/bridge/br_sysfs_if.c 2011-03-27 14:31:47.000000000 -0400
68052+++ linux-2.6.32.43/net/bridge/br_sysfs_if.c 2011-04-17 15:56:46.000000000 -0400
68053@@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobje
68054 return ret;
68055 }
68056
68057-struct sysfs_ops brport_sysfs_ops = {
68058+const struct sysfs_ops brport_sysfs_ops = {
68059 .show = brport_show,
68060 .store = brport_store,
68061 };
68062diff -urNp linux-2.6.32.43/net/bridge/netfilter/ebtables.c linux-2.6.32.43/net/bridge/netfilter/ebtables.c
68063--- linux-2.6.32.43/net/bridge/netfilter/ebtables.c 2011-04-17 17:00:52.000000000 -0400
68064+++ linux-2.6.32.43/net/bridge/netfilter/ebtables.c 2011-05-16 21:46:57.000000000 -0400
68065@@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struc
68066 unsigned int entries_size, nentries;
68067 char *entries;
68068
68069+ pax_track_stack();
68070+
68071 if (cmd == EBT_SO_GET_ENTRIES) {
68072 entries_size = t->private->entries_size;
68073 nentries = t->private->nentries;
68074diff -urNp linux-2.6.32.43/net/can/bcm.c linux-2.6.32.43/net/can/bcm.c
68075--- linux-2.6.32.43/net/can/bcm.c 2011-05-10 22:12:01.000000000 -0400
68076+++ linux-2.6.32.43/net/can/bcm.c 2011-05-10 22:12:34.000000000 -0400
68077@@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file
68078 struct bcm_sock *bo = bcm_sk(sk);
68079 struct bcm_op *op;
68080
68081+#ifdef CONFIG_GRKERNSEC_HIDESYM
68082+ seq_printf(m, ">>> socket %p", NULL);
68083+ seq_printf(m, " / sk %p", NULL);
68084+ seq_printf(m, " / bo %p", NULL);
68085+#else
68086 seq_printf(m, ">>> socket %p", sk->sk_socket);
68087 seq_printf(m, " / sk %p", sk);
68088 seq_printf(m, " / bo %p", bo);
68089+#endif
68090 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
68091 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
68092 seq_printf(m, " <<<\n");
68093diff -urNp linux-2.6.32.43/net/core/dev.c linux-2.6.32.43/net/core/dev.c
68094--- linux-2.6.32.43/net/core/dev.c 2011-04-17 17:00:52.000000000 -0400
68095+++ linux-2.6.32.43/net/core/dev.c 2011-04-17 17:04:18.000000000 -0400
68096@@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const cha
68097 if (no_module && capable(CAP_NET_ADMIN))
68098 no_module = request_module("netdev-%s", name);
68099 if (no_module && capable(CAP_SYS_MODULE)) {
68100+#ifdef CONFIG_GRKERNSEC_MODHARDEN
68101+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
68102+#else
68103 if (!request_module("%s", name))
68104 pr_err("Loading kernel module for a network device "
68105 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
68106 "instead\n", name);
68107+#endif
68108 }
68109 }
68110 EXPORT_SYMBOL(dev_load);
68111@@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
68112 }
68113 EXPORT_SYMBOL(netif_rx_ni);
68114
68115-static void net_tx_action(struct softirq_action *h)
68116+static void net_tx_action(void)
68117 {
68118 struct softnet_data *sd = &__get_cpu_var(softnet_data);
68119
68120@@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *
68121 EXPORT_SYMBOL(netif_napi_del);
68122
68123
68124-static void net_rx_action(struct softirq_action *h)
68125+static void net_rx_action(void)
68126 {
68127 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
68128 unsigned long time_limit = jiffies + 2;
68129diff -urNp linux-2.6.32.43/net/core/flow.c linux-2.6.32.43/net/core/flow.c
68130--- linux-2.6.32.43/net/core/flow.c 2011-03-27 14:31:47.000000000 -0400
68131+++ linux-2.6.32.43/net/core/flow.c 2011-05-04 17:56:20.000000000 -0400
68132@@ -35,11 +35,11 @@ struct flow_cache_entry {
68133 atomic_t *object_ref;
68134 };
68135
68136-atomic_t flow_cache_genid = ATOMIC_INIT(0);
68137+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
68138
68139 static u32 flow_hash_shift;
68140 #define flow_hash_size (1 << flow_hash_shift)
68141-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
68142+static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
68143
68144 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
68145
68146@@ -52,7 +52,7 @@ struct flow_percpu_info {
68147 u32 hash_rnd;
68148 int count;
68149 };
68150-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
68151+static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
68152
68153 #define flow_hash_rnd_recalc(cpu) \
68154 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
68155@@ -69,7 +69,7 @@ struct flow_flush_info {
68156 atomic_t cpuleft;
68157 struct completion completion;
68158 };
68159-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
68160+static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
68161
68162 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
68163
68164@@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net,
68165 if (fle->family == family &&
68166 fle->dir == dir &&
68167 flow_key_compare(key, &fle->key) == 0) {
68168- if (fle->genid == atomic_read(&flow_cache_genid)) {
68169+ if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
68170 void *ret = fle->object;
68171
68172 if (ret)
68173@@ -228,7 +228,7 @@ nocache:
68174 err = resolver(net, key, family, dir, &obj, &obj_ref);
68175
68176 if (fle && !err) {
68177- fle->genid = atomic_read(&flow_cache_genid);
68178+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
68179
68180 if (fle->object)
68181 atomic_dec(fle->object_ref);
68182@@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns
68183
68184 fle = flow_table(cpu)[i];
68185 for (; fle; fle = fle->next) {
68186- unsigned genid = atomic_read(&flow_cache_genid);
68187+ unsigned genid = atomic_read_unchecked(&flow_cache_genid);
68188
68189 if (!fle->object || fle->genid == genid)
68190 continue;
68191diff -urNp linux-2.6.32.43/net/core/skbuff.c linux-2.6.32.43/net/core/skbuff.c
68192--- linux-2.6.32.43/net/core/skbuff.c 2011-03-27 14:31:47.000000000 -0400
68193+++ linux-2.6.32.43/net/core/skbuff.c 2011-05-16 21:46:57.000000000 -0400
68194@@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb,
68195 struct sk_buff *frag_iter;
68196 struct sock *sk = skb->sk;
68197
68198+ pax_track_stack();
68199+
68200 /*
68201 * __skb_splice_bits() only fails if the output has no room left,
68202 * so no point in going over the frag_list for the error case.
68203diff -urNp linux-2.6.32.43/net/core/sock.c linux-2.6.32.43/net/core/sock.c
68204--- linux-2.6.32.43/net/core/sock.c 2011-03-27 14:31:47.000000000 -0400
68205+++ linux-2.6.32.43/net/core/sock.c 2011-05-04 17:56:20.000000000 -0400
68206@@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock,
68207 break;
68208
68209 case SO_PEERCRED:
68210+ {
68211+ struct ucred peercred;
68212 if (len > sizeof(sk->sk_peercred))
68213 len = sizeof(sk->sk_peercred);
68214- if (copy_to_user(optval, &sk->sk_peercred, len))
68215+ peercred = sk->sk_peercred;
68216+ if (copy_to_user(optval, &peercred, len))
68217 return -EFAULT;
68218 goto lenout;
68219+ }
68220
68221 case SO_PEERNAME:
68222 {
68223@@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock,
68224 */
68225 smp_wmb();
68226 atomic_set(&sk->sk_refcnt, 1);
68227- atomic_set(&sk->sk_drops, 0);
68228+ atomic_set_unchecked(&sk->sk_drops, 0);
68229 }
68230 EXPORT_SYMBOL(sock_init_data);
68231
68232diff -urNp linux-2.6.32.43/net/decnet/sysctl_net_decnet.c linux-2.6.32.43/net/decnet/sysctl_net_decnet.c
68233--- linux-2.6.32.43/net/decnet/sysctl_net_decnet.c 2011-03-27 14:31:47.000000000 -0400
68234+++ linux-2.6.32.43/net/decnet/sysctl_net_decnet.c 2011-04-17 15:56:46.000000000 -0400
68235@@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_t
68236
68237 if (len > *lenp) len = *lenp;
68238
68239- if (copy_to_user(buffer, addr, len))
68240+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
68241 return -EFAULT;
68242
68243 *lenp = len;
68244@@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table
68245
68246 if (len > *lenp) len = *lenp;
68247
68248- if (copy_to_user(buffer, devname, len))
68249+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
68250 return -EFAULT;
68251
68252 *lenp = len;
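The decnet sysctl hunks above add a `len > sizeof addr` / `len > sizeof devname` guard before copy_to_user, so a caller-supplied length can never read past the kernel buffer being exported. A minimal model of that extra bound; the buffer names are invented:

#include <stdio.h>
#include <string.h>

/* Copy at most dst_len bytes of src (of size src_size) into dst;
 * refuse lengths that would over-read the source, as the hunks do. */
static int bounded_export(void *dst, size_t dst_len,
                          const void *src, size_t src_size)
{
        if (dst_len > src_size)
                return -1;            /* -EFAULT in the kernel hunk */
        memcpy(dst, src, dst_len);
        return 0;
}

int main(void)
{
        char devname[16] = "eth0";
        char out[64];

        printf("%d\n", bounded_export(out, sizeof(devname), devname, sizeof(devname)));
        printf("%d\n", bounded_export(out, 32, devname, sizeof(devname)));
        return 0;
}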
68253diff -urNp linux-2.6.32.43/net/econet/Kconfig linux-2.6.32.43/net/econet/Kconfig
68254--- linux-2.6.32.43/net/econet/Kconfig 2011-03-27 14:31:47.000000000 -0400
68255+++ linux-2.6.32.43/net/econet/Kconfig 2011-04-17 15:56:46.000000000 -0400
68256@@ -4,7 +4,7 @@
68257
68258 config ECONET
68259 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
68260- depends on EXPERIMENTAL && INET
68261+ depends on EXPERIMENTAL && INET && BROKEN
68262 ---help---
68263 Econet is a fairly old and slow networking protocol mainly used by
68264 Acorn computers to access file and print servers. It uses native
68265diff -urNp linux-2.6.32.43/net/ieee802154/dgram.c linux-2.6.32.43/net/ieee802154/dgram.c
68266--- linux-2.6.32.43/net/ieee802154/dgram.c 2011-03-27 14:31:47.000000000 -0400
68267+++ linux-2.6.32.43/net/ieee802154/dgram.c 2011-05-04 17:56:28.000000000 -0400
68268@@ -318,7 +318,7 @@ out:
68269 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
68270 {
68271 if (sock_queue_rcv_skb(sk, skb) < 0) {
68272- atomic_inc(&sk->sk_drops);
68273+ atomic_inc_unchecked(&sk->sk_drops);
68274 kfree_skb(skb);
68275 return NET_RX_DROP;
68276 }
68277diff -urNp linux-2.6.32.43/net/ieee802154/raw.c linux-2.6.32.43/net/ieee802154/raw.c
68278--- linux-2.6.32.43/net/ieee802154/raw.c 2011-03-27 14:31:47.000000000 -0400
68279+++ linux-2.6.32.43/net/ieee802154/raw.c 2011-05-04 17:56:28.000000000 -0400
68280@@ -206,7 +206,7 @@ out:
68281 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
68282 {
68283 if (sock_queue_rcv_skb(sk, skb) < 0) {
68284- atomic_inc(&sk->sk_drops);
68285+ atomic_inc_unchecked(&sk->sk_drops);
68286 kfree_skb(skb);
68287 return NET_RX_DROP;
68288 }
68289diff -urNp linux-2.6.32.43/net/ipv4/inet_diag.c linux-2.6.32.43/net/ipv4/inet_diag.c
68290--- linux-2.6.32.43/net/ipv4/inet_diag.c 2011-07-13 17:23:04.000000000 -0400
68291+++ linux-2.6.32.43/net/ipv4/inet_diag.c 2011-06-20 19:31:13.000000000 -0400
68292@@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct soc
68293 r->idiag_retrans = 0;
68294
68295 r->id.idiag_if = sk->sk_bound_dev_if;
68296+#ifdef CONFIG_GRKERNSEC_HIDESYM
68297+ r->id.idiag_cookie[0] = 0;
68298+ r->id.idiag_cookie[1] = 0;
68299+#else
68300 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
68301 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
68302+#endif
68303
68304 r->id.idiag_sport = inet->sport;
68305 r->id.idiag_dport = inet->dport;
68306@@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct in
68307 r->idiag_family = tw->tw_family;
68308 r->idiag_retrans = 0;
68309 r->id.idiag_if = tw->tw_bound_dev_if;
68310+
68311+#ifdef CONFIG_GRKERNSEC_HIDESYM
68312+ r->id.idiag_cookie[0] = 0;
68313+ r->id.idiag_cookie[1] = 0;
68314+#else
68315 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
68316 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
68317+#endif
68318+
68319 r->id.idiag_sport = tw->tw_sport;
68320 r->id.idiag_dport = tw->tw_dport;
68321 r->id.idiag_src[0] = tw->tw_rcv_saddr;
68322@@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk
68323 if (sk == NULL)
68324 goto unlock;
68325
68326+#ifndef CONFIG_GRKERNSEC_HIDESYM
68327 err = -ESTALE;
68328 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
68329 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
68330 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
68331 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
68332 goto out;
68333+#endif
68334
68335 err = -ENOMEM;
68336 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
68337@@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_
68338 r->idiag_retrans = req->retrans;
68339
68340 r->id.idiag_if = sk->sk_bound_dev_if;
68341+
68342+#ifdef CONFIG_GRKERNSEC_HIDESYM
68343+ r->id.idiag_cookie[0] = 0;
68344+ r->id.idiag_cookie[1] = 0;
68345+#else
68346 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
68347 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
68348+#endif
68349
68350 tmo = req->expires - jiffies;
68351 if (tmo < 0)
68352diff -urNp linux-2.6.32.43/net/ipv4/inet_hashtables.c linux-2.6.32.43/net/ipv4/inet_hashtables.c
68353--- linux-2.6.32.43/net/ipv4/inet_hashtables.c 2011-03-27 14:31:47.000000000 -0400
68354+++ linux-2.6.32.43/net/ipv4/inet_hashtables.c 2011-04-17 15:56:46.000000000 -0400
68355@@ -18,11 +18,14 @@
68356 #include <linux/sched.h>
68357 #include <linux/slab.h>
68358 #include <linux/wait.h>
68359+#include <linux/security.h>
68360
68361 #include <net/inet_connection_sock.h>
68362 #include <net/inet_hashtables.h>
68363 #include <net/ip.h>
68364
68365+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
68366+
68367 /*
68368 * Allocate and initialize a new local port bind bucket.
68369 * The bindhash mutex for snum's hash chain must be held here.
68370@@ -490,6 +493,8 @@ ok:
68371 }
68372 spin_unlock(&head->lock);
68373
68374+ gr_update_task_in_ip_table(current, inet_sk(sk));
68375+
68376 if (tw) {
68377 inet_twsk_deschedule(tw, death_row);
68378 inet_twsk_put(tw);
68379diff -urNp linux-2.6.32.43/net/ipv4/inetpeer.c linux-2.6.32.43/net/ipv4/inetpeer.c
68380--- linux-2.6.32.43/net/ipv4/inetpeer.c 2011-03-27 14:31:47.000000000 -0400
68381+++ linux-2.6.32.43/net/ipv4/inetpeer.c 2011-05-16 21:46:57.000000000 -0400
68382@@ -366,6 +366,8 @@ struct inet_peer *inet_getpeer(__be32 da
68383 struct inet_peer *p, *n;
68384 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
68385
68386+ pax_track_stack();
68387+
68388 /* Look up for the address quickly. */
68389 read_lock_bh(&peer_pool_lock);
68390 p = lookup(daddr, NULL);
68391@@ -389,7 +391,7 @@ struct inet_peer *inet_getpeer(__be32 da
68392 return NULL;
68393 n->v4daddr = daddr;
68394 atomic_set(&n->refcnt, 1);
68395- atomic_set(&n->rid, 0);
68396+ atomic_set_unchecked(&n->rid, 0);
68397 n->ip_id_count = secure_ip_id(daddr);
68398 n->tcp_ts_stamp = 0;
68399
68400diff -urNp linux-2.6.32.43/net/ipv4/ip_fragment.c linux-2.6.32.43/net/ipv4/ip_fragment.c
68401--- linux-2.6.32.43/net/ipv4/ip_fragment.c 2011-03-27 14:31:47.000000000 -0400
68402+++ linux-2.6.32.43/net/ipv4/ip_fragment.c 2011-04-17 15:56:46.000000000 -0400
68403@@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct
68404 return 0;
68405
68406 start = qp->rid;
68407- end = atomic_inc_return(&peer->rid);
68408+ end = atomic_inc_return_unchecked(&peer->rid);
68409 qp->rid = end;
68410
68411 rc = qp->q.fragments && (end - start) > max;
68412diff -urNp linux-2.6.32.43/net/ipv4/ip_sockglue.c linux-2.6.32.43/net/ipv4/ip_sockglue.c
68413--- linux-2.6.32.43/net/ipv4/ip_sockglue.c 2011-03-27 14:31:47.000000000 -0400
68414+++ linux-2.6.32.43/net/ipv4/ip_sockglue.c 2011-05-16 21:46:57.000000000 -0400
68415@@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock
68416 int val;
68417 int len;
68418
68419+ pax_track_stack();
68420+
68421 if (level != SOL_IP)
68422 return -EOPNOTSUPP;
68423
68424diff -urNp linux-2.6.32.43/net/ipv4/netfilter/arp_tables.c linux-2.6.32.43/net/ipv4/netfilter/arp_tables.c
68425--- linux-2.6.32.43/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:00:52.000000000 -0400
68426+++ linux-2.6.32.43/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:04:18.000000000 -0400
68427@@ -934,6 +934,7 @@ static int get_info(struct net *net, voi
68428 private = &tmp;
68429 }
68430 #endif
68431+ memset(&info, 0, sizeof(info));
68432 info.valid_hooks = t->valid_hooks;
68433 memcpy(info.hook_entry, private->hook_entry,
68434 sizeof(info.hook_entry));
68435diff -urNp linux-2.6.32.43/net/ipv4/netfilter/ip_tables.c linux-2.6.32.43/net/ipv4/netfilter/ip_tables.c
68436--- linux-2.6.32.43/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:00:52.000000000 -0400
68437+++ linux-2.6.32.43/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:04:18.000000000 -0400
68438@@ -1141,6 +1141,7 @@ static int get_info(struct net *net, voi
68439 private = &tmp;
68440 }
68441 #endif
68442+ memset(&info, 0, sizeof(info));
68443 info.valid_hooks = t->valid_hooks;
68444 memcpy(info.hook_entry, private->hook_entry,
68445 sizeof(info.hook_entry));
68446diff -urNp linux-2.6.32.43/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.32.43/net/ipv4/netfilter/nf_nat_snmp_basic.c
68447--- linux-2.6.32.43/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-03-27 14:31:47.000000000 -0400
68448+++ linux-2.6.32.43/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-04-17 15:56:46.000000000 -0400
68449@@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
68450
68451 *len = 0;
68452
68453- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
68454+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
68455 if (*octets == NULL) {
68456 if (net_ratelimit())
68457 printk("OOM in bsalg (%d)\n", __LINE__);
68458diff -urNp linux-2.6.32.43/net/ipv4/raw.c linux-2.6.32.43/net/ipv4/raw.c
68459--- linux-2.6.32.43/net/ipv4/raw.c 2011-03-27 14:31:47.000000000 -0400
68460+++ linux-2.6.32.43/net/ipv4/raw.c 2011-05-04 17:59:08.000000000 -0400
68461@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk,
68462 /* Charge it to the socket. */
68463
68464 if (sock_queue_rcv_skb(sk, skb) < 0) {
68465- atomic_inc(&sk->sk_drops);
68466+ atomic_inc_unchecked(&sk->sk_drops);
68467 kfree_skb(skb);
68468 return NET_RX_DROP;
68469 }
68470@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk,
68471 int raw_rcv(struct sock *sk, struct sk_buff *skb)
68472 {
68473 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
68474- atomic_inc(&sk->sk_drops);
68475+ atomic_inc_unchecked(&sk->sk_drops);
68476 kfree_skb(skb);
68477 return NET_RX_DROP;
68478 }
68479@@ -724,15 +724,22 @@ static int raw_init(struct sock *sk)
68480
68481 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
68482 {
68483+ struct icmp_filter filter;
68484+
68485+ if (optlen < 0)
68486+ return -EINVAL;
68487 if (optlen > sizeof(struct icmp_filter))
68488 optlen = sizeof(struct icmp_filter);
68489- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
68490+ if (copy_from_user(&filter, optval, optlen))
68491 return -EFAULT;
68492+ memcpy(&raw_sk(sk)->filter, &filter, optlen);
68493+
68494 return 0;
68495 }
68496
68497 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
68498 {
68499+ struct icmp_filter filter;
68500 int len, ret = -EFAULT;
68501
68502 if (get_user(len, optlen))
68503@@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock
68504 if (len > sizeof(struct icmp_filter))
68505 len = sizeof(struct icmp_filter);
68506 ret = -EFAULT;
68507+ memcpy(&filter, &raw_sk(sk)->filter, len);
68508 if (put_user(len, optlen) ||
68509- copy_to_user(optval, &raw_sk(sk)->filter, len))
68510+ copy_to_user(optval, &filter, len))
68511 goto out;
68512 ret = 0;
68513 out: return ret;
68514@@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq
68515 sk_wmem_alloc_get(sp),
68516 sk_rmem_alloc_get(sp),
68517 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
68518- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
68519+ atomic_read(&sp->sk_refcnt),
68520+#ifdef CONFIG_GRKERNSEC_HIDESYM
68521+ NULL,
68522+#else
68523+ sp,
68524+#endif
68525+ atomic_read_unchecked(&sp->sk_drops));
68526 }
68527
68528 static int raw_seq_show(struct seq_file *seq, void *v)
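The ipv4 raw socket hunks above validate a possibly negative optlen and copy the ICMP filter through a correctly sized local before touching raw_sk(sk)->filter, so a negative length cannot reach the copy and a faulting copy cannot leave the live filter half-written. A userspace sketch of that setsockopt-style path with an invented filter type:

#include <stdio.h>
#include <string.h>

struct icmp_filter { unsigned int data; };   /* stand-in for the kernel's */

static struct icmp_filter sock_filter;       /* the "socket" state */

/* Mirror the patched raw_seticmpfilter(): reject negative lengths,
 * clamp to the filter size, and go through a local bounce buffer. */
static int set_filter(const void *optval, int optlen)
{
        struct icmp_filter filter;

        if (optlen < 0)
                return -1;                        /* -EINVAL */
        if ((size_t)optlen > sizeof(filter))
                optlen = sizeof(filter);
        memcpy(&filter, optval, (size_t)optlen);  /* copy_from_user stand-in */
        memcpy(&sock_filter, &filter, (size_t)optlen);
        return 0;
}

int main(void)
{
        struct icmp_filter f = { 0xdeadbeef };

        printf("%d\n", set_filter(&f, sizeof(f)));
        printf("%d\n", set_filter(&f, -4));
        printf("filter = %#x\n", sock_filter.data);
        return 0;
}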
68529diff -urNp linux-2.6.32.43/net/ipv4/route.c linux-2.6.32.43/net/ipv4/route.c
68530--- linux-2.6.32.43/net/ipv4/route.c 2011-03-27 14:31:47.000000000 -0400
68531+++ linux-2.6.32.43/net/ipv4/route.c 2011-05-04 17:56:28.000000000 -0400
68532@@ -268,7 +268,7 @@ static inline unsigned int rt_hash(__be3
68533
68534 static inline int rt_genid(struct net *net)
68535 {
68536- return atomic_read(&net->ipv4.rt_genid);
68537+ return atomic_read_unchecked(&net->ipv4.rt_genid);
68538 }
68539
68540 #ifdef CONFIG_PROC_FS
68541@@ -888,7 +888,7 @@ static void rt_cache_invalidate(struct n
68542 unsigned char shuffle;
68543
68544 get_random_bytes(&shuffle, sizeof(shuffle));
68545- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
68546+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
68547 }
68548
68549 /*
68550@@ -3356,7 +3356,7 @@ static __net_initdata struct pernet_oper
68551
68552 static __net_init int rt_secret_timer_init(struct net *net)
68553 {
68554- atomic_set(&net->ipv4.rt_genid,
68555+ atomic_set_unchecked(&net->ipv4.rt_genid,
68556 (int) ((num_physpages ^ (num_physpages>>8)) ^
68557 (jiffies ^ (jiffies >> 7))));
68558
68559diff -urNp linux-2.6.32.43/net/ipv4/tcp.c linux-2.6.32.43/net/ipv4/tcp.c
68560--- linux-2.6.32.43/net/ipv4/tcp.c 2011-03-27 14:31:47.000000000 -0400
68561+++ linux-2.6.32.43/net/ipv4/tcp.c 2011-05-16 21:46:57.000000000 -0400
68562@@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock
68563 int val;
68564 int err = 0;
68565
68566+ pax_track_stack();
68567+
68568 /* This is a string value all the others are int's */
68569 if (optname == TCP_CONGESTION) {
68570 char name[TCP_CA_NAME_MAX];
68571@@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock
68572 struct tcp_sock *tp = tcp_sk(sk);
68573 int val, len;
68574
68575+ pax_track_stack();
68576+
68577 if (get_user(len, optlen))
68578 return -EFAULT;
68579
68580diff -urNp linux-2.6.32.43/net/ipv4/tcp_ipv4.c linux-2.6.32.43/net/ipv4/tcp_ipv4.c
68581--- linux-2.6.32.43/net/ipv4/tcp_ipv4.c 2011-03-27 14:31:47.000000000 -0400
68582+++ linux-2.6.32.43/net/ipv4/tcp_ipv4.c 2011-04-17 15:56:46.000000000 -0400
68583@@ -84,6 +84,9 @@
68584 int sysctl_tcp_tw_reuse __read_mostly;
68585 int sysctl_tcp_low_latency __read_mostly;
68586
68587+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68588+extern int grsec_enable_blackhole;
68589+#endif
68590
68591 #ifdef CONFIG_TCP_MD5SIG
68592 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
68593@@ -1542,6 +1545,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
68594 return 0;
68595
68596 reset:
68597+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68598+ if (!grsec_enable_blackhole)
68599+#endif
68600 tcp_v4_send_reset(rsk, skb);
68601 discard:
68602 kfree_skb(skb);
68603@@ -1603,12 +1609,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
68604 TCP_SKB_CB(skb)->sacked = 0;
68605
68606 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
68607- if (!sk)
68608+ if (!sk) {
68609+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68610+ ret = 1;
68611+#endif
68612 goto no_tcp_socket;
68613+ }
68614
68615 process:
68616- if (sk->sk_state == TCP_TIME_WAIT)
68617+ if (sk->sk_state == TCP_TIME_WAIT) {
68618+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68619+ ret = 2;
68620+#endif
68621 goto do_time_wait;
68622+ }
68623
68624 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
68625 goto discard_and_relse;
68626@@ -1650,6 +1664,10 @@ no_tcp_socket:
68627 bad_packet:
68628 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
68629 } else {
68630+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68631+ if (!grsec_enable_blackhole || (ret == 1 &&
68632+ (skb->dev->flags & IFF_LOOPBACK)))
68633+#endif
68634 tcp_v4_send_reset(NULL, skb);
68635 }
68636
68637@@ -2237,7 +2255,11 @@ static void get_openreq4(struct sock *sk
68638 0, /* non standard timer */
68639 0, /* open_requests have no inode */
68640 atomic_read(&sk->sk_refcnt),
68641+#ifdef CONFIG_GRKERNSEC_HIDESYM
68642+ NULL,
68643+#else
68644 req,
68645+#endif
68646 len);
68647 }
68648
68649@@ -2279,7 +2301,12 @@ static void get_tcp4_sock(struct sock *s
68650 sock_i_uid(sk),
68651 icsk->icsk_probes_out,
68652 sock_i_ino(sk),
68653- atomic_read(&sk->sk_refcnt), sk,
68654+ atomic_read(&sk->sk_refcnt),
68655+#ifdef CONFIG_GRKERNSEC_HIDESYM
68656+ NULL,
68657+#else
68658+ sk,
68659+#endif
68660 jiffies_to_clock_t(icsk->icsk_rto),
68661 jiffies_to_clock_t(icsk->icsk_ack.ato),
68662 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
68663@@ -2307,7 +2334,13 @@ static void get_timewait4_sock(struct in
68664 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
68665 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
68666 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
68667- atomic_read(&tw->tw_refcnt), tw, len);
68668+ atomic_read(&tw->tw_refcnt),
68669+#ifdef CONFIG_GRKERNSEC_HIDESYM
68670+ NULL,
68671+#else
68672+ tw,
68673+#endif
68674+ len);
68675 }
68676
68677 #define TMPSZ 150
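The tcp_ipv4.c hunks above gate the RST sent for segments that match no socket behind the GRKERNSEC_BLACKHOLE knob, keeping resets only for loopback traffic in the "no socket found" case so local tools still work while remote port probes see silence. A small sketch of the resulting decision; the variable meanings echo the hunk (ret == 1 means no socket matched) but the function itself is invented:

#include <stdio.h>
#include <stdbool.h>

static bool should_send_reset(bool blackhole, int ret, bool dev_is_loopback)
{
        if (!blackhole)
                return true;                      /* stock behaviour */
        if (ret == 1 && dev_is_loopback)
                return true;                      /* keep loopback usable */
        return false;                             /* stay silent */
}

int main(void)
{
        printf("%d\n", should_send_reset(true, 1, false));  /* 0: probe sees nothing */
        printf("%d\n", should_send_reset(true, 1, true));   /* 1: loopback RST kept  */
        printf("%d\n", should_send_reset(false, 1, false)); /* 1: feature off        */
        return 0;
}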
68678diff -urNp linux-2.6.32.43/net/ipv4/tcp_minisocks.c linux-2.6.32.43/net/ipv4/tcp_minisocks.c
68679--- linux-2.6.32.43/net/ipv4/tcp_minisocks.c 2011-03-27 14:31:47.000000000 -0400
68680+++ linux-2.6.32.43/net/ipv4/tcp_minisocks.c 2011-04-17 15:56:46.000000000 -0400
68681@@ -26,6 +26,10 @@
68682 #include <net/inet_common.h>
68683 #include <net/xfrm.h>
68684
68685+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68686+extern int grsec_enable_blackhole;
68687+#endif
68688+
68689 #ifdef CONFIG_SYSCTL
68690 #define SYNC_INIT 0 /* let the user enable it */
68691 #else
68692@@ -672,6 +676,10 @@ listen_overflow:
68693
68694 embryonic_reset:
68695 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
68696+
68697+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68698+ if (!grsec_enable_blackhole)
68699+#endif
68700 if (!(flg & TCP_FLAG_RST))
68701 req->rsk_ops->send_reset(sk, skb);
68702
68703diff -urNp linux-2.6.32.43/net/ipv4/tcp_output.c linux-2.6.32.43/net/ipv4/tcp_output.c
68704--- linux-2.6.32.43/net/ipv4/tcp_output.c 2011-03-27 14:31:47.000000000 -0400
68705+++ linux-2.6.32.43/net/ipv4/tcp_output.c 2011-05-16 21:46:57.000000000 -0400
68706@@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct s
68707 __u8 *md5_hash_location;
68708 int mss;
68709
68710+ pax_track_stack();
68711+
68712 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
68713 if (skb == NULL)
68714 return NULL;
68715diff -urNp linux-2.6.32.43/net/ipv4/tcp_probe.c linux-2.6.32.43/net/ipv4/tcp_probe.c
68716--- linux-2.6.32.43/net/ipv4/tcp_probe.c 2011-03-27 14:31:47.000000000 -0400
68717+++ linux-2.6.32.43/net/ipv4/tcp_probe.c 2011-04-17 15:56:46.000000000 -0400
68718@@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file
68719 if (cnt + width >= len)
68720 break;
68721
68722- if (copy_to_user(buf + cnt, tbuf, width))
68723+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
68724 return -EFAULT;
68725 cnt += width;
68726 }
68727diff -urNp linux-2.6.32.43/net/ipv4/tcp_timer.c linux-2.6.32.43/net/ipv4/tcp_timer.c
68728--- linux-2.6.32.43/net/ipv4/tcp_timer.c 2011-03-27 14:31:47.000000000 -0400
68729+++ linux-2.6.32.43/net/ipv4/tcp_timer.c 2011-04-17 15:56:46.000000000 -0400
68730@@ -21,6 +21,10 @@
68731 #include <linux/module.h>
68732 #include <net/tcp.h>
68733
68734+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68735+extern int grsec_lastack_retries;
68736+#endif
68737+
68738 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
68739 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
68740 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
68741@@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock
68742 }
68743 }
68744
68745+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68746+ if ((sk->sk_state == TCP_LAST_ACK) &&
68747+ (grsec_lastack_retries > 0) &&
68748+ (grsec_lastack_retries < retry_until))
68749+ retry_until = grsec_lastack_retries;
68750+#endif
68751+
68752 if (retransmits_timed_out(sk, retry_until)) {
68753 /* Has it gone just too far? */
68754 tcp_write_err(sk);
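The tcp_timer.c hunk above lets a grsec setting cap the number of retransmissions for sockets stuck in LAST-ACK, so such connections are torn down sooner. The effect is a conditional lower clamp on retry_until; a tiny sketch of just that logic:

#include <stdio.h>

/* Apply the LAST-ACK clamp the hunk adds: only when the state matches,
 * the knob is set (> 0) and it is lower than the current limit. */
static int clamp_retries(int retry_until, int in_last_ack, int lastack_retries)
{
        if (in_last_ack && lastack_retries > 0 && lastack_retries < retry_until)
                retry_until = lastack_retries;
        return retry_until;
}

int main(void)
{
        printf("%d\n", clamp_retries(15, 1, 4));   /* 4: clamped           */
        printf("%d\n", clamp_retries(15, 0, 4));   /* 15: not in LAST-ACK  */
        printf("%d\n", clamp_retries(15, 1, 0));   /* 15: knob unset       */
        return 0;
}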
68755diff -urNp linux-2.6.32.43/net/ipv4/udp.c linux-2.6.32.43/net/ipv4/udp.c
68756--- linux-2.6.32.43/net/ipv4/udp.c 2011-07-13 17:23:04.000000000 -0400
68757+++ linux-2.6.32.43/net/ipv4/udp.c 2011-07-13 17:23:27.000000000 -0400
68758@@ -86,6 +86,7 @@
68759 #include <linux/types.h>
68760 #include <linux/fcntl.h>
68761 #include <linux/module.h>
68762+#include <linux/security.h>
68763 #include <linux/socket.h>
68764 #include <linux/sockios.h>
68765 #include <linux/igmp.h>
68766@@ -106,6 +107,10 @@
68767 #include <net/xfrm.h>
68768 #include "udp_impl.h"
68769
68770+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68771+extern int grsec_enable_blackhole;
68772+#endif
68773+
68774 struct udp_table udp_table;
68775 EXPORT_SYMBOL(udp_table);
68776
68777@@ -371,6 +376,9 @@ found:
68778 return s;
68779 }
68780
68781+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
68782+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
68783+
68784 /*
68785 * This routine is called by the ICMP module when it gets some
68786 * sort of error condition. If err < 0 then the socket should
68787@@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
68788 dport = usin->sin_port;
68789 if (dport == 0)
68790 return -EINVAL;
68791+
68792+ err = gr_search_udp_sendmsg(sk, usin);
68793+ if (err)
68794+ return err;
68795 } else {
68796 if (sk->sk_state != TCP_ESTABLISHED)
68797 return -EDESTADDRREQ;
68798+
68799+ err = gr_search_udp_sendmsg(sk, NULL);
68800+ if (err)
68801+ return err;
68802+
68803 daddr = inet->daddr;
68804 dport = inet->dport;
68805 /* Open fast path for connected socket.
68806@@ -945,6 +962,10 @@ try_again:
68807 if (!skb)
68808 goto out;
68809
68810+ err = gr_search_udp_recvmsg(sk, skb);
68811+ if (err)
68812+ goto out_free;
68813+
68814 ulen = skb->len - sizeof(struct udphdr);
68815 copied = len;
68816 if (copied > ulen)
68817@@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct so
68818 if (rc == -ENOMEM) {
68819 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
68820 is_udplite);
68821- atomic_inc(&sk->sk_drops);
68822+ atomic_inc_unchecked(&sk->sk_drops);
68823 }
68824 goto drop;
68825 }
68826@@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
68827 goto csum_error;
68828
68829 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
68830+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
68831+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
68832+#endif
68833 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
68834
68835 /*
68836@@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock
68837 sk_wmem_alloc_get(sp),
68838 sk_rmem_alloc_get(sp),
68839 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
68840- atomic_read(&sp->sk_refcnt), sp,
68841- atomic_read(&sp->sk_drops), len);
68842+ atomic_read(&sp->sk_refcnt),
68843+#ifdef CONFIG_GRKERNSEC_HIDESYM
68844+ NULL,
68845+#else
68846+ sp,
68847+#endif
68848+ atomic_read_unchecked(&sp->sk_drops), len);
68849 }
68850
68851 int udp4_seq_show(struct seq_file *seq, void *v)
68852diff -urNp linux-2.6.32.43/net/ipv6/inet6_connection_sock.c linux-2.6.32.43/net/ipv6/inet6_connection_sock.c
68853--- linux-2.6.32.43/net/ipv6/inet6_connection_sock.c 2011-03-27 14:31:47.000000000 -0400
68854+++ linux-2.6.32.43/net/ipv6/inet6_connection_sock.c 2011-05-04 17:56:28.000000000 -0400
68855@@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *
68856 #ifdef CONFIG_XFRM
68857 {
68858 struct rt6_info *rt = (struct rt6_info *)dst;
68859- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
68860+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
68861 }
68862 #endif
68863 }
68864@@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(
68865 #ifdef CONFIG_XFRM
68866 if (dst) {
68867 struct rt6_info *rt = (struct rt6_info *)dst;
68868- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
68869+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
68870 sk->sk_dst_cache = NULL;
68871 dst_release(dst);
68872 dst = NULL;
68873diff -urNp linux-2.6.32.43/net/ipv6/inet6_hashtables.c linux-2.6.32.43/net/ipv6/inet6_hashtables.c
68874--- linux-2.6.32.43/net/ipv6/inet6_hashtables.c 2011-03-27 14:31:47.000000000 -0400
68875+++ linux-2.6.32.43/net/ipv6/inet6_hashtables.c 2011-05-04 17:56:28.000000000 -0400
68876@@ -118,7 +118,7 @@ out:
68877 }
68878 EXPORT_SYMBOL(__inet6_lookup_established);
68879
68880-static int inline compute_score(struct sock *sk, struct net *net,
68881+static inline int compute_score(struct sock *sk, struct net *net,
68882 const unsigned short hnum,
68883 const struct in6_addr *daddr,
68884 const int dif)
68885diff -urNp linux-2.6.32.43/net/ipv6/ipv6_sockglue.c linux-2.6.32.43/net/ipv6/ipv6_sockglue.c
68886--- linux-2.6.32.43/net/ipv6/ipv6_sockglue.c 2011-03-27 14:31:47.000000000 -0400
68887+++ linux-2.6.32.43/net/ipv6/ipv6_sockglue.c 2011-05-16 21:46:57.000000000 -0400
68888@@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct soc
68889 int val, valbool;
68890 int retv = -ENOPROTOOPT;
68891
68892+ pax_track_stack();
68893+
68894 if (optval == NULL)
68895 val=0;
68896 else {
68897@@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct soc
68898 int len;
68899 int val;
68900
68901+ pax_track_stack();
68902+
68903 if (ip6_mroute_opt(optname))
68904 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
68905
68906diff -urNp linux-2.6.32.43/net/ipv6/netfilter/ip6_tables.c linux-2.6.32.43/net/ipv6/netfilter/ip6_tables.c
68907--- linux-2.6.32.43/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:00:52.000000000 -0400
68908+++ linux-2.6.32.43/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:04:18.000000000 -0400
68909@@ -1173,6 +1173,7 @@ static int get_info(struct net *net, voi
68910 private = &tmp;
68911 }
68912 #endif
68913+ memset(&info, 0, sizeof(info));
68914 info.valid_hooks = t->valid_hooks;
68915 memcpy(info.hook_entry, private->hook_entry,
68916 sizeof(info.hook_entry));
68917diff -urNp linux-2.6.32.43/net/ipv6/raw.c linux-2.6.32.43/net/ipv6/raw.c
68918--- linux-2.6.32.43/net/ipv6/raw.c 2011-03-27 14:31:47.000000000 -0400
68919+++ linux-2.6.32.43/net/ipv6/raw.c 2011-05-16 21:46:57.000000000 -0400
68920@@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s
68921 {
68922 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
68923 skb_checksum_complete(skb)) {
68924- atomic_inc(&sk->sk_drops);
68925+ atomic_inc_unchecked(&sk->sk_drops);
68926 kfree_skb(skb);
68927 return NET_RX_DROP;
68928 }
68929
68930 /* Charge it to the socket. */
68931 if (sock_queue_rcv_skb(sk,skb)<0) {
68932- atomic_inc(&sk->sk_drops);
68933+ atomic_inc_unchecked(&sk->sk_drops);
68934 kfree_skb(skb);
68935 return NET_RX_DROP;
68936 }
68937@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
68938 struct raw6_sock *rp = raw6_sk(sk);
68939
68940 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
68941- atomic_inc(&sk->sk_drops);
68942+ atomic_inc_unchecked(&sk->sk_drops);
68943 kfree_skb(skb);
68944 return NET_RX_DROP;
68945 }
68946@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
68947
68948 if (inet->hdrincl) {
68949 if (skb_checksum_complete(skb)) {
68950- atomic_inc(&sk->sk_drops);
68951+ atomic_inc_unchecked(&sk->sk_drops);
68952 kfree_skb(skb);
68953 return NET_RX_DROP;
68954 }
68955@@ -518,7 +518,7 @@ csum_copy_err:
68956 as some normal condition.
68957 */
68958 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
68959- atomic_inc(&sk->sk_drops);
68960+ atomic_inc_unchecked(&sk->sk_drops);
68961 goto out;
68962 }
68963
68964@@ -600,7 +600,7 @@ out:
68965 return err;
68966 }
68967
68968-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
68969+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
68970 struct flowi *fl, struct rt6_info *rt,
68971 unsigned int flags)
68972 {
68973@@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *i
68974 u16 proto;
68975 int err;
68976
68977+ pax_track_stack();
68978+
68979 /* Rough check on arithmetic overflow,
68980 better check is made in ip6_append_data().
68981 */
68982@@ -916,12 +918,17 @@ do_confirm:
68983 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
68984 char __user *optval, int optlen)
68985 {
68986+ struct icmp6_filter filter;
68987+
68988 switch (optname) {
68989 case ICMPV6_FILTER:
68990+ if (optlen < 0)
68991+ return -EINVAL;
68992 if (optlen > sizeof(struct icmp6_filter))
68993 optlen = sizeof(struct icmp6_filter);
68994- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
68995+ if (copy_from_user(&filter, optval, optlen))
68996 return -EFAULT;
68997+ memcpy(&raw6_sk(sk)->filter, &filter, optlen);
68998 return 0;
68999 default:
69000 return -ENOPROTOOPT;
69001@@ -933,6 +940,7 @@ static int rawv6_seticmpfilter(struct so
69002 static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
69003 char __user *optval, int __user *optlen)
69004 {
69005+ struct icmp6_filter filter;
69006 int len;
69007
69008 switch (optname) {
69009@@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct so
69010 len = sizeof(struct icmp6_filter);
69011 if (put_user(len, optlen))
69012 return -EFAULT;
69013- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
69014+ memcpy(&filter, &raw6_sk(sk)->filter, len);
69015+ if (copy_to_user(optval, &filter, len))
69016 return -EFAULT;
69017 return 0;
69018 default:
69019@@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct se
69020 0, 0L, 0,
69021 sock_i_uid(sp), 0,
69022 sock_i_ino(sp),
69023- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
69024+ atomic_read(&sp->sk_refcnt),
69025+#ifdef CONFIG_GRKERNSEC_HIDESYM
69026+ NULL,
69027+#else
69028+ sp,
69029+#endif
69030+ atomic_read_unchecked(&sp->sk_drops));
69031 }
69032
69033 static int raw6_seq_show(struct seq_file *seq, void *v)
69034diff -urNp linux-2.6.32.43/net/ipv6/tcp_ipv6.c linux-2.6.32.43/net/ipv6/tcp_ipv6.c
69035--- linux-2.6.32.43/net/ipv6/tcp_ipv6.c 2011-03-27 14:31:47.000000000 -0400
69036+++ linux-2.6.32.43/net/ipv6/tcp_ipv6.c 2011-04-17 15:56:46.000000000 -0400
69037@@ -88,6 +88,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
69038 }
69039 #endif
69040
69041+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69042+extern int grsec_enable_blackhole;
69043+#endif
69044+
69045 static void tcp_v6_hash(struct sock *sk)
69046 {
69047 if (sk->sk_state != TCP_CLOSE) {
69048@@ -1578,6 +1582,9 @@ static int tcp_v6_do_rcv(struct sock *sk
69049 return 0;
69050
69051 reset:
69052+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69053+ if (!grsec_enable_blackhole)
69054+#endif
69055 tcp_v6_send_reset(sk, skb);
69056 discard:
69057 if (opt_skb)
69058@@ -1655,12 +1662,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
69059 TCP_SKB_CB(skb)->sacked = 0;
69060
69061 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
69062- if (!sk)
69063+ if (!sk) {
69064+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69065+ ret = 1;
69066+#endif
69067 goto no_tcp_socket;
69068+ }
69069
69070 process:
69071- if (sk->sk_state == TCP_TIME_WAIT)
69072+ if (sk->sk_state == TCP_TIME_WAIT) {
69073+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69074+ ret = 2;
69075+#endif
69076 goto do_time_wait;
69077+ }
69078
69079 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
69080 goto discard_and_relse;
69081@@ -1700,6 +1715,10 @@ no_tcp_socket:
69082 bad_packet:
69083 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
69084 } else {
69085+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69086+ if (!grsec_enable_blackhole || (ret == 1 &&
69087+ (skb->dev->flags & IFF_LOOPBACK)))
69088+#endif
69089 tcp_v6_send_reset(NULL, skb);
69090 }
69091
69092@@ -1915,7 +1934,13 @@ static void get_openreq6(struct seq_file
69093 uid,
69094 0, /* non standard timer */
69095 0, /* open_requests have no inode */
69096- 0, req);
69097+ 0,
69098+#ifdef CONFIG_GRKERNSEC_HIDESYM
69099+ NULL
69100+#else
69101+ req
69102+#endif
69103+ );
69104 }
69105
69106 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
69107@@ -1965,7 +1990,12 @@ static void get_tcp6_sock(struct seq_fil
69108 sock_i_uid(sp),
69109 icsk->icsk_probes_out,
69110 sock_i_ino(sp),
69111- atomic_read(&sp->sk_refcnt), sp,
69112+ atomic_read(&sp->sk_refcnt),
69113+#ifdef CONFIG_GRKERNSEC_HIDESYM
69114+ NULL,
69115+#else
69116+ sp,
69117+#endif
69118 jiffies_to_clock_t(icsk->icsk_rto),
69119 jiffies_to_clock_t(icsk->icsk_ack.ato),
69120 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
69121@@ -2000,7 +2030,13 @@ static void get_timewait6_sock(struct se
69122 dest->s6_addr32[2], dest->s6_addr32[3], destp,
69123 tw->tw_substate, 0, 0,
69124 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
69125- atomic_read(&tw->tw_refcnt), tw);
69126+ atomic_read(&tw->tw_refcnt),
69127+#ifdef CONFIG_GRKERNSEC_HIDESYM
69128+ NULL
69129+#else
69130+ tw
69131+#endif
69132+ );
69133 }
69134
69135 static int tcp6_seq_show(struct seq_file *seq, void *v)
69136diff -urNp linux-2.6.32.43/net/ipv6/udp.c linux-2.6.32.43/net/ipv6/udp.c
69137--- linux-2.6.32.43/net/ipv6/udp.c 2011-07-13 17:23:04.000000000 -0400
69138+++ linux-2.6.32.43/net/ipv6/udp.c 2011-07-13 17:23:27.000000000 -0400
69139@@ -49,6 +49,10 @@
69140 #include <linux/seq_file.h>
69141 #include "udp_impl.h"
69142
69143+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69144+extern int grsec_enable_blackhole;
69145+#endif
69146+
69147 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
69148 {
69149 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
69150@@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
69151 if (rc == -ENOMEM) {
69152 UDP6_INC_STATS_BH(sock_net(sk),
69153 UDP_MIB_RCVBUFERRORS, is_udplite);
69154- atomic_inc(&sk->sk_drops);
69155+ atomic_inc_unchecked(&sk->sk_drops);
69156 }
69157 goto drop;
69158 }
69159@@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
69160 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
69161 proto == IPPROTO_UDPLITE);
69162
69163+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
69164+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
69165+#endif
69166 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
69167
69168 kfree_skb(skb);
69169@@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct se
69170 0, 0L, 0,
69171 sock_i_uid(sp), 0,
69172 sock_i_ino(sp),
69173- atomic_read(&sp->sk_refcnt), sp,
69174- atomic_read(&sp->sk_drops));
69175+ atomic_read(&sp->sk_refcnt),
69176+#ifdef CONFIG_GRKERNSEC_HIDESYM
69177+ NULL,
69178+#else
69179+ sp,
69180+#endif
69181+ atomic_read_unchecked(&sp->sk_drops));
69182 }
69183
69184 int udp6_seq_show(struct seq_file *seq, void *v)
69185diff -urNp linux-2.6.32.43/net/irda/ircomm/ircomm_tty.c linux-2.6.32.43/net/irda/ircomm/ircomm_tty.c
69186--- linux-2.6.32.43/net/irda/ircomm/ircomm_tty.c 2011-03-27 14:31:47.000000000 -0400
69187+++ linux-2.6.32.43/net/irda/ircomm/ircomm_tty.c 2011-04-17 15:56:46.000000000 -0400
69188@@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
69189 add_wait_queue(&self->open_wait, &wait);
69190
69191 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
69192- __FILE__,__LINE__, tty->driver->name, self->open_count );
69193+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
69194
69195 /* As far as I can see, we protect open_count - Jean II */
69196 spin_lock_irqsave(&self->spinlock, flags);
69197 if (!tty_hung_up_p(filp)) {
69198 extra_count = 1;
69199- self->open_count--;
69200+ local_dec(&self->open_count);
69201 }
69202 spin_unlock_irqrestore(&self->spinlock, flags);
69203- self->blocked_open++;
69204+ local_inc(&self->blocked_open);
69205
69206 while (1) {
69207 if (tty->termios->c_cflag & CBAUD) {
69208@@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
69209 }
69210
69211 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
69212- __FILE__,__LINE__, tty->driver->name, self->open_count );
69213+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
69214
69215 schedule();
69216 }
69217@@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
69218 if (extra_count) {
69219 /* ++ is not atomic, so this should be protected - Jean II */
69220 spin_lock_irqsave(&self->spinlock, flags);
69221- self->open_count++;
69222+ local_inc(&self->open_count);
69223 spin_unlock_irqrestore(&self->spinlock, flags);
69224 }
69225- self->blocked_open--;
69226+ local_dec(&self->blocked_open);
69227
69228 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
69229- __FILE__,__LINE__, tty->driver->name, self->open_count);
69230+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
69231
69232 if (!retval)
69233 self->flags |= ASYNC_NORMAL_ACTIVE;
69234@@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
69235 }
69236 /* ++ is not atomic, so this should be protected - Jean II */
69237 spin_lock_irqsave(&self->spinlock, flags);
69238- self->open_count++;
69239+ local_inc(&self->open_count);
69240
69241 tty->driver_data = self;
69242 self->tty = tty;
69243 spin_unlock_irqrestore(&self->spinlock, flags);
69244
69245 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
69246- self->line, self->open_count);
69247+ self->line, local_read(&self->open_count));
69248
69249 /* Not really used by us, but lets do it anyway */
69250 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
69251@@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
69252 return;
69253 }
69254
69255- if ((tty->count == 1) && (self->open_count != 1)) {
69256+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
69257 /*
69258 * Uh, oh. tty->count is 1, which means that the tty
69259 * structure will be freed. state->count should always
69260@@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
69261 */
69262 IRDA_DEBUG(0, "%s(), bad serial port count; "
69263 "tty->count is 1, state->count is %d\n", __func__ ,
69264- self->open_count);
69265- self->open_count = 1;
69266+ local_read(&self->open_count));
69267+ local_set(&self->open_count, 1);
69268 }
69269
69270- if (--self->open_count < 0) {
69271+ if (local_dec_return(&self->open_count) < 0) {
69272 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
69273- __func__, self->line, self->open_count);
69274- self->open_count = 0;
69275+ __func__, self->line, local_read(&self->open_count));
69276+ local_set(&self->open_count, 0);
69277 }
69278- if (self->open_count) {
69279+ if (local_read(&self->open_count)) {
69280 spin_unlock_irqrestore(&self->spinlock, flags);
69281
69282 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
69283@@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
69284 tty->closing = 0;
69285 self->tty = NULL;
69286
69287- if (self->blocked_open) {
69288+ if (local_read(&self->blocked_open)) {
69289 if (self->close_delay)
69290 schedule_timeout_interruptible(self->close_delay);
69291 wake_up_interruptible(&self->open_wait);
69292@@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
69293 spin_lock_irqsave(&self->spinlock, flags);
69294 self->flags &= ~ASYNC_NORMAL_ACTIVE;
69295 self->tty = NULL;
69296- self->open_count = 0;
69297+ local_set(&self->open_count, 0);
69298 spin_unlock_irqrestore(&self->spinlock, flags);
69299
69300 wake_up_interruptible(&self->open_wait);
69301@@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
69302 seq_putc(m, '\n');
69303
69304 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
69305- seq_printf(m, "Open count: %d\n", self->open_count);
69306+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
69307 seq_printf(m, "Max data size: %d\n", self->max_data_size);
69308 seq_printf(m, "Max header size: %d\n", self->max_header_size);
69309
69310diff -urNp linux-2.6.32.43/net/iucv/af_iucv.c linux-2.6.32.43/net/iucv/af_iucv.c
69311--- linux-2.6.32.43/net/iucv/af_iucv.c 2011-03-27 14:31:47.000000000 -0400
69312+++ linux-2.6.32.43/net/iucv/af_iucv.c 2011-05-04 17:56:28.000000000 -0400
69313@@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct soc
69314
69315 write_lock_bh(&iucv_sk_list.lock);
69316
69317- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
69318+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
69319 while (__iucv_get_sock_by_name(name)) {
69320 sprintf(name, "%08x",
69321- atomic_inc_return(&iucv_sk_list.autobind_name));
69322+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
69323 }
69324
69325 write_unlock_bh(&iucv_sk_list.lock);
69326diff -urNp linux-2.6.32.43/net/key/af_key.c linux-2.6.32.43/net/key/af_key.c
69327--- linux-2.6.32.43/net/key/af_key.c 2011-03-27 14:31:47.000000000 -0400
69328+++ linux-2.6.32.43/net/key/af_key.c 2011-05-16 21:46:57.000000000 -0400
69329@@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk
69330 struct xfrm_migrate m[XFRM_MAX_DEPTH];
69331 struct xfrm_kmaddress k;
69332
69333+ pax_track_stack();
69334+
69335 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
69336 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
69337 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
69338@@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_fil
69339 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
69340 else
69341 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
69342+#ifdef CONFIG_GRKERNSEC_HIDESYM
69343+ NULL,
69344+#else
69345 s,
69346+#endif
69347 atomic_read(&s->sk_refcnt),
69348 sk_rmem_alloc_get(s),
69349 sk_wmem_alloc_get(s),
69350diff -urNp linux-2.6.32.43/net/mac80211/cfg.c linux-2.6.32.43/net/mac80211/cfg.c
69351--- linux-2.6.32.43/net/mac80211/cfg.c 2011-03-27 14:31:47.000000000 -0400
69352+++ linux-2.6.32.43/net/mac80211/cfg.c 2011-04-17 15:56:46.000000000 -0400
69353@@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(st
69354 return err;
69355 }
69356
69357-struct cfg80211_ops mac80211_config_ops = {
69358+const struct cfg80211_ops mac80211_config_ops = {
69359 .add_virtual_intf = ieee80211_add_iface,
69360 .del_virtual_intf = ieee80211_del_iface,
69361 .change_virtual_intf = ieee80211_change_iface,
69362diff -urNp linux-2.6.32.43/net/mac80211/cfg.h linux-2.6.32.43/net/mac80211/cfg.h
69363--- linux-2.6.32.43/net/mac80211/cfg.h 2011-03-27 14:31:47.000000000 -0400
69364+++ linux-2.6.32.43/net/mac80211/cfg.h 2011-04-17 15:56:46.000000000 -0400
69365@@ -4,6 +4,6 @@
69366 #ifndef __CFG_H
69367 #define __CFG_H
69368
69369-extern struct cfg80211_ops mac80211_config_ops;
69370+extern const struct cfg80211_ops mac80211_config_ops;
69371
69372 #endif /* __CFG_H */
69373diff -urNp linux-2.6.32.43/net/mac80211/debugfs_key.c linux-2.6.32.43/net/mac80211/debugfs_key.c
69374--- linux-2.6.32.43/net/mac80211/debugfs_key.c 2011-03-27 14:31:47.000000000 -0400
69375+++ linux-2.6.32.43/net/mac80211/debugfs_key.c 2011-04-17 15:56:46.000000000 -0400
69376@@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file
69377 size_t count, loff_t *ppos)
69378 {
69379 struct ieee80211_key *key = file->private_data;
69380- int i, res, bufsize = 2 * key->conf.keylen + 2;
69381+ int i, bufsize = 2 * key->conf.keylen + 2;
69382 char *buf = kmalloc(bufsize, GFP_KERNEL);
69383 char *p = buf;
69384+ ssize_t res;
69385+
69386+ if (buf == NULL)
69387+ return -ENOMEM;
69388
69389 for (i = 0; i < key->conf.keylen; i++)
69390 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
69391diff -urNp linux-2.6.32.43/net/mac80211/debugfs_sta.c linux-2.6.32.43/net/mac80211/debugfs_sta.c
69392--- linux-2.6.32.43/net/mac80211/debugfs_sta.c 2011-03-27 14:31:47.000000000 -0400
69393+++ linux-2.6.32.43/net/mac80211/debugfs_sta.c 2011-05-16 21:46:57.000000000 -0400
69394@@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struc
69395 int i;
69396 struct sta_info *sta = file->private_data;
69397
69398+ pax_track_stack();
69399+
69400 spin_lock_bh(&sta->lock);
69401 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
69402 sta->ampdu_mlme.dialog_token_allocator + 1);
69403diff -urNp linux-2.6.32.43/net/mac80211/ieee80211_i.h linux-2.6.32.43/net/mac80211/ieee80211_i.h
69404--- linux-2.6.32.43/net/mac80211/ieee80211_i.h 2011-03-27 14:31:47.000000000 -0400
69405+++ linux-2.6.32.43/net/mac80211/ieee80211_i.h 2011-04-17 15:56:46.000000000 -0400
69406@@ -25,6 +25,7 @@
69407 #include <linux/etherdevice.h>
69408 #include <net/cfg80211.h>
69409 #include <net/mac80211.h>
69410+#include <asm/local.h>
69411 #include "key.h"
69412 #include "sta_info.h"
69413
69414@@ -635,7 +636,7 @@ struct ieee80211_local {
69415 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
69416 spinlock_t queue_stop_reason_lock;
69417
69418- int open_count;
69419+ local_t open_count;
69420 int monitors, cooked_mntrs;
69421 /* number of interfaces with corresponding FIF_ flags */
69422 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
69423diff -urNp linux-2.6.32.43/net/mac80211/iface.c linux-2.6.32.43/net/mac80211/iface.c
69424--- linux-2.6.32.43/net/mac80211/iface.c 2011-03-27 14:31:47.000000000 -0400
69425+++ linux-2.6.32.43/net/mac80211/iface.c 2011-04-17 15:56:46.000000000 -0400
69426@@ -166,7 +166,7 @@ static int ieee80211_open(struct net_dev
69427 break;
69428 }
69429
69430- if (local->open_count == 0) {
69431+ if (local_read(&local->open_count) == 0) {
69432 res = drv_start(local);
69433 if (res)
69434 goto err_del_bss;
69435@@ -196,7 +196,7 @@ static int ieee80211_open(struct net_dev
69436 * Validate the MAC address for this device.
69437 */
69438 if (!is_valid_ether_addr(dev->dev_addr)) {
69439- if (!local->open_count)
69440+ if (!local_read(&local->open_count))
69441 drv_stop(local);
69442 return -EADDRNOTAVAIL;
69443 }
69444@@ -292,7 +292,7 @@ static int ieee80211_open(struct net_dev
69445
69446 hw_reconf_flags |= __ieee80211_recalc_idle(local);
69447
69448- local->open_count++;
69449+ local_inc(&local->open_count);
69450 if (hw_reconf_flags) {
69451 ieee80211_hw_config(local, hw_reconf_flags);
69452 /*
69453@@ -320,7 +320,7 @@ static int ieee80211_open(struct net_dev
69454 err_del_interface:
69455 drv_remove_interface(local, &conf);
69456 err_stop:
69457- if (!local->open_count)
69458+ if (!local_read(&local->open_count))
69459 drv_stop(local);
69460 err_del_bss:
69461 sdata->bss = NULL;
69462@@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_dev
69463 WARN_ON(!list_empty(&sdata->u.ap.vlans));
69464 }
69465
69466- local->open_count--;
69467+ local_dec(&local->open_count);
69468
69469 switch (sdata->vif.type) {
69470 case NL80211_IFTYPE_AP_VLAN:
69471@@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_dev
69472
69473 ieee80211_recalc_ps(local, -1);
69474
69475- if (local->open_count == 0) {
69476+ if (local_read(&local->open_count) == 0) {
69477 ieee80211_clear_tx_pending(local);
69478 ieee80211_stop_device(local);
69479
69480diff -urNp linux-2.6.32.43/net/mac80211/main.c linux-2.6.32.43/net/mac80211/main.c
69481--- linux-2.6.32.43/net/mac80211/main.c 2011-05-10 22:12:02.000000000 -0400
69482+++ linux-2.6.32.43/net/mac80211/main.c 2011-05-10 22:12:34.000000000 -0400
69483@@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211
69484 local->hw.conf.power_level = power;
69485 }
69486
69487- if (changed && local->open_count) {
69488+ if (changed && local_read(&local->open_count)) {
69489 ret = drv_config(local, changed);
69490 /*
69491 * Goal:
69492diff -urNp linux-2.6.32.43/net/mac80211/mlme.c linux-2.6.32.43/net/mac80211/mlme.c
69493--- linux-2.6.32.43/net/mac80211/mlme.c 2011-03-27 14:31:47.000000000 -0400
69494+++ linux-2.6.32.43/net/mac80211/mlme.c 2011-05-16 21:46:57.000000000 -0400
69495@@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee
69496 bool have_higher_than_11mbit = false, newsta = false;
69497 u16 ap_ht_cap_flags;
69498
69499+ pax_track_stack();
69500+
69501 /*
69502 * AssocResp and ReassocResp have identical structure, so process both
69503 * of them in this function.
69504diff -urNp linux-2.6.32.43/net/mac80211/pm.c linux-2.6.32.43/net/mac80211/pm.c
69505--- linux-2.6.32.43/net/mac80211/pm.c 2011-03-27 14:31:47.000000000 -0400
69506+++ linux-2.6.32.43/net/mac80211/pm.c 2011-04-17 15:56:46.000000000 -0400
69507@@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211
69508 }
69509
69510 /* stop hardware - this must stop RX */
69511- if (local->open_count)
69512+ if (local_read(&local->open_count))
69513 ieee80211_stop_device(local);
69514
69515 local->suspended = true;
69516diff -urNp linux-2.6.32.43/net/mac80211/rate.c linux-2.6.32.43/net/mac80211/rate.c
69517--- linux-2.6.32.43/net/mac80211/rate.c 2011-03-27 14:31:47.000000000 -0400
69518+++ linux-2.6.32.43/net/mac80211/rate.c 2011-04-17 15:56:46.000000000 -0400
69519@@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct
69520 struct rate_control_ref *ref, *old;
69521
69522 ASSERT_RTNL();
69523- if (local->open_count)
69524+ if (local_read(&local->open_count))
69525 return -EBUSY;
69526
69527 ref = rate_control_alloc(name, local);
69528diff -urNp linux-2.6.32.43/net/mac80211/tx.c linux-2.6.32.43/net/mac80211/tx.c
69529--- linux-2.6.32.43/net/mac80211/tx.c 2011-03-27 14:31:47.000000000 -0400
69530+++ linux-2.6.32.43/net/mac80211/tx.c 2011-04-17 15:56:46.000000000 -0400
69531@@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
69532 return cpu_to_le16(dur);
69533 }
69534
69535-static int inline is_ieee80211_device(struct ieee80211_local *local,
69536+static inline int is_ieee80211_device(struct ieee80211_local *local,
69537 struct net_device *dev)
69538 {
69539 return local == wdev_priv(dev->ieee80211_ptr);
69540diff -urNp linux-2.6.32.43/net/mac80211/util.c linux-2.6.32.43/net/mac80211/util.c
69541--- linux-2.6.32.43/net/mac80211/util.c 2011-03-27 14:31:47.000000000 -0400
69542+++ linux-2.6.32.43/net/mac80211/util.c 2011-04-17 15:56:46.000000000 -0400
69543@@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_
69544 local->resuming = true;
69545
69546 /* restart hardware */
69547- if (local->open_count) {
69548+ if (local_read(&local->open_count)) {
69549 /*
69550 * Upon resume hardware can sometimes be goofy due to
69551 * various platform / driver / bus issues, so restarting
69552diff -urNp linux-2.6.32.43/net/netfilter/ipvs/ip_vs_app.c linux-2.6.32.43/net/netfilter/ipvs/ip_vs_app.c
69553--- linux-2.6.32.43/net/netfilter/ipvs/ip_vs_app.c 2011-03-27 14:31:47.000000000 -0400
69554+++ linux-2.6.32.43/net/netfilter/ipvs/ip_vs_app.c 2011-05-17 19:26:34.000000000 -0400
69555@@ -564,7 +564,7 @@ static const struct file_operations ip_v
69556 .open = ip_vs_app_open,
69557 .read = seq_read,
69558 .llseek = seq_lseek,
69559- .release = seq_release,
69560+ .release = seq_release_net,
69561 };
69562 #endif
69563
69564diff -urNp linux-2.6.32.43/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.32.43/net/netfilter/ipvs/ip_vs_conn.c
69565--- linux-2.6.32.43/net/netfilter/ipvs/ip_vs_conn.c 2011-03-27 14:31:47.000000000 -0400
69566+++ linux-2.6.32.43/net/netfilter/ipvs/ip_vs_conn.c 2011-05-17 19:26:34.000000000 -0400
69567@@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
69568 /* if the connection is not template and is created
69569 * by sync, preserve the activity flag.
69570 */
69571- cp->flags |= atomic_read(&dest->conn_flags) &
69572+ cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
69573 (~IP_VS_CONN_F_INACTIVE);
69574 else
69575- cp->flags |= atomic_read(&dest->conn_flags);
69576+ cp->flags |= atomic_read_unchecked(&dest->conn_flags);
69577 cp->dest = dest;
69578
69579 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
69580@@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const
69581 atomic_set(&cp->refcnt, 1);
69582
69583 atomic_set(&cp->n_control, 0);
69584- atomic_set(&cp->in_pkts, 0);
69585+ atomic_set_unchecked(&cp->in_pkts, 0);
69586
69587 atomic_inc(&ip_vs_conn_count);
69588 if (flags & IP_VS_CONN_F_NO_CPORT)
69589@@ -871,7 +871,7 @@ static const struct file_operations ip_v
69590 .open = ip_vs_conn_open,
69591 .read = seq_read,
69592 .llseek = seq_lseek,
69593- .release = seq_release,
69594+ .release = seq_release_net,
69595 };
69596
69597 static const char *ip_vs_origin_name(unsigned flags)
69598@@ -934,7 +934,7 @@ static const struct file_operations ip_v
69599 .open = ip_vs_conn_sync_open,
69600 .read = seq_read,
69601 .llseek = seq_lseek,
69602- .release = seq_release,
69603+ .release = seq_release_net,
69604 };
69605
69606 #endif
69607@@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip
69608
69609 /* Don't drop the entry if its number of incoming packets is not
69610 located in [0, 8] */
69611- i = atomic_read(&cp->in_pkts);
69612+ i = atomic_read_unchecked(&cp->in_pkts);
69613 if (i > 8 || i < 0) return 0;
69614
69615 if (!todrop_rate[i]) return 0;
69616diff -urNp linux-2.6.32.43/net/netfilter/ipvs/ip_vs_core.c linux-2.6.32.43/net/netfilter/ipvs/ip_vs_core.c
69617--- linux-2.6.32.43/net/netfilter/ipvs/ip_vs_core.c 2011-03-27 14:31:47.000000000 -0400
69618+++ linux-2.6.32.43/net/netfilter/ipvs/ip_vs_core.c 2011-05-04 17:56:28.000000000 -0400
69619@@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv
69620 ret = cp->packet_xmit(skb, cp, pp);
69621 /* do not touch skb anymore */
69622
69623- atomic_inc(&cp->in_pkts);
69624+ atomic_inc_unchecked(&cp->in_pkts);
69625 ip_vs_conn_put(cp);
69626 return ret;
69627 }
69628@@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk
69629 * Sync connection if it is about to close to
69630 * encorage the standby servers to update the connections timeout
69631 */
69632- pkts = atomic_add_return(1, &cp->in_pkts);
69633+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
69634 if (af == AF_INET &&
69635 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
69636 (((cp->protocol != IPPROTO_TCP ||
69637diff -urNp linux-2.6.32.43/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.32.43/net/netfilter/ipvs/ip_vs_ctl.c
69638--- linux-2.6.32.43/net/netfilter/ipvs/ip_vs_ctl.c 2011-03-27 14:31:47.000000000 -0400
69639+++ linux-2.6.32.43/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-17 19:26:34.000000000 -0400
69640@@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service
69641 ip_vs_rs_hash(dest);
69642 write_unlock_bh(&__ip_vs_rs_lock);
69643 }
69644- atomic_set(&dest->conn_flags, conn_flags);
69645+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
69646
69647 /* bind the service */
69648 if (!dest->svc) {
69649@@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se
69650 " %-7s %-6d %-10d %-10d\n",
69651 &dest->addr.in6,
69652 ntohs(dest->port),
69653- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
69654+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
69655 atomic_read(&dest->weight),
69656 atomic_read(&dest->activeconns),
69657 atomic_read(&dest->inactconns));
69658@@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se
69659 "%-7s %-6d %-10d %-10d\n",
69660 ntohl(dest->addr.ip),
69661 ntohs(dest->port),
69662- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
69663+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
69664 atomic_read(&dest->weight),
69665 atomic_read(&dest->activeconns),
69666 atomic_read(&dest->inactconns));
69667@@ -1927,7 +1927,7 @@ static const struct file_operations ip_v
69668 .open = ip_vs_info_open,
69669 .read = seq_read,
69670 .llseek = seq_lseek,
69671- .release = seq_release_private,
69672+ .release = seq_release_net,
69673 };
69674
69675 #endif
69676@@ -1976,7 +1976,7 @@ static const struct file_operations ip_v
69677 .open = ip_vs_stats_seq_open,
69678 .read = seq_read,
69679 .llseek = seq_lseek,
69680- .release = single_release,
69681+ .release = single_release_net,
69682 };
69683
69684 #endif
69685@@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip
69686
69687 entry.addr = dest->addr.ip;
69688 entry.port = dest->port;
69689- entry.conn_flags = atomic_read(&dest->conn_flags);
69690+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
69691 entry.weight = atomic_read(&dest->weight);
69692 entry.u_threshold = dest->u_threshold;
69693 entry.l_threshold = dest->l_threshold;
69694@@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cm
69695 unsigned char arg[128];
69696 int ret = 0;
69697
69698+ pax_track_stack();
69699+
69700 if (!capable(CAP_NET_ADMIN))
69701 return -EPERM;
69702
69703@@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct s
69704 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
69705
69706 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
69707- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
69708+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
69709 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
69710 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
69711 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
69712diff -urNp linux-2.6.32.43/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.32.43/net/netfilter/ipvs/ip_vs_sync.c
69713--- linux-2.6.32.43/net/netfilter/ipvs/ip_vs_sync.c 2011-03-27 14:31:47.000000000 -0400
69714+++ linux-2.6.32.43/net/netfilter/ipvs/ip_vs_sync.c 2011-05-04 17:56:28.000000000 -0400
69715@@ -438,7 +438,7 @@ static void ip_vs_process_message(const
69716
69717 if (opt)
69718 memcpy(&cp->in_seq, opt, sizeof(*opt));
69719- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
69720+ atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
69721 cp->state = state;
69722 cp->old_state = cp->state;
69723 /*
69724diff -urNp linux-2.6.32.43/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.32.43/net/netfilter/ipvs/ip_vs_xmit.c
69725--- linux-2.6.32.43/net/netfilter/ipvs/ip_vs_xmit.c 2011-03-27 14:31:47.000000000 -0400
69726+++ linux-2.6.32.43/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-04 17:56:28.000000000 -0400
69727@@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
69728 else
69729 rc = NF_ACCEPT;
69730 /* do not touch skb anymore */
69731- atomic_inc(&cp->in_pkts);
69732+ atomic_inc_unchecked(&cp->in_pkts);
69733 goto out;
69734 }
69735
69736@@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
69737 else
69738 rc = NF_ACCEPT;
69739 /* do not touch skb anymore */
69740- atomic_inc(&cp->in_pkts);
69741+ atomic_inc_unchecked(&cp->in_pkts);
69742 goto out;
69743 }
69744
69745diff -urNp linux-2.6.32.43/net/netfilter/Kconfig linux-2.6.32.43/net/netfilter/Kconfig
69746--- linux-2.6.32.43/net/netfilter/Kconfig 2011-03-27 14:31:47.000000000 -0400
69747+++ linux-2.6.32.43/net/netfilter/Kconfig 2011-04-17 15:56:46.000000000 -0400
69748@@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
69749
69750 To compile it as a module, choose M here. If unsure, say N.
69751
69752+config NETFILTER_XT_MATCH_GRADM
69753+ tristate '"gradm" match support'
69754+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
69755+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
69756+ ---help---
69757+	  The gradm match allows matching on grsecurity RBAC being enabled.
69758+ It is useful when iptables rules are applied early on bootup to
69759+ prevent connections to the machine (except from a trusted host)
69760+ while the RBAC system is disabled.
69761+
69762 config NETFILTER_XT_MATCH_HASHLIMIT
69763 tristate '"hashlimit" match support'
69764 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
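(Usage sketch, not part of the patch: the help text above describes loading restrictive iptables rules at early boot and keeping them in force until the RBAC policy is enabled. With the userspace extension distributed alongside gradm, an early-boot ruleset could look roughly like the lines below. The --disabled option name is an assumption about that userspace extension, and 192.0.2.10 is a placeholder for the trusted admin host; check the libxt_gradm extension actually shipped with your gradm version before relying on these names.)

  # accept the trusted admin host unconditionally (placeholder address)
  iptables -A INPUT -s 192.0.2.10 -j ACCEPT
  # while the RBAC system is still disabled, drop all other inbound traffic
  iptables -A INPUT -m gradm --disabled -j DROP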
69765diff -urNp linux-2.6.32.43/net/netfilter/Makefile linux-2.6.32.43/net/netfilter/Makefile
69766--- linux-2.6.32.43/net/netfilter/Makefile 2011-03-27 14:31:47.000000000 -0400
69767+++ linux-2.6.32.43/net/netfilter/Makefile 2011-04-17 15:56:46.000000000 -0400
69768@@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRAC
69769 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
69770 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
69771 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
69772+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
69773 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
69774 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
69775 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
69776diff -urNp linux-2.6.32.43/net/netfilter/nf_conntrack_netlink.c linux-2.6.32.43/net/netfilter/nf_conntrack_netlink.c
69777--- linux-2.6.32.43/net/netfilter/nf_conntrack_netlink.c 2011-03-27 14:31:47.000000000 -0400
69778+++ linux-2.6.32.43/net/netfilter/nf_conntrack_netlink.c 2011-04-17 15:56:46.000000000 -0400
69779@@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlatt
69780 static int
69781 ctnetlink_parse_tuple(const struct nlattr * const cda[],
69782 struct nf_conntrack_tuple *tuple,
69783- enum ctattr_tuple type, u_int8_t l3num)
69784+ enum ctattr_type type, u_int8_t l3num)
69785 {
69786 struct nlattr *tb[CTA_TUPLE_MAX+1];
69787 int err;
69788diff -urNp linux-2.6.32.43/net/netfilter/nfnetlink_log.c linux-2.6.32.43/net/netfilter/nfnetlink_log.c
69789--- linux-2.6.32.43/net/netfilter/nfnetlink_log.c 2011-03-27 14:31:47.000000000 -0400
69790+++ linux-2.6.32.43/net/netfilter/nfnetlink_log.c 2011-05-04 17:56:28.000000000 -0400
69791@@ -68,7 +68,7 @@ struct nfulnl_instance {
69792 };
69793
69794 static DEFINE_RWLOCK(instances_lock);
69795-static atomic_t global_seq;
69796+static atomic_unchecked_t global_seq;
69797
69798 #define INSTANCE_BUCKETS 16
69799 static struct hlist_head instance_table[INSTANCE_BUCKETS];
69800@@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins
69801 /* global sequence number */
69802 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
69803 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
69804- htonl(atomic_inc_return(&global_seq)));
69805+ htonl(atomic_inc_return_unchecked(&global_seq)));
69806
69807 if (data_len) {
69808 struct nlattr *nla;
69809diff -urNp linux-2.6.32.43/net/netfilter/xt_gradm.c linux-2.6.32.43/net/netfilter/xt_gradm.c
69810--- linux-2.6.32.43/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
69811+++ linux-2.6.32.43/net/netfilter/xt_gradm.c 2011-04-17 15:56:46.000000000 -0400
69812@@ -0,0 +1,51 @@
69813+/*
69814+ * gradm match for netfilter
69815