1diff -urNp linux-2.6.32.43/arch/alpha/include/asm/elf.h linux-2.6.32.43/arch/alpha/include/asm/elf.h
2--- linux-2.6.32.43/arch/alpha/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3+++ linux-2.6.32.43/arch/alpha/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
4@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
5
6 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
7
8+#ifdef CONFIG_PAX_ASLR
9+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
10+
11+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
12+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
13+#endif
14+
15 /* $0 is set by ld.so to a pointer to a function which might be
16 registered using atexit. This provides a mean for the dynamic
17 linker to call DT_FINI functions for shared libraries that have
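The PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN values introduced above are bit counts, not byte offsets: they say how many page-number bits get randomized when CONFIG_PAX_RANDMMAP is active (28 and 19 for the 64-bit Alpha personality, 14 each under ADDR_LIMIT_32BIT). A rough standalone illustration of how such a count turns into a page-aligned slide follows; this is plain userspace C, not patch code — PAGE_SHIFT of 13 assumes Alpha's 8 KiB pages and getrandom() merely stands in for the kernel's own entropy source.

/* standalone sketch, not patch code */
#include <stdio.h>
#include <sys/random.h>

#define PAGE_SHIFT         13   /* Alpha uses 8 KiB pages */
#define PAX_DELTA_MMAP_LEN 28   /* 64-bit personality value from the hunk above */

int main(void)
{
	unsigned long rnd = 0;

	getrandom(&rnd, sizeof(rnd), 0);
	/* keep PAX_DELTA_MMAP_LEN random page-number bits, then rescale to bytes */
	printf("mmap base slide: %#lx of a possible %#lx bytes\n",
	       (rnd & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT,
	       1UL << (PAX_DELTA_MMAP_LEN + PAGE_SHIFT));
	return 0;
}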
18diff -urNp linux-2.6.32.43/arch/alpha/include/asm/pgtable.h linux-2.6.32.43/arch/alpha/include/asm/pgtable.h
19--- linux-2.6.32.43/arch/alpha/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
20+++ linux-2.6.32.43/arch/alpha/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
21@@ -101,6 +101,17 @@ struct vm_area_struct;
22 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
23 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
24 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
25+
26+#ifdef CONFIG_PAX_PAGEEXEC
27+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
28+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
29+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
30+#else
31+# define PAGE_SHARED_NOEXEC PAGE_SHARED
32+# define PAGE_COPY_NOEXEC PAGE_COPY
33+# define PAGE_READONLY_NOEXEC PAGE_READONLY
34+#endif
35+
36 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
37
38 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
39diff -urNp linux-2.6.32.43/arch/alpha/kernel/module.c linux-2.6.32.43/arch/alpha/kernel/module.c
40--- linux-2.6.32.43/arch/alpha/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
41+++ linux-2.6.32.43/arch/alpha/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
42@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
43
44 /* The small sections were sorted to the end of the segment.
45 The following should definitely cover them. */
46- gp = (u64)me->module_core + me->core_size - 0x8000;
47+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
48 got = sechdrs[me->arch.gotsecindex].sh_addr;
49
50 for (i = 0; i < n; i++) {
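The module_core_rw and core_size_rw names in the hunk above come from a struct module layout change made elsewhere in this patch: the single module_core/core_size pair is split into read-execute and read-write halves so module text can stay non-writable. A sketch of the fields the relocation code is relying on (recalled from the wider PaX layout, not copied from this file):

/* sketch of the split layout assumed by the hunks in this patch */
struct module_layout_sketch {
	void *module_init_rx, *module_init_rw;   /* was: module_init */
	void *module_core_rx, *module_core_rw;   /* was: module_core */
	unsigned int init_size_rx, init_size_rw; /* was: init_size   */
	unsigned int core_size_rx, core_size_rw; /* was: core_size   */
};

Because the GOT and sorted small-data sections land in the writable half, the Alpha gp bias above is now taken against module_core_rw + core_size_rw rather than the old unified core region.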
51diff -urNp linux-2.6.32.43/arch/alpha/kernel/osf_sys.c linux-2.6.32.43/arch/alpha/kernel/osf_sys.c
52--- linux-2.6.32.43/arch/alpha/kernel/osf_sys.c 2011-03-27 14:31:47.000000000 -0400
53+++ linux-2.6.32.43/arch/alpha/kernel/osf_sys.c 2011-06-13 17:19:47.000000000 -0400
54@@ -431,7 +431,7 @@ SYSCALL_DEFINE2(osf_getdomainname, char
55 return -EFAULT;
56
57 len = namelen;
58- if (namelen > 32)
59+ if (len > 32)
60 len = 32;
61
62 down_read(&uts_sem);
63@@ -618,7 +618,7 @@ SYSCALL_DEFINE3(osf_sysinfo, int, comman
64 down_read(&uts_sem);
65 res = sysinfo_table[offset];
66 len = strlen(res)+1;
67- if (len > count)
68+ if ((unsigned long)len > (unsigned long)count)
69 len = count;
70 if (copy_to_user(buf, res, len))
71 err = -EFAULT;
72@@ -673,7 +673,7 @@ SYSCALL_DEFINE5(osf_getsysinfo, unsigned
73 return 1;
74
75 case GSI_GET_HWRPB:
76- if (nbytes < sizeof(*hwrpb))
77+ if (nbytes > sizeof(*hwrpb))
78 return -EINVAL;
79 if (copy_to_user(buffer, hwrpb, nbytes) != 0)
80 return -EFAULT;
81@@ -1035,6 +1035,7 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, i
82 {
83 struct rusage r;
84 long ret, err;
85+ unsigned int status = 0;
86 mm_segment_t old_fs;
87
88 if (!ur)
89@@ -1043,13 +1044,15 @@ SYSCALL_DEFINE4(osf_wait4, pid_t, pid, i
90 old_fs = get_fs();
91
92 set_fs (KERNEL_DS);
93- ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r);
94+ ret = sys_wait4(pid, (unsigned int __user *) &status, options,
95+ (struct rusage __user *) &r);
96 set_fs (old_fs);
97
98 if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
99 return -EFAULT;
100
101 err = 0;
102+ err |= put_user(status, ustatus);
103 err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
104 err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
105 err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
106@@ -1169,7 +1172,7 @@ arch_get_unmapped_area_1(unsigned long a
107 /* At this point: (!vma || addr < vma->vm_end). */
108 if (limit - len < addr)
109 return -ENOMEM;
110- if (!vma || addr + len <= vma->vm_start)
111+ if (check_heap_stack_gap(vma, addr, len))
112 return addr;
113 addr = vma->vm_end;
114 vma = vma->vm_next;
115@@ -1205,6 +1208,10 @@ arch_get_unmapped_area(struct file *filp
116 merely specific addresses, but regions of memory -- perhaps
117 this feature should be incorporated into all ports? */
118
119+#ifdef CONFIG_PAX_RANDMMAP
120+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
121+#endif
122+
123 if (addr) {
124 addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
125 if (addr != (unsigned long) -ENOMEM)
126@@ -1212,8 +1219,8 @@ arch_get_unmapped_area(struct file *filp
127 }
128
129 /* Next, try allocating at TASK_UNMAPPED_BASE. */
130- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
131- len, limit);
132+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
133+
134 if (addr != (unsigned long) -ENOMEM)
135 return addr;
136
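check_heap_stack_gap(), used here and in many arch_get_unmapped_area() hunks below, replaces the open-coded "!vma || addr + len <= vma->vm_start" test. The helper itself is added to include/linux/mm.h by another part of this patch; what follows is only a hedged sketch of the kind of check it performs — the sysctl name and the exact gap accounting are recalled from memory and may differ in detail.

/* hedged sketch, not the literal helper added by this patch */
extern unsigned long sysctl_heap_stack_gap;    /* assumed tunable gap size */

static inline bool check_heap_stack_gap(const struct vm_area_struct *vma,
					unsigned long addr, unsigned long len)
{
	unsigned long gap = 0;

	if (!vma)
		return true;                       /* nothing mapped above addr   */
	if (vma->vm_flags & VM_GROWSDOWN)          /* next vma can grow toward us */
		gap = sysctl_heap_stack_gap;
	return addr + len + gap <= vma->vm_start;  /* fits, including the gap     */
}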
137diff -urNp linux-2.6.32.43/arch/alpha/mm/fault.c linux-2.6.32.43/arch/alpha/mm/fault.c
138--- linux-2.6.32.43/arch/alpha/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
139+++ linux-2.6.32.43/arch/alpha/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
140@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
141 __reload_thread(pcb);
142 }
143
144+#ifdef CONFIG_PAX_PAGEEXEC
145+/*
146+ * PaX: decide what to do with offenders (regs->pc = fault address)
147+ *
148+ * returns 1 when task should be killed
149+ * 2 when patched PLT trampoline was detected
150+ * 3 when unpatched PLT trampoline was detected
151+ */
152+static int pax_handle_fetch_fault(struct pt_regs *regs)
153+{
154+
155+#ifdef CONFIG_PAX_EMUPLT
156+ int err;
157+
158+ do { /* PaX: patched PLT emulation #1 */
159+ unsigned int ldah, ldq, jmp;
160+
161+ err = get_user(ldah, (unsigned int *)regs->pc);
162+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
163+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
164+
165+ if (err)
166+ break;
167+
168+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
169+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
170+ jmp == 0x6BFB0000U)
171+ {
172+ unsigned long r27, addr;
173+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
174+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
175+
176+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
177+ err = get_user(r27, (unsigned long *)addr);
178+ if (err)
179+ break;
180+
181+ regs->r27 = r27;
182+ regs->pc = r27;
183+ return 2;
184+ }
185+ } while (0);
186+
187+ do { /* PaX: patched PLT emulation #2 */
188+ unsigned int ldah, lda, br;
189+
190+ err = get_user(ldah, (unsigned int *)regs->pc);
191+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
192+ err |= get_user(br, (unsigned int *)(regs->pc+8));
193+
194+ if (err)
195+ break;
196+
197+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
198+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
199+ (br & 0xFFE00000U) == 0xC3E00000U)
200+ {
201+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
202+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
203+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
204+
205+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
206+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
207+ return 2;
208+ }
209+ } while (0);
210+
211+ do { /* PaX: unpatched PLT emulation */
212+ unsigned int br;
213+
214+ err = get_user(br, (unsigned int *)regs->pc);
215+
216+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
217+ unsigned int br2, ldq, nop, jmp;
218+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
219+
220+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
221+ err = get_user(br2, (unsigned int *)addr);
222+ err |= get_user(ldq, (unsigned int *)(addr+4));
223+ err |= get_user(nop, (unsigned int *)(addr+8));
224+ err |= get_user(jmp, (unsigned int *)(addr+12));
225+ err |= get_user(resolver, (unsigned long *)(addr+16));
226+
227+ if (err)
228+ break;
229+
230+ if (br2 == 0xC3600000U &&
231+ ldq == 0xA77B000CU &&
232+ nop == 0x47FF041FU &&
233+ jmp == 0x6B7B0000U)
234+ {
235+ regs->r28 = regs->pc+4;
236+ regs->r27 = addr+16;
237+ regs->pc = resolver;
238+ return 3;
239+ }
240+ }
241+ } while (0);
242+#endif
243+
244+ return 1;
245+}
246+
247+void pax_report_insns(void *pc, void *sp)
248+{
249+ unsigned long i;
250+
251+ printk(KERN_ERR "PAX: bytes at PC: ");
252+ for (i = 0; i < 5; i++) {
253+ unsigned int c;
254+ if (get_user(c, (unsigned int *)pc+i))
255+ printk(KERN_CONT "???????? ");
256+ else
257+ printk(KERN_CONT "%08x ", c);
258+ }
259+ printk("\n");
260+}
261+#endif
262
263 /*
264 * This routine handles page faults. It determines the address,
265@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
266 good_area:
267 si_code = SEGV_ACCERR;
268 if (cause < 0) {
269- if (!(vma->vm_flags & VM_EXEC))
270+ if (!(vma->vm_flags & VM_EXEC)) {
271+
272+#ifdef CONFIG_PAX_PAGEEXEC
273+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
274+ goto bad_area;
275+
276+ up_read(&mm->mmap_sem);
277+ switch (pax_handle_fetch_fault(regs)) {
278+
279+#ifdef CONFIG_PAX_EMUPLT
280+ case 2:
281+ case 3:
282+ return;
283+#endif
284+
285+ }
286+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
287+ do_group_exit(SIGKILL);
288+#else
289 goto bad_area;
290+#endif
291+
292+ }
293 } else if (!cause) {
294 /* Allow reads even for write-only mappings */
295 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
296diff -urNp linux-2.6.32.43/arch/arm/include/asm/elf.h linux-2.6.32.43/arch/arm/include/asm/elf.h
297--- linux-2.6.32.43/arch/arm/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
298+++ linux-2.6.32.43/arch/arm/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
299@@ -109,7 +109,14 @@ int dump_task_regs(struct task_struct *t
300 the loader. We need to make sure that it is out of the way of the program
301 that it will "exec", and that there is sufficient room for the brk. */
302
303-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
304+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
305+
306+#ifdef CONFIG_PAX_ASLR
307+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
308+
309+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
310+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
311+#endif
312
313 /* When the program starts, a1 contains a pointer to a function to be
314 registered with atexit, as per the SVR4 ABI. A value of 0 means we
315diff -urNp linux-2.6.32.43/arch/arm/include/asm/kmap_types.h linux-2.6.32.43/arch/arm/include/asm/kmap_types.h
316--- linux-2.6.32.43/arch/arm/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
317+++ linux-2.6.32.43/arch/arm/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
318@@ -19,6 +19,7 @@ enum km_type {
319 KM_SOFTIRQ0,
320 KM_SOFTIRQ1,
321 KM_L2_CACHE,
322+ KM_CLEARPAGE,
323 KM_TYPE_NR
324 };
325
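KM_CLEARPAGE is a new atomic-kmap slot so that page-scrubbing code added elsewhere in this patch does not have to share the interrupt or softirq slots. A hedged sketch of the kind of caller it appears to serve, written against the 2.6.32-era kmap_atomic(page, type) convention:

/* sketch of a typical user of the new slot (not code from this hunk) */
void *kaddr = kmap_atomic(page, KM_CLEARPAGE);
clear_page(kaddr);                  /* scrub the page before it is reused */
kunmap_atomic(kaddr, KM_CLEARPAGE);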
326diff -urNp linux-2.6.32.43/arch/arm/include/asm/uaccess.h linux-2.6.32.43/arch/arm/include/asm/uaccess.h
327--- linux-2.6.32.43/arch/arm/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
328+++ linux-2.6.32.43/arch/arm/include/asm/uaccess.h 2011-06-29 21:02:24.000000000 -0400
329@@ -22,6 +22,8 @@
330 #define VERIFY_READ 0
331 #define VERIFY_WRITE 1
332
333+extern void check_object_size(const void *ptr, unsigned long n, bool to);
334+
335 /*
336 * The exception table consists of pairs of addresses: the first is the
337 * address of an instruction that is allowed to fault, and the second is
338@@ -387,8 +389,23 @@ do { \
339
340
341 #ifdef CONFIG_MMU
342-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
343-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
344+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
345+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
346+
347+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
348+{
349+ if (!__builtin_constant_p(n))
350+ check_object_size(to, n, false);
351+ return ___copy_from_user(to, from, n);
352+}
353+
354+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
355+{
356+ if (!__builtin_constant_p(n))
357+ check_object_size(from, n, true);
358+ return ___copy_to_user(to, from, n);
359+}
360+
361 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
362 extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
363 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
364@@ -403,6 +420,9 @@ extern unsigned long __must_check __strn
365
366 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
367 {
368+ if ((long)n < 0)
369+ return n;
370+
371 if (access_ok(VERIFY_READ, from, n))
372 n = __copy_from_user(to, from, n);
373 else /* security hole - plug it */
374@@ -412,6 +432,9 @@ static inline unsigned long __must_check
375
376 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
377 {
378+ if ((long)n < 0)
379+ return n;
380+
381 if (access_ok(VERIFY_WRITE, to, n))
382 n = __copy_to_user(to, from, n);
383 return n;
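The added "(long)n < 0" test in copy_from_user()/copy_to_user() rejects lengths whose sign bit is set before access_ok() ever sees them: a negative length that reaches the unsigned parameter looks like an enormous copy. A standalone illustration of the value such a bogus length takes on (plain userspace C, not patch code):

#include <stdio.h>

int main(void)
{
	int miscomputed = -4;                  /* e.g. "len - hdrlen" gone negative */
	unsigned long n = (unsigned long)miscomputed;

	printf("n = %lu, (long)n < 0 ? %s\n", n, (long)n < 0 ? "yes" : "no");
	return 0;   /* on 64-bit prints n = 18446744073709551612, and "yes" */
}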
384diff -urNp linux-2.6.32.43/arch/arm/kernel/armksyms.c linux-2.6.32.43/arch/arm/kernel/armksyms.c
385--- linux-2.6.32.43/arch/arm/kernel/armksyms.c 2011-03-27 14:31:47.000000000 -0400
386+++ linux-2.6.32.43/arch/arm/kernel/armksyms.c 2011-07-06 19:51:50.000000000 -0400
387@@ -118,8 +118,8 @@ EXPORT_SYMBOL(__strncpy_from_user);
388 #ifdef CONFIG_MMU
389 EXPORT_SYMBOL(copy_page);
390
391-EXPORT_SYMBOL(__copy_from_user);
392-EXPORT_SYMBOL(__copy_to_user);
393+EXPORT_SYMBOL(___copy_from_user);
394+EXPORT_SYMBOL(___copy_to_user);
395 EXPORT_SYMBOL(__clear_user);
396
397 EXPORT_SYMBOL(__get_user_1);
398diff -urNp linux-2.6.32.43/arch/arm/kernel/kgdb.c linux-2.6.32.43/arch/arm/kernel/kgdb.c
399--- linux-2.6.32.43/arch/arm/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
400+++ linux-2.6.32.43/arch/arm/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
401@@ -190,7 +190,7 @@ void kgdb_arch_exit(void)
402 * and we handle the normal undef case within the do_undefinstr
403 * handler.
404 */
405-struct kgdb_arch arch_kgdb_ops = {
406+const struct kgdb_arch arch_kgdb_ops = {
407 #ifndef __ARMEB__
408 .gdb_bpt_instr = {0xfe, 0xde, 0xff, 0xe7}
409 #else /* ! __ARMEB__ */
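Constifying arch_kgdb_ops (and the many platform_suspend_ops / dma_map_ops instances further down) is a hardening measure in its own right: a const-qualified table of function pointers is emitted into a read-only section, so a kernel write primitive cannot redirect its callbacks. A minimal standalone illustration of the placement difference — the section names assume a typical non-PIC ELF build (under PIC the const copy lands in .data.rel.ro instead):

#include <stdio.h>

struct ops { int (*handler)(void); };

static int noop(void) { return 0; }

static struct ops writable_ops = { .handler = noop };        /* lands in .data   */
static const struct ops readonly_ops = { .handler = noop };  /* lands in .rodata */

int main(void)
{
	/* objdump -t on this file shows the two symbols in different sections;
	 * the kernel additionally write-protects .rodata at runtime */
	printf("%d %d\n", writable_ops.handler(), readonly_ops.handler());
	return 0;
}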
410diff -urNp linux-2.6.32.43/arch/arm/kernel/traps.c linux-2.6.32.43/arch/arm/kernel/traps.c
411--- linux-2.6.32.43/arch/arm/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
412+++ linux-2.6.32.43/arch/arm/kernel/traps.c 2011-06-13 21:31:18.000000000 -0400
413@@ -247,6 +247,8 @@ static void __die(const char *str, int e
414
415 DEFINE_SPINLOCK(die_lock);
416
417+extern void gr_handle_kernel_exploit(void);
418+
419 /*
420 * This function is protected against re-entrancy.
421 */
422@@ -271,6 +273,8 @@ NORET_TYPE void die(const char *str, str
423 if (panic_on_oops)
424 panic("Fatal exception");
425
426+ gr_handle_kernel_exploit();
427+
428 do_exit(SIGSEGV);
429 }
430
431diff -urNp linux-2.6.32.43/arch/arm/lib/copy_from_user.S linux-2.6.32.43/arch/arm/lib/copy_from_user.S
432--- linux-2.6.32.43/arch/arm/lib/copy_from_user.S 2011-03-27 14:31:47.000000000 -0400
433+++ linux-2.6.32.43/arch/arm/lib/copy_from_user.S 2011-06-29 20:48:38.000000000 -0400
434@@ -16,7 +16,7 @@
435 /*
436 * Prototype:
437 *
438- * size_t __copy_from_user(void *to, const void *from, size_t n)
439+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
440 *
441 * Purpose:
442 *
443@@ -84,11 +84,11 @@
444
445 .text
446
447-ENTRY(__copy_from_user)
448+ENTRY(___copy_from_user)
449
450 #include "copy_template.S"
451
452-ENDPROC(__copy_from_user)
453+ENDPROC(___copy_from_user)
454
455 .section .fixup,"ax"
456 .align 0
457diff -urNp linux-2.6.32.43/arch/arm/lib/copy_to_user.S linux-2.6.32.43/arch/arm/lib/copy_to_user.S
458--- linux-2.6.32.43/arch/arm/lib/copy_to_user.S 2011-03-27 14:31:47.000000000 -0400
459+++ linux-2.6.32.43/arch/arm/lib/copy_to_user.S 2011-06-29 20:46:49.000000000 -0400
460@@ -16,7 +16,7 @@
461 /*
462 * Prototype:
463 *
464- * size_t __copy_to_user(void *to, const void *from, size_t n)
465+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
466 *
467 * Purpose:
468 *
469@@ -88,11 +88,11 @@
470 .text
471
472 ENTRY(__copy_to_user_std)
473-WEAK(__copy_to_user)
474+WEAK(___copy_to_user)
475
476 #include "copy_template.S"
477
478-ENDPROC(__copy_to_user)
479+ENDPROC(___copy_to_user)
480
481 .section .fixup,"ax"
482 .align 0
483diff -urNp linux-2.6.32.43/arch/arm/lib/uaccess.S linux-2.6.32.43/arch/arm/lib/uaccess.S
484--- linux-2.6.32.43/arch/arm/lib/uaccess.S 2011-03-27 14:31:47.000000000 -0400
485+++ linux-2.6.32.43/arch/arm/lib/uaccess.S 2011-06-29 20:48:53.000000000 -0400
486@@ -19,7 +19,7 @@
487
488 #define PAGE_SHIFT 12
489
490-/* Prototype: int __copy_to_user(void *to, const char *from, size_t n)
491+/* Prototype: int ___copy_to_user(void *to, const char *from, size_t n)
492 * Purpose : copy a block to user memory from kernel memory
493 * Params : to - user memory
494 * : from - kernel memory
495@@ -39,7 +39,7 @@ USER( strgtbt r3, [r0], #1) @ May fau
496 sub r2, r2, ip
497 b .Lc2u_dest_aligned
498
499-ENTRY(__copy_to_user)
500+ENTRY(___copy_to_user)
501 stmfd sp!, {r2, r4 - r7, lr}
502 cmp r2, #4
503 blt .Lc2u_not_enough
504@@ -277,14 +277,14 @@ USER( strgebt r3, [r0], #1) @ May fau
505 ldrgtb r3, [r1], #0
506 USER( strgtbt r3, [r0], #1) @ May fault
507 b .Lc2u_finished
508-ENDPROC(__copy_to_user)
509+ENDPROC(___copy_to_user)
510
511 .section .fixup,"ax"
512 .align 0
513 9001: ldmfd sp!, {r0, r4 - r7, pc}
514 .previous
515
516-/* Prototype: unsigned long __copy_from_user(void *to,const void *from,unsigned long n);
517+/* Prototype: unsigned long ___copy_from_user(void *to,const void *from,unsigned long n);
518 * Purpose : copy a block from user memory to kernel memory
519 * Params : to - kernel memory
520 * : from - user memory
521@@ -303,7 +303,7 @@ USER( ldrgtbt r3, [r1], #1) @ May fau
522 sub r2, r2, ip
523 b .Lcfu_dest_aligned
524
525-ENTRY(__copy_from_user)
526+ENTRY(___copy_from_user)
527 stmfd sp!, {r0, r2, r4 - r7, lr}
528 cmp r2, #4
529 blt .Lcfu_not_enough
530@@ -543,7 +543,7 @@ USER( ldrgebt r3, [r1], #1) @ May fau
531 USER( ldrgtbt r3, [r1], #1) @ May fault
532 strgtb r3, [r0], #1
533 b .Lcfu_finished
534-ENDPROC(__copy_from_user)
535+ENDPROC(___copy_from_user)
536
537 .section .fixup,"ax"
538 .align 0
539diff -urNp linux-2.6.32.43/arch/arm/lib/uaccess_with_memcpy.c linux-2.6.32.43/arch/arm/lib/uaccess_with_memcpy.c
540--- linux-2.6.32.43/arch/arm/lib/uaccess_with_memcpy.c 2011-03-27 14:31:47.000000000 -0400
541+++ linux-2.6.32.43/arch/arm/lib/uaccess_with_memcpy.c 2011-06-29 20:44:35.000000000 -0400
542@@ -97,7 +97,7 @@ out:
543 }
544
545 unsigned long
546-__copy_to_user(void __user *to, const void *from, unsigned long n)
547+___copy_to_user(void __user *to, const void *from, unsigned long n)
548 {
549 /*
550 * This test is stubbed out of the main function above to keep
551diff -urNp linux-2.6.32.43/arch/arm/mach-at91/pm.c linux-2.6.32.43/arch/arm/mach-at91/pm.c
552--- linux-2.6.32.43/arch/arm/mach-at91/pm.c 2011-03-27 14:31:47.000000000 -0400
553+++ linux-2.6.32.43/arch/arm/mach-at91/pm.c 2011-04-17 15:56:45.000000000 -0400
554@@ -348,7 +348,7 @@ static void at91_pm_end(void)
555 }
556
557
558-static struct platform_suspend_ops at91_pm_ops ={
559+static const struct platform_suspend_ops at91_pm_ops ={
560 .valid = at91_pm_valid_state,
561 .begin = at91_pm_begin,
562 .enter = at91_pm_enter,
563diff -urNp linux-2.6.32.43/arch/arm/mach-omap1/pm.c linux-2.6.32.43/arch/arm/mach-omap1/pm.c
564--- linux-2.6.32.43/arch/arm/mach-omap1/pm.c 2011-03-27 14:31:47.000000000 -0400
565+++ linux-2.6.32.43/arch/arm/mach-omap1/pm.c 2011-04-17 15:56:45.000000000 -0400
566@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
567
568
569
570-static struct platform_suspend_ops omap_pm_ops ={
571+static const struct platform_suspend_ops omap_pm_ops ={
572 .prepare = omap_pm_prepare,
573 .enter = omap_pm_enter,
574 .finish = omap_pm_finish,
575diff -urNp linux-2.6.32.43/arch/arm/mach-omap2/pm24xx.c linux-2.6.32.43/arch/arm/mach-omap2/pm24xx.c
576--- linux-2.6.32.43/arch/arm/mach-omap2/pm24xx.c 2011-03-27 14:31:47.000000000 -0400
577+++ linux-2.6.32.43/arch/arm/mach-omap2/pm24xx.c 2011-04-17 15:56:45.000000000 -0400
578@@ -326,7 +326,7 @@ static void omap2_pm_finish(void)
579 enable_hlt();
580 }
581
582-static struct platform_suspend_ops omap_pm_ops = {
583+static const struct platform_suspend_ops omap_pm_ops = {
584 .prepare = omap2_pm_prepare,
585 .enter = omap2_pm_enter,
586 .finish = omap2_pm_finish,
587diff -urNp linux-2.6.32.43/arch/arm/mach-omap2/pm34xx.c linux-2.6.32.43/arch/arm/mach-omap2/pm34xx.c
588--- linux-2.6.32.43/arch/arm/mach-omap2/pm34xx.c 2011-03-27 14:31:47.000000000 -0400
589+++ linux-2.6.32.43/arch/arm/mach-omap2/pm34xx.c 2011-04-17 15:56:45.000000000 -0400
590@@ -401,7 +401,7 @@ static void omap3_pm_end(void)
591 return;
592 }
593
594-static struct platform_suspend_ops omap_pm_ops = {
595+static const struct platform_suspend_ops omap_pm_ops = {
596 .begin = omap3_pm_begin,
597 .end = omap3_pm_end,
598 .prepare = omap3_pm_prepare,
599diff -urNp linux-2.6.32.43/arch/arm/mach-pnx4008/pm.c linux-2.6.32.43/arch/arm/mach-pnx4008/pm.c
600--- linux-2.6.32.43/arch/arm/mach-pnx4008/pm.c 2011-03-27 14:31:47.000000000 -0400
601+++ linux-2.6.32.43/arch/arm/mach-pnx4008/pm.c 2011-04-17 15:56:45.000000000 -0400
602@@ -116,7 +116,7 @@ static int pnx4008_pm_valid(suspend_stat
603 (state == PM_SUSPEND_MEM);
604 }
605
606-static struct platform_suspend_ops pnx4008_pm_ops = {
607+static const struct platform_suspend_ops pnx4008_pm_ops = {
608 .enter = pnx4008_pm_enter,
609 .valid = pnx4008_pm_valid,
610 };
611diff -urNp linux-2.6.32.43/arch/arm/mach-pxa/pm.c linux-2.6.32.43/arch/arm/mach-pxa/pm.c
612--- linux-2.6.32.43/arch/arm/mach-pxa/pm.c 2011-03-27 14:31:47.000000000 -0400
613+++ linux-2.6.32.43/arch/arm/mach-pxa/pm.c 2011-04-17 15:56:45.000000000 -0400
614@@ -95,7 +95,7 @@ void pxa_pm_finish(void)
615 pxa_cpu_pm_fns->finish();
616 }
617
618-static struct platform_suspend_ops pxa_pm_ops = {
619+static const struct platform_suspend_ops pxa_pm_ops = {
620 .valid = pxa_pm_valid,
621 .enter = pxa_pm_enter,
622 .prepare = pxa_pm_prepare,
623diff -urNp linux-2.6.32.43/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.32.43/arch/arm/mach-pxa/sharpsl_pm.c
624--- linux-2.6.32.43/arch/arm/mach-pxa/sharpsl_pm.c 2011-03-27 14:31:47.000000000 -0400
625+++ linux-2.6.32.43/arch/arm/mach-pxa/sharpsl_pm.c 2011-04-17 15:56:45.000000000 -0400
626@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status
627 }
628
629 #ifdef CONFIG_PM
630-static struct platform_suspend_ops sharpsl_pm_ops = {
631+static const struct platform_suspend_ops sharpsl_pm_ops = {
632 .prepare = pxa_pm_prepare,
633 .finish = pxa_pm_finish,
634 .enter = corgi_pxa_pm_enter,
635diff -urNp linux-2.6.32.43/arch/arm/mach-sa1100/pm.c linux-2.6.32.43/arch/arm/mach-sa1100/pm.c
636--- linux-2.6.32.43/arch/arm/mach-sa1100/pm.c 2011-03-27 14:31:47.000000000 -0400
637+++ linux-2.6.32.43/arch/arm/mach-sa1100/pm.c 2011-04-17 15:56:45.000000000 -0400
638@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp)
639 return virt_to_phys(sp);
640 }
641
642-static struct platform_suspend_ops sa11x0_pm_ops = {
643+static const struct platform_suspend_ops sa11x0_pm_ops = {
644 .enter = sa11x0_pm_enter,
645 .valid = suspend_valid_only_mem,
646 };
647diff -urNp linux-2.6.32.43/arch/arm/mm/fault.c linux-2.6.32.43/arch/arm/mm/fault.c
648--- linux-2.6.32.43/arch/arm/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
649+++ linux-2.6.32.43/arch/arm/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
650@@ -166,6 +166,13 @@ __do_user_fault(struct task_struct *tsk,
651 }
652 #endif
653
654+#ifdef CONFIG_PAX_PAGEEXEC
655+ if (fsr & FSR_LNX_PF) {
656+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
657+ do_group_exit(SIGKILL);
658+ }
659+#endif
660+
661 tsk->thread.address = addr;
662 tsk->thread.error_code = fsr;
663 tsk->thread.trap_no = 14;
664@@ -357,6 +364,33 @@ do_page_fault(unsigned long addr, unsign
665 }
666 #endif /* CONFIG_MMU */
667
668+#ifdef CONFIG_PAX_PAGEEXEC
669+void pax_report_insns(void *pc, void *sp)
670+{
671+ long i;
672+
673+ printk(KERN_ERR "PAX: bytes at PC: ");
674+ for (i = 0; i < 20; i++) {
675+ unsigned char c;
676+ if (get_user(c, (__force unsigned char __user *)pc+i))
677+ printk(KERN_CONT "?? ");
678+ else
679+ printk(KERN_CONT "%02x ", c);
680+ }
681+ printk("\n");
682+
683+ printk(KERN_ERR "PAX: bytes at SP-4: ");
684+ for (i = -1; i < 20; i++) {
685+ unsigned long c;
686+ if (get_user(c, (__force unsigned long __user *)sp+i))
687+ printk(KERN_CONT "???????? ");
688+ else
689+ printk(KERN_CONT "%08lx ", c);
690+ }
691+ printk("\n");
692+}
693+#endif
694+
695 /*
696 * First Level Translation Fault Handler
697 *
698diff -urNp linux-2.6.32.43/arch/arm/mm/mmap.c linux-2.6.32.43/arch/arm/mm/mmap.c
699--- linux-2.6.32.43/arch/arm/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
700+++ linux-2.6.32.43/arch/arm/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
701@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp
702 if (len > TASK_SIZE)
703 return -ENOMEM;
704
705+#ifdef CONFIG_PAX_RANDMMAP
706+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
707+#endif
708+
709 if (addr) {
710 if (do_align)
711 addr = COLOUR_ALIGN(addr, pgoff);
712@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp
713 addr = PAGE_ALIGN(addr);
714
715 vma = find_vma(mm, addr);
716- if (TASK_SIZE - len >= addr &&
717- (!vma || addr + len <= vma->vm_start))
718+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
719 return addr;
720 }
721 if (len > mm->cached_hole_size) {
722- start_addr = addr = mm->free_area_cache;
723+ start_addr = addr = mm->free_area_cache;
724 } else {
725- start_addr = addr = TASK_UNMAPPED_BASE;
726- mm->cached_hole_size = 0;
727+ start_addr = addr = mm->mmap_base;
728+ mm->cached_hole_size = 0;
729 }
730
731 full_search:
732@@ -94,14 +97,14 @@ full_search:
733 * Start a new search - just in case we missed
734 * some holes.
735 */
736- if (start_addr != TASK_UNMAPPED_BASE) {
737- start_addr = addr = TASK_UNMAPPED_BASE;
738+ if (start_addr != mm->mmap_base) {
739+ start_addr = addr = mm->mmap_base;
740 mm->cached_hole_size = 0;
741 goto full_search;
742 }
743 return -ENOMEM;
744 }
745- if (!vma || addr + len <= vma->vm_start) {
746+ if (check_heap_stack_gap(vma, addr, len)) {
747 /*
748 * Remember the place where we stopped the search:
749 */
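Starting the search at mm->mmap_base instead of the constant TASK_UNMAPPED_BASE is what lets RANDMMAP take effect here: mmap_base is per-process and, elsewhere in this patch, has the randomized delta folded in when the mm is set up. A hedged sketch of that setup — delta_mmap and MF_PAX_RANDMMAP come from other hunks, and the exact call site (arch_pick_mmap_layout() or similar) is assumed rather than quoted:

/* sketch of the per-mm setup assumed by the search loop above */
void arch_pick_mmap_layout_sketch(struct mm_struct *mm)
{
	mm->mmap_base = TASK_UNMAPPED_BASE;

#ifdef CONFIG_PAX_RANDMMAP
	if (mm->pax_flags & MF_PAX_RANDMMAP)
		mm->mmap_base += mm->delta_mmap;   /* page-aligned random slide */
#endif

	mm->get_unmapped_area = arch_get_unmapped_area;
	mm->unmap_area = arch_unmap_area;
}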
750diff -urNp linux-2.6.32.43/arch/arm/plat-s3c/pm.c linux-2.6.32.43/arch/arm/plat-s3c/pm.c
751--- linux-2.6.32.43/arch/arm/plat-s3c/pm.c 2011-03-27 14:31:47.000000000 -0400
752+++ linux-2.6.32.43/arch/arm/plat-s3c/pm.c 2011-04-17 15:56:45.000000000 -0400
753@@ -355,7 +355,7 @@ static void s3c_pm_finish(void)
754 s3c_pm_check_cleanup();
755 }
756
757-static struct platform_suspend_ops s3c_pm_ops = {
758+static const struct platform_suspend_ops s3c_pm_ops = {
759 .enter = s3c_pm_enter,
760 .prepare = s3c_pm_prepare,
761 .finish = s3c_pm_finish,
762diff -urNp linux-2.6.32.43/arch/avr32/include/asm/elf.h linux-2.6.32.43/arch/avr32/include/asm/elf.h
763--- linux-2.6.32.43/arch/avr32/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
764+++ linux-2.6.32.43/arch/avr32/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
765@@ -85,8 +85,14 @@ typedef struct user_fpu_struct elf_fpreg
766 the loader. We need to make sure that it is out of the way of the program
767 that it will "exec", and that there is sufficient room for the brk. */
768
769-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
770+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
771
772+#ifdef CONFIG_PAX_ASLR
773+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
774+
775+#define PAX_DELTA_MMAP_LEN 15
776+#define PAX_DELTA_STACK_LEN 15
777+#endif
778
779 /* This yields a mask that user programs can use to figure out what
780 instruction set this CPU supports. This could be done in user space,
781diff -urNp linux-2.6.32.43/arch/avr32/include/asm/kmap_types.h linux-2.6.32.43/arch/avr32/include/asm/kmap_types.h
782--- linux-2.6.32.43/arch/avr32/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
783+++ linux-2.6.32.43/arch/avr32/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
784@@ -22,7 +22,8 @@ D(10) KM_IRQ0,
785 D(11) KM_IRQ1,
786 D(12) KM_SOFTIRQ0,
787 D(13) KM_SOFTIRQ1,
788-D(14) KM_TYPE_NR
789+D(14) KM_CLEARPAGE,
790+D(15) KM_TYPE_NR
791 };
792
793 #undef D
794diff -urNp linux-2.6.32.43/arch/avr32/mach-at32ap/pm.c linux-2.6.32.43/arch/avr32/mach-at32ap/pm.c
795--- linux-2.6.32.43/arch/avr32/mach-at32ap/pm.c 2011-03-27 14:31:47.000000000 -0400
796+++ linux-2.6.32.43/arch/avr32/mach-at32ap/pm.c 2011-04-17 15:56:45.000000000 -0400
797@@ -176,7 +176,7 @@ out:
798 return 0;
799 }
800
801-static struct platform_suspend_ops avr32_pm_ops = {
802+static const struct platform_suspend_ops avr32_pm_ops = {
803 .valid = avr32_pm_valid_state,
804 .enter = avr32_pm_enter,
805 };
806diff -urNp linux-2.6.32.43/arch/avr32/mm/fault.c linux-2.6.32.43/arch/avr32/mm/fault.c
807--- linux-2.6.32.43/arch/avr32/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
808+++ linux-2.6.32.43/arch/avr32/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
809@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru
810
811 int exception_trace = 1;
812
813+#ifdef CONFIG_PAX_PAGEEXEC
814+void pax_report_insns(void *pc, void *sp)
815+{
816+ unsigned long i;
817+
818+ printk(KERN_ERR "PAX: bytes at PC: ");
819+ for (i = 0; i < 20; i++) {
820+ unsigned char c;
821+ if (get_user(c, (unsigned char *)pc+i))
822+ printk(KERN_CONT "???????? ");
823+ else
824+ printk(KERN_CONT "%02x ", c);
825+ }
826+ printk("\n");
827+}
828+#endif
829+
830 /*
831 * This routine handles page faults. It determines the address and the
832 * problem, and then passes it off to one of the appropriate routines.
833@@ -157,6 +174,16 @@ bad_area:
834 up_read(&mm->mmap_sem);
835
836 if (user_mode(regs)) {
837+
838+#ifdef CONFIG_PAX_PAGEEXEC
839+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
840+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
841+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
842+ do_group_exit(SIGKILL);
843+ }
844+ }
845+#endif
846+
847 if (exception_trace && printk_ratelimit())
848 printk("%s%s[%d]: segfault at %08lx pc %08lx "
849 "sp %08lx ecr %lu\n",
850diff -urNp linux-2.6.32.43/arch/blackfin/kernel/kgdb.c linux-2.6.32.43/arch/blackfin/kernel/kgdb.c
851--- linux-2.6.32.43/arch/blackfin/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
852+++ linux-2.6.32.43/arch/blackfin/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
853@@ -428,7 +428,7 @@ int kgdb_arch_handle_exception(int vecto
854 return -1; /* this means that we do not want to exit from the handler */
855 }
856
857-struct kgdb_arch arch_kgdb_ops = {
858+const struct kgdb_arch arch_kgdb_ops = {
859 .gdb_bpt_instr = {0xa1},
860 #ifdef CONFIG_SMP
861 .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP,
862diff -urNp linux-2.6.32.43/arch/blackfin/mach-common/pm.c linux-2.6.32.43/arch/blackfin/mach-common/pm.c
863--- linux-2.6.32.43/arch/blackfin/mach-common/pm.c 2011-03-27 14:31:47.000000000 -0400
864+++ linux-2.6.32.43/arch/blackfin/mach-common/pm.c 2011-04-17 15:56:45.000000000 -0400
865@@ -255,7 +255,7 @@ static int bfin_pm_enter(suspend_state_t
866 return 0;
867 }
868
869-struct platform_suspend_ops bfin_pm_ops = {
870+const struct platform_suspend_ops bfin_pm_ops = {
871 .enter = bfin_pm_enter,
872 .valid = bfin_pm_valid,
873 };
874diff -urNp linux-2.6.32.43/arch/frv/include/asm/kmap_types.h linux-2.6.32.43/arch/frv/include/asm/kmap_types.h
875--- linux-2.6.32.43/arch/frv/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
876+++ linux-2.6.32.43/arch/frv/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
877@@ -23,6 +23,7 @@ enum km_type {
878 KM_IRQ1,
879 KM_SOFTIRQ0,
880 KM_SOFTIRQ1,
881+ KM_CLEARPAGE,
882 KM_TYPE_NR
883 };
884
885diff -urNp linux-2.6.32.43/arch/frv/mm/elf-fdpic.c linux-2.6.32.43/arch/frv/mm/elf-fdpic.c
886--- linux-2.6.32.43/arch/frv/mm/elf-fdpic.c 2011-03-27 14:31:47.000000000 -0400
887+++ linux-2.6.32.43/arch/frv/mm/elf-fdpic.c 2011-04-17 15:56:45.000000000 -0400
888@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str
889 if (addr) {
890 addr = PAGE_ALIGN(addr);
891 vma = find_vma(current->mm, addr);
892- if (TASK_SIZE - len >= addr &&
893- (!vma || addr + len <= vma->vm_start))
894+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
895 goto success;
896 }
897
898@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str
899 for (; vma; vma = vma->vm_next) {
900 if (addr > limit)
901 break;
902- if (addr + len <= vma->vm_start)
903+ if (check_heap_stack_gap(vma, addr, len))
904 goto success;
905 addr = vma->vm_end;
906 }
907@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str
908 for (; vma; vma = vma->vm_next) {
909 if (addr > limit)
910 break;
911- if (addr + len <= vma->vm_start)
912+ if (check_heap_stack_gap(vma, addr, len))
913 goto success;
914 addr = vma->vm_end;
915 }
916diff -urNp linux-2.6.32.43/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.43/arch/ia64/hp/common/hwsw_iommu.c
917--- linux-2.6.32.43/arch/ia64/hp/common/hwsw_iommu.c 2011-03-27 14:31:47.000000000 -0400
918+++ linux-2.6.32.43/arch/ia64/hp/common/hwsw_iommu.c 2011-04-17 15:56:45.000000000 -0400
919@@ -17,7 +17,7 @@
920 #include <linux/swiotlb.h>
921 #include <asm/machvec.h>
922
923-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
924+extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops;
925
926 /* swiotlb declarations & definitions: */
927 extern int swiotlb_late_init_with_default_size (size_t size);
928@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev
929 !sba_dma_ops.dma_supported(dev, *dev->dma_mask);
930 }
931
932-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
933+const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev)
934 {
935 if (use_swiotlb(dev))
936 return &swiotlb_dma_ops;
937diff -urNp linux-2.6.32.43/arch/ia64/hp/common/sba_iommu.c linux-2.6.32.43/arch/ia64/hp/common/sba_iommu.c
938--- linux-2.6.32.43/arch/ia64/hp/common/sba_iommu.c 2011-03-27 14:31:47.000000000 -0400
939+++ linux-2.6.32.43/arch/ia64/hp/common/sba_iommu.c 2011-04-17 15:56:45.000000000 -0400
940@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d
941 },
942 };
943
944-extern struct dma_map_ops swiotlb_dma_ops;
945+extern const struct dma_map_ops swiotlb_dma_ops;
946
947 static int __init
948 sba_init(void)
949@@ -2211,7 +2211,7 @@ sba_page_override(char *str)
950
951 __setup("sbapagesize=",sba_page_override);
952
953-struct dma_map_ops sba_dma_ops = {
954+const struct dma_map_ops sba_dma_ops = {
955 .alloc_coherent = sba_alloc_coherent,
956 .free_coherent = sba_free_coherent,
957 .map_page = sba_map_page,
958diff -urNp linux-2.6.32.43/arch/ia64/ia32/binfmt_elf32.c linux-2.6.32.43/arch/ia64/ia32/binfmt_elf32.c
959--- linux-2.6.32.43/arch/ia64/ia32/binfmt_elf32.c 2011-03-27 14:31:47.000000000 -0400
960+++ linux-2.6.32.43/arch/ia64/ia32/binfmt_elf32.c 2011-04-17 15:56:45.000000000 -0400
961@@ -45,6 +45,13 @@ randomize_stack_top(unsigned long stack_
962
963 #define elf_read_implies_exec(ex, have_pt_gnu_stack) (!(have_pt_gnu_stack))
964
965+#ifdef CONFIG_PAX_ASLR
966+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
967+
968+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
969+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
970+#endif
971+
972 /* Ugly but avoids duplication */
973 #include "../../../fs/binfmt_elf.c"
974
975diff -urNp linux-2.6.32.43/arch/ia64/ia32/ia32priv.h linux-2.6.32.43/arch/ia64/ia32/ia32priv.h
976--- linux-2.6.32.43/arch/ia64/ia32/ia32priv.h 2011-03-27 14:31:47.000000000 -0400
977+++ linux-2.6.32.43/arch/ia64/ia32/ia32priv.h 2011-04-17 15:56:45.000000000 -0400
978@@ -296,7 +296,14 @@ typedef struct compat_siginfo {
979 #define ELF_DATA ELFDATA2LSB
980 #define ELF_ARCH EM_386
981
982-#define IA32_STACK_TOP IA32_PAGE_OFFSET
983+#ifdef CONFIG_PAX_RANDUSTACK
984+#define __IA32_DELTA_STACK (current->mm->delta_stack)
985+#else
986+#define __IA32_DELTA_STACK 0UL
987+#endif
988+
989+#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
990+
991 #define IA32_GATE_OFFSET IA32_PAGE_OFFSET
992 #define IA32_GATE_END IA32_PAGE_OFFSET + PAGE_SIZE
993
994diff -urNp linux-2.6.32.43/arch/ia64/include/asm/dma-mapping.h linux-2.6.32.43/arch/ia64/include/asm/dma-mapping.h
995--- linux-2.6.32.43/arch/ia64/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
996+++ linux-2.6.32.43/arch/ia64/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
997@@ -12,7 +12,7 @@
998
999 #define ARCH_HAS_DMA_GET_REQUIRED_MASK
1000
1001-extern struct dma_map_ops *dma_ops;
1002+extern const struct dma_map_ops *dma_ops;
1003 extern struct ia64_machine_vector ia64_mv;
1004 extern void set_iommu_machvec(void);
1005
1006@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d
1007 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
1008 dma_addr_t *daddr, gfp_t gfp)
1009 {
1010- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1011+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1012 void *caddr;
1013
1014 caddr = ops->alloc_coherent(dev, size, daddr, gfp);
1015@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s
1016 static inline void dma_free_coherent(struct device *dev, size_t size,
1017 void *caddr, dma_addr_t daddr)
1018 {
1019- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1020+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1021 debug_dma_free_coherent(dev, size, caddr, daddr);
1022 ops->free_coherent(dev, size, caddr, daddr);
1023 }
1024@@ -49,13 +49,13 @@ static inline void dma_free_coherent(str
1025
1026 static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr)
1027 {
1028- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1029+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1030 return ops->mapping_error(dev, daddr);
1031 }
1032
1033 static inline int dma_supported(struct device *dev, u64 mask)
1034 {
1035- struct dma_map_ops *ops = platform_dma_get_ops(dev);
1036+ const struct dma_map_ops *ops = platform_dma_get_ops(dev);
1037 return ops->dma_supported(dev, mask);
1038 }
1039
1040diff -urNp linux-2.6.32.43/arch/ia64/include/asm/elf.h linux-2.6.32.43/arch/ia64/include/asm/elf.h
1041--- linux-2.6.32.43/arch/ia64/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1042+++ linux-2.6.32.43/arch/ia64/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1043@@ -43,6 +43,13 @@
1044 */
1045 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
1046
1047+#ifdef CONFIG_PAX_ASLR
1048+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
1049+
1050+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1051+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
1052+#endif
1053+
1054 #define PT_IA_64_UNWIND 0x70000001
1055
1056 /* IA-64 relocations: */
1057diff -urNp linux-2.6.32.43/arch/ia64/include/asm/machvec.h linux-2.6.32.43/arch/ia64/include/asm/machvec.h
1058--- linux-2.6.32.43/arch/ia64/include/asm/machvec.h 2011-03-27 14:31:47.000000000 -0400
1059+++ linux-2.6.32.43/arch/ia64/include/asm/machvec.h 2011-04-17 15:56:45.000000000 -0400
1060@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event
1061 /* DMA-mapping interface: */
1062 typedef void ia64_mv_dma_init (void);
1063 typedef u64 ia64_mv_dma_get_required_mask (struct device *);
1064-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1065+typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *);
1066
1067 /*
1068 * WARNING: The legacy I/O space is _architected_. Platforms are
1069@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co
1070 # endif /* CONFIG_IA64_GENERIC */
1071
1072 extern void swiotlb_dma_init(void);
1073-extern struct dma_map_ops *dma_get_ops(struct device *);
1074+extern const struct dma_map_ops *dma_get_ops(struct device *);
1075
1076 /*
1077 * Define default versions so we can extend machvec for new platforms without having
1078diff -urNp linux-2.6.32.43/arch/ia64/include/asm/pgtable.h linux-2.6.32.43/arch/ia64/include/asm/pgtable.h
1079--- linux-2.6.32.43/arch/ia64/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1080+++ linux-2.6.32.43/arch/ia64/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1081@@ -12,7 +12,7 @@
1082 * David Mosberger-Tang <davidm@hpl.hp.com>
1083 */
1084
1085-
1086+#include <linux/const.h>
1087 #include <asm/mman.h>
1088 #include <asm/page.h>
1089 #include <asm/processor.h>
1090@@ -143,6 +143,17 @@
1091 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1092 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1093 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
1094+
1095+#ifdef CONFIG_PAX_PAGEEXEC
1096+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
1097+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1098+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
1099+#else
1100+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1101+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1102+# define PAGE_COPY_NOEXEC PAGE_COPY
1103+#endif
1104+
1105 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
1106 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
1107 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
1108diff -urNp linux-2.6.32.43/arch/ia64/include/asm/spinlock.h linux-2.6.32.43/arch/ia64/include/asm/spinlock.h
1109--- linux-2.6.32.43/arch/ia64/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
1110+++ linux-2.6.32.43/arch/ia64/include/asm/spinlock.h 2011-04-17 15:56:45.000000000 -0400
1111@@ -72,7 +72,7 @@ static __always_inline void __ticket_spi
1112 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
1113
1114 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
1115- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
1116+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
1117 }
1118
1119 static __always_inline void __ticket_spin_unlock_wait(raw_spinlock_t *lock)
1120diff -urNp linux-2.6.32.43/arch/ia64/include/asm/uaccess.h linux-2.6.32.43/arch/ia64/include/asm/uaccess.h
1121--- linux-2.6.32.43/arch/ia64/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
1122+++ linux-2.6.32.43/arch/ia64/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
1123@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _
1124 const void *__cu_from = (from); \
1125 long __cu_len = (n); \
1126 \
1127- if (__access_ok(__cu_to, __cu_len, get_fs())) \
1128+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \
1129 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
1130 __cu_len; \
1131 })
1132@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _
1133 long __cu_len = (n); \
1134 \
1135 __chk_user_ptr(__cu_from); \
1136- if (__access_ok(__cu_from, __cu_len, get_fs())) \
1137+ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \
1138 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
1139 __cu_len; \
1140 })
1141diff -urNp linux-2.6.32.43/arch/ia64/kernel/dma-mapping.c linux-2.6.32.43/arch/ia64/kernel/dma-mapping.c
1142--- linux-2.6.32.43/arch/ia64/kernel/dma-mapping.c 2011-03-27 14:31:47.000000000 -0400
1143+++ linux-2.6.32.43/arch/ia64/kernel/dma-mapping.c 2011-04-17 15:56:45.000000000 -0400
1144@@ -3,7 +3,7 @@
1145 /* Set this to 1 if there is a HW IOMMU in the system */
1146 int iommu_detected __read_mostly;
1147
1148-struct dma_map_ops *dma_ops;
1149+const struct dma_map_ops *dma_ops;
1150 EXPORT_SYMBOL(dma_ops);
1151
1152 #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)
1153@@ -16,7 +16,7 @@ static int __init dma_init(void)
1154 }
1155 fs_initcall(dma_init);
1156
1157-struct dma_map_ops *dma_get_ops(struct device *dev)
1158+const struct dma_map_ops *dma_get_ops(struct device *dev)
1159 {
1160 return dma_ops;
1161 }
1162diff -urNp linux-2.6.32.43/arch/ia64/kernel/module.c linux-2.6.32.43/arch/ia64/kernel/module.c
1163--- linux-2.6.32.43/arch/ia64/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1164+++ linux-2.6.32.43/arch/ia64/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1165@@ -315,8 +315,7 @@ module_alloc (unsigned long size)
1166 void
1167 module_free (struct module *mod, void *module_region)
1168 {
1169- if (mod && mod->arch.init_unw_table &&
1170- module_region == mod->module_init) {
1171+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
1172 unw_remove_unwind_table(mod->arch.init_unw_table);
1173 mod->arch.init_unw_table = NULL;
1174 }
1175@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd
1176 }
1177
1178 static inline int
1179+in_init_rx (const struct module *mod, uint64_t addr)
1180+{
1181+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
1182+}
1183+
1184+static inline int
1185+in_init_rw (const struct module *mod, uint64_t addr)
1186+{
1187+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
1188+}
1189+
1190+static inline int
1191 in_init (const struct module *mod, uint64_t addr)
1192 {
1193- return addr - (uint64_t) mod->module_init < mod->init_size;
1194+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
1195+}
1196+
1197+static inline int
1198+in_core_rx (const struct module *mod, uint64_t addr)
1199+{
1200+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
1201+}
1202+
1203+static inline int
1204+in_core_rw (const struct module *mod, uint64_t addr)
1205+{
1206+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
1207 }
1208
1209 static inline int
1210 in_core (const struct module *mod, uint64_t addr)
1211 {
1212- return addr - (uint64_t) mod->module_core < mod->core_size;
1213+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
1214 }
1215
1216 static inline int
1217@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_
1218 break;
1219
1220 case RV_BDREL:
1221- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
1222+ if (in_init_rx(mod, val))
1223+ val -= (uint64_t) mod->module_init_rx;
1224+ else if (in_init_rw(mod, val))
1225+ val -= (uint64_t) mod->module_init_rw;
1226+ else if (in_core_rx(mod, val))
1227+ val -= (uint64_t) mod->module_core_rx;
1228+ else if (in_core_rw(mod, val))
1229+ val -= (uint64_t) mod->module_core_rw;
1230 break;
1231
1232 case RV_LTV:
1233@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs,
1234 * addresses have been selected...
1235 */
1236 uint64_t gp;
1237- if (mod->core_size > MAX_LTOFF)
1238+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
1239 /*
1240 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
1241 * at the end of the module.
1242 */
1243- gp = mod->core_size - MAX_LTOFF / 2;
1244+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
1245 else
1246- gp = mod->core_size / 2;
1247- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
1248+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
1249+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
1250 mod->arch.gp = gp;
1251 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
1252 }
1253diff -urNp linux-2.6.32.43/arch/ia64/kernel/pci-dma.c linux-2.6.32.43/arch/ia64/kernel/pci-dma.c
1254--- linux-2.6.32.43/arch/ia64/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
1255+++ linux-2.6.32.43/arch/ia64/kernel/pci-dma.c 2011-04-17 15:56:45.000000000 -0400
1256@@ -43,7 +43,7 @@ struct device fallback_dev = {
1257 .dma_mask = &fallback_dev.coherent_dma_mask,
1258 };
1259
1260-extern struct dma_map_ops intel_dma_ops;
1261+extern const struct dma_map_ops intel_dma_ops;
1262
1263 static int __init pci_iommu_init(void)
1264 {
1265@@ -96,15 +96,34 @@ int iommu_dma_supported(struct device *d
1266 }
1267 EXPORT_SYMBOL(iommu_dma_supported);
1268
1269+extern void *intel_alloc_coherent(struct device *hwdev, size_t size, dma_addr_t *dma_handle, gfp_t flags);
1270+extern void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle);
1271+extern int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1272+extern void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist, int nelems, enum dma_data_direction dir, struct dma_attrs *attrs);
1273+extern dma_addr_t intel_map_page(struct device *dev, struct page *page, unsigned long offset, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1274+extern void intel_unmap_page(struct device *dev, dma_addr_t dev_addr, size_t size, enum dma_data_direction dir, struct dma_attrs *attrs);
1275+extern int intel_mapping_error(struct device *dev, dma_addr_t dma_addr);
1276+
1277+static const struct dma_map_ops intel_iommu_dma_ops = {
1278+ /* from drivers/pci/intel-iommu.c:intel_dma_ops */
1279+ .alloc_coherent = intel_alloc_coherent,
1280+ .free_coherent = intel_free_coherent,
1281+ .map_sg = intel_map_sg,
1282+ .unmap_sg = intel_unmap_sg,
1283+ .map_page = intel_map_page,
1284+ .unmap_page = intel_unmap_page,
1285+ .mapping_error = intel_mapping_error,
1286+
1287+ .sync_single_for_cpu = machvec_dma_sync_single,
1288+ .sync_sg_for_cpu = machvec_dma_sync_sg,
1289+ .sync_single_for_device = machvec_dma_sync_single,
1290+ .sync_sg_for_device = machvec_dma_sync_sg,
1291+ .dma_supported = iommu_dma_supported,
1292+};
1293+
1294 void __init pci_iommu_alloc(void)
1295 {
1296- dma_ops = &intel_dma_ops;
1297-
1298- dma_ops->sync_single_for_cpu = machvec_dma_sync_single;
1299- dma_ops->sync_sg_for_cpu = machvec_dma_sync_sg;
1300- dma_ops->sync_single_for_device = machvec_dma_sync_single;
1301- dma_ops->sync_sg_for_device = machvec_dma_sync_sg;
1302- dma_ops->dma_supported = iommu_dma_supported;
1303+ dma_ops = &intel_iommu_dma_ops;
1304
1305 /*
1306 * The order of these functions is important for
1307diff -urNp linux-2.6.32.43/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.43/arch/ia64/kernel/pci-swiotlb.c
1308--- linux-2.6.32.43/arch/ia64/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
1309+++ linux-2.6.32.43/arch/ia64/kernel/pci-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
1310@@ -21,7 +21,7 @@ static void *ia64_swiotlb_alloc_coherent
1311 return swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
1312 }
1313
1314-struct dma_map_ops swiotlb_dma_ops = {
1315+const struct dma_map_ops swiotlb_dma_ops = {
1316 .alloc_coherent = ia64_swiotlb_alloc_coherent,
1317 .free_coherent = swiotlb_free_coherent,
1318 .map_page = swiotlb_map_page,
1319diff -urNp linux-2.6.32.43/arch/ia64/kernel/sys_ia64.c linux-2.6.32.43/arch/ia64/kernel/sys_ia64.c
1320--- linux-2.6.32.43/arch/ia64/kernel/sys_ia64.c 2011-03-27 14:31:47.000000000 -0400
1321+++ linux-2.6.32.43/arch/ia64/kernel/sys_ia64.c 2011-04-17 15:56:45.000000000 -0400
1322@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil
1323 if (REGION_NUMBER(addr) == RGN_HPAGE)
1324 addr = 0;
1325 #endif
1326+
1327+#ifdef CONFIG_PAX_RANDMMAP
1328+ if (mm->pax_flags & MF_PAX_RANDMMAP)
1329+ addr = mm->free_area_cache;
1330+ else
1331+#endif
1332+
1333 if (!addr)
1334 addr = mm->free_area_cache;
1335
1336@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil
1337 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
1338 /* At this point: (!vma || addr < vma->vm_end). */
1339 if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
1340- if (start_addr != TASK_UNMAPPED_BASE) {
1341+ if (start_addr != mm->mmap_base) {
1342 /* Start a new search --- just in case we missed some holes. */
1343- addr = TASK_UNMAPPED_BASE;
1344+ addr = mm->mmap_base;
1345 goto full_search;
1346 }
1347 return -ENOMEM;
1348 }
1349- if (!vma || addr + len <= vma->vm_start) {
1350+ if (check_heap_stack_gap(vma, addr, len)) {
1351 /* Remember the address where we stopped this search: */
1352 mm->free_area_cache = addr + len;
1353 return addr;
1354diff -urNp linux-2.6.32.43/arch/ia64/kernel/topology.c linux-2.6.32.43/arch/ia64/kernel/topology.c
1355--- linux-2.6.32.43/arch/ia64/kernel/topology.c 2011-03-27 14:31:47.000000000 -0400
1356+++ linux-2.6.32.43/arch/ia64/kernel/topology.c 2011-04-17 15:56:45.000000000 -0400
1357@@ -282,7 +282,7 @@ static ssize_t cache_show(struct kobject
1358 return ret;
1359 }
1360
1361-static struct sysfs_ops cache_sysfs_ops = {
1362+static const struct sysfs_ops cache_sysfs_ops = {
1363 .show = cache_show
1364 };
1365
1366diff -urNp linux-2.6.32.43/arch/ia64/kernel/vmlinux.lds.S linux-2.6.32.43/arch/ia64/kernel/vmlinux.lds.S
1367--- linux-2.6.32.43/arch/ia64/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
1368+++ linux-2.6.32.43/arch/ia64/kernel/vmlinux.lds.S 2011-04-17 15:56:45.000000000 -0400
1369@@ -190,7 +190,7 @@ SECTIONS
1370 /* Per-cpu data: */
1371 . = ALIGN(PERCPU_PAGE_SIZE);
1372 PERCPU_VADDR(PERCPU_ADDR, :percpu)
1373- __phys_per_cpu_start = __per_cpu_load;
1374+ __phys_per_cpu_start = per_cpu_load;
1375 . = __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits
1376 * into percpu page size
1377 */
1378diff -urNp linux-2.6.32.43/arch/ia64/mm/fault.c linux-2.6.32.43/arch/ia64/mm/fault.c
1379--- linux-2.6.32.43/arch/ia64/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1380+++ linux-2.6.32.43/arch/ia64/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1381@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned
1382 return pte_present(pte);
1383 }
1384
1385+#ifdef CONFIG_PAX_PAGEEXEC
1386+void pax_report_insns(void *pc, void *sp)
1387+{
1388+ unsigned long i;
1389+
1390+ printk(KERN_ERR "PAX: bytes at PC: ");
1391+ for (i = 0; i < 8; i++) {
1392+ unsigned int c;
1393+ if (get_user(c, (unsigned int *)pc+i))
1394+ printk(KERN_CONT "???????? ");
1395+ else
1396+ printk(KERN_CONT "%08x ", c);
1397+ }
1398+ printk("\n");
1399+}
1400+#endif
1401+
1402 void __kprobes
1403 ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
1404 {
1405@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres
1406 mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT)
1407 | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT));
1408
1409- if ((vma->vm_flags & mask) != mask)
1410+ if ((vma->vm_flags & mask) != mask) {
1411+
1412+#ifdef CONFIG_PAX_PAGEEXEC
1413+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
1414+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
1415+ goto bad_area;
1416+
1417+ up_read(&mm->mmap_sem);
1418+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
1419+ do_group_exit(SIGKILL);
1420+ }
1421+#endif
1422+
1423 goto bad_area;
1424
1425+ }
1426+
1427 survive:
1428 /*
1429 * If for any reason at all we couldn't handle the fault, make
1430diff -urNp linux-2.6.32.43/arch/ia64/mm/hugetlbpage.c linux-2.6.32.43/arch/ia64/mm/hugetlbpage.c
1431--- linux-2.6.32.43/arch/ia64/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
1432+++ linux-2.6.32.43/arch/ia64/mm/hugetlbpage.c 2011-04-17 15:56:45.000000000 -0400
1433@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area(
1434 /* At this point: (!vmm || addr < vmm->vm_end). */
1435 if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT)
1436 return -ENOMEM;
1437- if (!vmm || (addr + len) <= vmm->vm_start)
1438+ if (check_heap_stack_gap(vmm, addr, len))
1439 return addr;
1440 addr = ALIGN(vmm->vm_end, HPAGE_SIZE);
1441 }
1442diff -urNp linux-2.6.32.43/arch/ia64/mm/init.c linux-2.6.32.43/arch/ia64/mm/init.c
1443--- linux-2.6.32.43/arch/ia64/mm/init.c 2011-03-27 14:31:47.000000000 -0400
1444+++ linux-2.6.32.43/arch/ia64/mm/init.c 2011-04-17 15:56:45.000000000 -0400
1445@@ -122,6 +122,19 @@ ia64_init_addr_space (void)
1446 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
1447 vma->vm_end = vma->vm_start + PAGE_SIZE;
1448 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
1449+
1450+#ifdef CONFIG_PAX_PAGEEXEC
1451+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
1452+ vma->vm_flags &= ~VM_EXEC;
1453+
1454+#ifdef CONFIG_PAX_MPROTECT
1455+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
1456+ vma->vm_flags &= ~VM_MAYEXEC;
1457+#endif
1458+
1459+ }
1460+#endif
1461+
1462 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
1463 down_write(&current->mm->mmap_sem);
1464 if (insert_vm_struct(current->mm, vma)) {
1465diff -urNp linux-2.6.32.43/arch/ia64/sn/pci/pci_dma.c linux-2.6.32.43/arch/ia64/sn/pci/pci_dma.c
1466--- linux-2.6.32.43/arch/ia64/sn/pci/pci_dma.c 2011-03-27 14:31:47.000000000 -0400
1467+++ linux-2.6.32.43/arch/ia64/sn/pci/pci_dma.c 2011-04-17 15:56:45.000000000 -0400
1468@@ -464,7 +464,7 @@ int sn_pci_legacy_write(struct pci_bus *
1469 return ret;
1470 }
1471
1472-static struct dma_map_ops sn_dma_ops = {
1473+static const struct dma_map_ops sn_dma_ops = {
1474 .alloc_coherent = sn_dma_alloc_coherent,
1475 .free_coherent = sn_dma_free_coherent,
1476 .map_page = sn_dma_map_page,
1477diff -urNp linux-2.6.32.43/arch/m32r/lib/usercopy.c linux-2.6.32.43/arch/m32r/lib/usercopy.c
1478--- linux-2.6.32.43/arch/m32r/lib/usercopy.c 2011-03-27 14:31:47.000000000 -0400
1479+++ linux-2.6.32.43/arch/m32r/lib/usercopy.c 2011-04-17 15:56:45.000000000 -0400
1480@@ -14,6 +14,9 @@
1481 unsigned long
1482 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
1483 {
1484+ if ((long)n < 0)
1485+ return n;
1486+
1487 prefetch(from);
1488 if (access_ok(VERIFY_WRITE, to, n))
1489 __copy_user(to,from,n);
1490@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to,
1491 unsigned long
1492 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
1493 {
1494+ if ((long)n < 0)
1495+ return n;
1496+
1497 prefetchw(to);
1498 if (access_ok(VERIFY_READ, from, n))
1499 __copy_user_zeroing(to,from,n);
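Both __generic_copy_to_user() and __generic_copy_from_user() gain an early return when the requested length, reinterpreted as a signed long, is negative. The point of the check, illustrated below with made-up numbers, is to reject lengths that have underflowed to huge unsigned values (for example a size computed from untrusted input) before they ever reach access_ok() and the copy loop.

/* Illustrative sketch only: why "(long)n < 0" catches an underflowed
 * length.  A length computed as (user_len - header) wraps to a huge
 * unsigned value when user_len < header; reinterpreted as signed it is
 * negative and gets rejected up front. */
#include <stdio.h>

static int length_is_sane(unsigned long n)
{
	return (long)n >= 0;
}

int main(void)
{
	unsigned long user_len = 4, header = 16;
	unsigned long n = user_len - header;	/* wraps to a huge value */

	printf("n = %lu, sane = %d\n", n, length_is_sane(n));
	return 0;
}
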
1500diff -urNp linux-2.6.32.43/arch/mips/alchemy/devboards/pm.c linux-2.6.32.43/arch/mips/alchemy/devboards/pm.c
1501--- linux-2.6.32.43/arch/mips/alchemy/devboards/pm.c 2011-03-27 14:31:47.000000000 -0400
1502+++ linux-2.6.32.43/arch/mips/alchemy/devboards/pm.c 2011-04-17 15:56:45.000000000 -0400
1503@@ -78,7 +78,7 @@ static void db1x_pm_end(void)
1504
1505 }
1506
1507-static struct platform_suspend_ops db1x_pm_ops = {
1508+static const struct platform_suspend_ops db1x_pm_ops = {
1509 .valid = suspend_valid_only_mem,
1510 .begin = db1x_pm_begin,
1511 .enter = db1x_pm_enter,
1512diff -urNp linux-2.6.32.43/arch/mips/include/asm/elf.h linux-2.6.32.43/arch/mips/include/asm/elf.h
1513--- linux-2.6.32.43/arch/mips/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1514+++ linux-2.6.32.43/arch/mips/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1515@@ -368,4 +368,11 @@ extern int dump_task_fpu(struct task_str
1516 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1517 #endif
1518
1519+#ifdef CONFIG_PAX_ASLR
1520+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1521+
1522+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1523+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1524+#endif
1525+
1526 #endif /* _ASM_ELF_H */
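PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN are bit counts, not byte offsets (note also that PAX_ELF_ET_DYN_BASE evaluates to the same 0x00400000UL for both branches of the ternary). Assuming the usual PaX scheme of applying a random value of that many bits at page granularity — the code that consumes these macros is not in this section — the MIPS values of 27-PAGE_SHIFT and 36-PAGE_SHIFT translate into the randomization spans computed below; PAGE_SHIFT is assumed to be 12.

/* Illustrative sketch only; assumes 4 KB pages and a 64-bit unsigned
 * long (needed for the 36-bit shift).  The delta is assumed to be
 * applied as (random & ((1UL << len) - 1)) << PAGE_SHIFT. */
#include <stdio.h>

int main(void)
{
	unsigned int page_shift = 12;			/* assumed */
	unsigned int len32 = 27 - page_shift;		/* TIF_32BIT_ADDR case */
	unsigned int len64 = 36 - page_shift;

	printf("32-bit: %u random bits, %lu MB span\n",
	       len32, (1UL << (len32 + page_shift)) >> 20);
	printf("64-bit: %u random bits, %lu MB span\n",
	       len64, (1UL << (len64 + page_shift)) >> 20);
	return 0;
}
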
1527diff -urNp linux-2.6.32.43/arch/mips/include/asm/page.h linux-2.6.32.43/arch/mips/include/asm/page.h
1528--- linux-2.6.32.43/arch/mips/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
1529+++ linux-2.6.32.43/arch/mips/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
1530@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa
1531 #ifdef CONFIG_CPU_MIPS32
1532 typedef struct { unsigned long pte_low, pte_high; } pte_t;
1533 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
1534- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
1535+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
1536 #else
1537 typedef struct { unsigned long long pte; } pte_t;
1538 #define pte_val(x) ((x).pte)
1539diff -urNp linux-2.6.32.43/arch/mips/include/asm/system.h linux-2.6.32.43/arch/mips/include/asm/system.h
1540--- linux-2.6.32.43/arch/mips/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
1541+++ linux-2.6.32.43/arch/mips/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
1542@@ -230,6 +230,6 @@ extern void per_cpu_trap_init(void);
1543 */
1544 #define __ARCH_WANT_UNLOCKED_CTXSW
1545
1546-extern unsigned long arch_align_stack(unsigned long sp);
1547+#define arch_align_stack(x) ((x) & ~0xfUL)
1548
1549 #endif /* _ASM_SYSTEM_H */
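On MIPS (and again on PowerPC later in this section) the randomizing arch_align_stack() function is deleted and replaced by a macro that only rounds the stack pointer down to a 16-byte boundary; the sub-page randomization it used to add is dropped, with stack placement randomization instead left to the PAX_DELTA_STACK_LEN mechanism above (an inference, since the consuming code is not shown here).

/* Illustrative sketch only: the replacement macro just rounds sp down to
 * a 16-byte boundary; it no longer subtracts a random sub-page offset. */
#include <stdio.h>

int main(void)
{
	unsigned long sp = 0x7fffffffe123UL;

	printf("%#lx -> %#lx\n", sp, sp & ~0xfUL);	/* ...e120 */
	return 0;
}
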
1550diff -urNp linux-2.6.32.43/arch/mips/kernel/binfmt_elfn32.c linux-2.6.32.43/arch/mips/kernel/binfmt_elfn32.c
1551--- linux-2.6.32.43/arch/mips/kernel/binfmt_elfn32.c 2011-03-27 14:31:47.000000000 -0400
1552+++ linux-2.6.32.43/arch/mips/kernel/binfmt_elfn32.c 2011-04-17 15:56:45.000000000 -0400
1553@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1554 #undef ELF_ET_DYN_BASE
1555 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1556
1557+#ifdef CONFIG_PAX_ASLR
1558+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1559+
1560+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1561+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1562+#endif
1563+
1564 #include <asm/processor.h>
1565 #include <linux/module.h>
1566 #include <linux/elfcore.h>
1567diff -urNp linux-2.6.32.43/arch/mips/kernel/binfmt_elfo32.c linux-2.6.32.43/arch/mips/kernel/binfmt_elfo32.c
1568--- linux-2.6.32.43/arch/mips/kernel/binfmt_elfo32.c 2011-03-27 14:31:47.000000000 -0400
1569+++ linux-2.6.32.43/arch/mips/kernel/binfmt_elfo32.c 2011-04-17 15:56:45.000000000 -0400
1570@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
1571 #undef ELF_ET_DYN_BASE
1572 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
1573
1574+#ifdef CONFIG_PAX_ASLR
1575+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
1576+
1577+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1578+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
1579+#endif
1580+
1581 #include <asm/processor.h>
1582
1583 /*
1584diff -urNp linux-2.6.32.43/arch/mips/kernel/kgdb.c linux-2.6.32.43/arch/mips/kernel/kgdb.c
1585--- linux-2.6.32.43/arch/mips/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
1586+++ linux-2.6.32.43/arch/mips/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
1587@@ -245,6 +245,7 @@ int kgdb_arch_handle_exception(int vecto
1588 return -1;
1589 }
1590
1591+/* cannot be const */
1592 struct kgdb_arch arch_kgdb_ops;
1593
1594 /*
1595diff -urNp linux-2.6.32.43/arch/mips/kernel/process.c linux-2.6.32.43/arch/mips/kernel/process.c
1596--- linux-2.6.32.43/arch/mips/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
1597+++ linux-2.6.32.43/arch/mips/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
1598@@ -470,15 +470,3 @@ unsigned long get_wchan(struct task_stru
1599 out:
1600 return pc;
1601 }
1602-
1603-/*
1604- * Don't forget that the stack pointer must be aligned on a 8 bytes
1605- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
1606- */
1607-unsigned long arch_align_stack(unsigned long sp)
1608-{
1609- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
1610- sp -= get_random_int() & ~PAGE_MASK;
1611-
1612- return sp & ALMASK;
1613-}
1614diff -urNp linux-2.6.32.43/arch/mips/kernel/syscall.c linux-2.6.32.43/arch/mips/kernel/syscall.c
1615--- linux-2.6.32.43/arch/mips/kernel/syscall.c 2011-03-27 14:31:47.000000000 -0400
1616+++ linux-2.6.32.43/arch/mips/kernel/syscall.c 2011-04-17 15:56:45.000000000 -0400
1617@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str
1618 do_color_align = 0;
1619 if (filp || (flags & MAP_SHARED))
1620 do_color_align = 1;
1621+
1622+#ifdef CONFIG_PAX_RANDMMAP
1623+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
1624+#endif
1625+
1626 if (addr) {
1627 if (do_color_align)
1628 addr = COLOUR_ALIGN(addr, pgoff);
1629 else
1630 addr = PAGE_ALIGN(addr);
1631 vmm = find_vma(current->mm, addr);
1632- if (task_size - len >= addr &&
1633- (!vmm || addr + len <= vmm->vm_start))
1634+ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len))
1635 return addr;
1636 }
1637- addr = TASK_UNMAPPED_BASE;
1638+ addr = current->mm->mmap_base;
1639 if (do_color_align)
1640 addr = COLOUR_ALIGN(addr, pgoff);
1641 else
1642@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str
1643 /* At this point: (!vmm || addr < vmm->vm_end). */
1644 if (task_size - len < addr)
1645 return -ENOMEM;
1646- if (!vmm || addr + len <= vmm->vm_start)
1647+ if (check_heap_stack_gap(vmm, addr, len))
1648 return addr;
1649 addr = vmm->vm_end;
1650 if (do_color_align)
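Two behavioural changes in this arch_get_unmapped_area(): a caller-supplied hint address is ignored when the process has MF_PAX_RANDMMAP set, so a fixed hint cannot bypass the randomization, and the linear search now starts at current->mm->mmap_base instead of the constant TASK_UNMAPPED_BASE. The sketch below shows the assumed relationship between the two; how mmap_base acquires its random delta is not part of this hunk.

/* Illustrative sketch only, not from the patch: under RANDMMAP the
 * per-process mmap_base is assumed to be the old fixed base plus a
 * page-aligned random delta, so starting the VMA search there is what
 * actually spreads new mappings around. */
unsigned long pick_search_base(unsigned long task_unmapped_base,
			       unsigned long delta_mmap, int randmmap)
{
	return randmmap ? task_unmapped_base + delta_mmap : task_unmapped_base;
}
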
1651diff -urNp linux-2.6.32.43/arch/mips/mm/fault.c linux-2.6.32.43/arch/mips/mm/fault.c
1652--- linux-2.6.32.43/arch/mips/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1653+++ linux-2.6.32.43/arch/mips/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1654@@ -26,6 +26,23 @@
1655 #include <asm/ptrace.h>
1656 #include <asm/highmem.h> /* For VMALLOC_END */
1657
1658+#ifdef CONFIG_PAX_PAGEEXEC
1659+void pax_report_insns(void *pc, void *sp)
1660+{
1661+ unsigned long i;
1662+
1663+ printk(KERN_ERR "PAX: bytes at PC: ");
1664+ for (i = 0; i < 5; i++) {
1665+ unsigned int c;
1666+ if (get_user(c, (unsigned int *)pc+i))
1667+ printk(KERN_CONT "???????? ");
1668+ else
1669+ printk(KERN_CONT "%08x ", c);
1670+ }
1671+ printk("\n");
1672+}
1673+#endif
1674+
1675 /*
1676 * This routine handles page faults. It determines the address,
1677 * and the problem, and then passes it off to one of the appropriate
1678diff -urNp linux-2.6.32.43/arch/parisc/include/asm/elf.h linux-2.6.32.43/arch/parisc/include/asm/elf.h
1679--- linux-2.6.32.43/arch/parisc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
1680+++ linux-2.6.32.43/arch/parisc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
1681@@ -343,6 +343,13 @@ struct pt_regs; /* forward declaration..
1682
1683 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
1684
1685+#ifdef CONFIG_PAX_ASLR
1686+#define PAX_ELF_ET_DYN_BASE 0x10000UL
1687+
1688+#define PAX_DELTA_MMAP_LEN 16
1689+#define PAX_DELTA_STACK_LEN 16
1690+#endif
1691+
1692 /* This yields a mask that user programs can use to figure out what
1693 instruction set this CPU supports. This could be done in user space,
1694 but it's not easy, and we've already done it here. */
1695diff -urNp linux-2.6.32.43/arch/parisc/include/asm/pgtable.h linux-2.6.32.43/arch/parisc/include/asm/pgtable.h
1696--- linux-2.6.32.43/arch/parisc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
1697+++ linux-2.6.32.43/arch/parisc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
1698@@ -207,6 +207,17 @@
1699 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
1700 #define PAGE_COPY PAGE_EXECREAD
1701 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
1702+
1703+#ifdef CONFIG_PAX_PAGEEXEC
1704+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
1705+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1706+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
1707+#else
1708+# define PAGE_SHARED_NOEXEC PAGE_SHARED
1709+# define PAGE_COPY_NOEXEC PAGE_COPY
1710+# define PAGE_READONLY_NOEXEC PAGE_READONLY
1711+#endif
1712+
1713 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
1714 #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
1715 #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
1716diff -urNp linux-2.6.32.43/arch/parisc/kernel/module.c linux-2.6.32.43/arch/parisc/kernel/module.c
1717--- linux-2.6.32.43/arch/parisc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
1718+++ linux-2.6.32.43/arch/parisc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
1719@@ -95,16 +95,38 @@
1720
1721 /* three functions to determine where in the module core
1722 * or init pieces the location is */
1723+static inline int in_init_rx(struct module *me, void *loc)
1724+{
1725+ return (loc >= me->module_init_rx &&
1726+ loc < (me->module_init_rx + me->init_size_rx));
1727+}
1728+
1729+static inline int in_init_rw(struct module *me, void *loc)
1730+{
1731+ return (loc >= me->module_init_rw &&
1732+ loc < (me->module_init_rw + me->init_size_rw));
1733+}
1734+
1735 static inline int in_init(struct module *me, void *loc)
1736 {
1737- return (loc >= me->module_init &&
1738- loc <= (me->module_init + me->init_size));
1739+ return in_init_rx(me, loc) || in_init_rw(me, loc);
1740+}
1741+
1742+static inline int in_core_rx(struct module *me, void *loc)
1743+{
1744+ return (loc >= me->module_core_rx &&
1745+ loc < (me->module_core_rx + me->core_size_rx));
1746+}
1747+
1748+static inline int in_core_rw(struct module *me, void *loc)
1749+{
1750+ return (loc >= me->module_core_rw &&
1751+ loc < (me->module_core_rw + me->core_size_rw));
1752 }
1753
1754 static inline int in_core(struct module *me, void *loc)
1755 {
1756- return (loc >= me->module_core &&
1757- loc <= (me->module_core + me->core_size));
1758+ return in_core_rx(me, loc) || in_core_rw(me, loc);
1759 }
1760
1761 static inline int in_local(struct module *me, void *loc)
1762@@ -364,13 +386,13 @@ int module_frob_arch_sections(CONST Elf_
1763 }
1764
1765 /* align things a bit */
1766- me->core_size = ALIGN(me->core_size, 16);
1767- me->arch.got_offset = me->core_size;
1768- me->core_size += gots * sizeof(struct got_entry);
1769-
1770- me->core_size = ALIGN(me->core_size, 16);
1771- me->arch.fdesc_offset = me->core_size;
1772- me->core_size += fdescs * sizeof(Elf_Fdesc);
1773+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1774+ me->arch.got_offset = me->core_size_rw;
1775+ me->core_size_rw += gots * sizeof(struct got_entry);
1776+
1777+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
1778+ me->arch.fdesc_offset = me->core_size_rw;
1779+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
1780
1781 me->arch.got_max = gots;
1782 me->arch.fdesc_max = fdescs;
1783@@ -388,7 +410,7 @@ static Elf64_Word get_got(struct module
1784
1785 BUG_ON(value == 0);
1786
1787- got = me->module_core + me->arch.got_offset;
1788+ got = me->module_core_rw + me->arch.got_offset;
1789 for (i = 0; got[i].addr; i++)
1790 if (got[i].addr == value)
1791 goto out;
1792@@ -406,7 +428,7 @@ static Elf64_Word get_got(struct module
1793 #ifdef CONFIG_64BIT
1794 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
1795 {
1796- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
1797+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
1798
1799 if (!value) {
1800 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
1801@@ -424,7 +446,7 @@ static Elf_Addr get_fdesc(struct module
1802
1803 /* Create new one */
1804 fdesc->addr = value;
1805- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1806+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1807 return (Elf_Addr)fdesc;
1808 }
1809 #endif /* CONFIG_64BIT */
1810@@ -848,7 +870,7 @@ register_unwind_table(struct module *me,
1811
1812 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
1813 end = table + sechdrs[me->arch.unwind_section].sh_size;
1814- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
1815+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
1816
1817 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
1818 me->arch.unwind_section, table, end, gp);
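This hunk splits each module image into separate read-execute and read-write regions (module_core_rx/module_core_rw and the init equivalents), moves the GOT and function-descriptor tables into the RW image, and rewrites in_init()/in_core() as unions of per-region tests. The new helpers also tighten the range test from "loc <=" base+size to "loc <", no longer accepting the first byte past the region; all four are instances of the same half-open range check, sketched below with generic names.

/* Illustrative sketch of the common half-open range test the new
 * in_*_rx()/in_*_rw() helpers perform. */
int in_region(const void *loc, const void *base, unsigned long size)
{
	const char *p = loc, *b = base;

	return p >= b && p < b + size;
}
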
1819diff -urNp linux-2.6.32.43/arch/parisc/kernel/sys_parisc.c linux-2.6.32.43/arch/parisc/kernel/sys_parisc.c
1820--- linux-2.6.32.43/arch/parisc/kernel/sys_parisc.c 2011-03-27 14:31:47.000000000 -0400
1821+++ linux-2.6.32.43/arch/parisc/kernel/sys_parisc.c 2011-04-17 15:56:45.000000000 -0400
1822@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u
1823 /* At this point: (!vma || addr < vma->vm_end). */
1824 if (TASK_SIZE - len < addr)
1825 return -ENOMEM;
1826- if (!vma || addr + len <= vma->vm_start)
1827+ if (check_heap_stack_gap(vma, addr, len))
1828 return addr;
1829 addr = vma->vm_end;
1830 }
1831@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str
1832 /* At this point: (!vma || addr < vma->vm_end). */
1833 if (TASK_SIZE - len < addr)
1834 return -ENOMEM;
1835- if (!vma || addr + len <= vma->vm_start)
1836+ if (check_heap_stack_gap(vma, addr, len))
1837 return addr;
1838 addr = DCACHE_ALIGN(vma->vm_end - offset) + offset;
1839 if (addr < vma->vm_end) /* handle wraparound */
1840@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str
1841 if (flags & MAP_FIXED)
1842 return addr;
1843 if (!addr)
1844- addr = TASK_UNMAPPED_BASE;
1845+ addr = current->mm->mmap_base;
1846
1847 if (filp) {
1848 addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
1849diff -urNp linux-2.6.32.43/arch/parisc/kernel/traps.c linux-2.6.32.43/arch/parisc/kernel/traps.c
1850--- linux-2.6.32.43/arch/parisc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
1851+++ linux-2.6.32.43/arch/parisc/kernel/traps.c 2011-04-17 15:56:45.000000000 -0400
1852@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod
1853
1854 down_read(&current->mm->mmap_sem);
1855 vma = find_vma(current->mm,regs->iaoq[0]);
1856- if (vma && (regs->iaoq[0] >= vma->vm_start)
1857- && (vma->vm_flags & VM_EXEC)) {
1858-
1859+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
1860 fault_address = regs->iaoq[0];
1861 fault_space = regs->iasq[0];
1862
1863diff -urNp linux-2.6.32.43/arch/parisc/mm/fault.c linux-2.6.32.43/arch/parisc/mm/fault.c
1864--- linux-2.6.32.43/arch/parisc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
1865+++ linux-2.6.32.43/arch/parisc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
1866@@ -15,6 +15,7 @@
1867 #include <linux/sched.h>
1868 #include <linux/interrupt.h>
1869 #include <linux/module.h>
1870+#include <linux/unistd.h>
1871
1872 #include <asm/uaccess.h>
1873 #include <asm/traps.h>
1874@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex
1875 static unsigned long
1876 parisc_acctyp(unsigned long code, unsigned int inst)
1877 {
1878- if (code == 6 || code == 16)
1879+ if (code == 6 || code == 7 || code == 16)
1880 return VM_EXEC;
1881
1882 switch (inst & 0xf0000000) {
1883@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign
1884 }
1885 #endif
1886
1887+#ifdef CONFIG_PAX_PAGEEXEC
1888+/*
1889+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
1890+ *
1891+ * returns 1 when task should be killed
1892+ * 2 when rt_sigreturn trampoline was detected
1893+ * 3 when unpatched PLT trampoline was detected
1894+ */
1895+static int pax_handle_fetch_fault(struct pt_regs *regs)
1896+{
1897+
1898+#ifdef CONFIG_PAX_EMUPLT
1899+ int err;
1900+
1901+ do { /* PaX: unpatched PLT emulation */
1902+ unsigned int bl, depwi;
1903+
1904+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
1905+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
1906+
1907+ if (err)
1908+ break;
1909+
1910+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
1911+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
1912+
1913+ err = get_user(ldw, (unsigned int *)addr);
1914+ err |= get_user(bv, (unsigned int *)(addr+4));
1915+ err |= get_user(ldw2, (unsigned int *)(addr+8));
1916+
1917+ if (err)
1918+ break;
1919+
1920+ if (ldw == 0x0E801096U &&
1921+ bv == 0xEAC0C000U &&
1922+ ldw2 == 0x0E881095U)
1923+ {
1924+ unsigned int resolver, map;
1925+
1926+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
1927+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
1928+ if (err)
1929+ break;
1930+
1931+ regs->gr[20] = instruction_pointer(regs)+8;
1932+ regs->gr[21] = map;
1933+ regs->gr[22] = resolver;
1934+ regs->iaoq[0] = resolver | 3UL;
1935+ regs->iaoq[1] = regs->iaoq[0] + 4;
1936+ return 3;
1937+ }
1938+ }
1939+ } while (0);
1940+#endif
1941+
1942+#ifdef CONFIG_PAX_EMUTRAMP
1943+
1944+#ifndef CONFIG_PAX_EMUSIGRT
1945+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
1946+ return 1;
1947+#endif
1948+
1949+ do { /* PaX: rt_sigreturn emulation */
1950+ unsigned int ldi1, ldi2, bel, nop;
1951+
1952+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
1953+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
1954+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
1955+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
1956+
1957+ if (err)
1958+ break;
1959+
1960+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
1961+ ldi2 == 0x3414015AU &&
1962+ bel == 0xE4008200U &&
1963+ nop == 0x08000240U)
1964+ {
1965+ regs->gr[25] = (ldi1 & 2) >> 1;
1966+ regs->gr[20] = __NR_rt_sigreturn;
1967+ regs->gr[31] = regs->iaoq[1] + 16;
1968+ regs->sr[0] = regs->iasq[1];
1969+ regs->iaoq[0] = 0x100UL;
1970+ regs->iaoq[1] = regs->iaoq[0] + 4;
1971+ regs->iasq[0] = regs->sr[2];
1972+ regs->iasq[1] = regs->sr[2];
1973+ return 2;
1974+ }
1975+ } while (0);
1976+#endif
1977+
1978+ return 1;
1979+}
1980+
1981+void pax_report_insns(void *pc, void *sp)
1982+{
1983+ unsigned long i;
1984+
1985+ printk(KERN_ERR "PAX: bytes at PC: ");
1986+ for (i = 0; i < 5; i++) {
1987+ unsigned int c;
1988+ if (get_user(c, (unsigned int *)pc+i))
1989+ printk(KERN_CONT "???????? ");
1990+ else
1991+ printk(KERN_CONT "%08x ", c);
1992+ }
1993+ printk("\n");
1994+}
1995+#endif
1996+
1997 int fixup_exception(struct pt_regs *regs)
1998 {
1999 const struct exception_table_entry *fix;
2000@@ -192,8 +303,33 @@ good_area:
2001
2002 acc_type = parisc_acctyp(code,regs->iir);
2003
2004- if ((vma->vm_flags & acc_type) != acc_type)
2005+ if ((vma->vm_flags & acc_type) != acc_type) {
2006+
2007+#ifdef CONFIG_PAX_PAGEEXEC
2008+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
2009+ (address & ~3UL) == instruction_pointer(regs))
2010+ {
2011+ up_read(&mm->mmap_sem);
2012+ switch (pax_handle_fetch_fault(regs)) {
2013+
2014+#ifdef CONFIG_PAX_EMUPLT
2015+ case 3:
2016+ return;
2017+#endif
2018+
2019+#ifdef CONFIG_PAX_EMUTRAMP
2020+ case 2:
2021+ return;
2022+#endif
2023+
2024+ }
2025+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
2026+ do_group_exit(SIGKILL);
2027+ }
2028+#endif
2029+
2030 goto bad_area;
2031+ }
2032
2033 /*
2034 * If for any reason at all we couldn't handle the fault, make
2035diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/device.h linux-2.6.32.43/arch/powerpc/include/asm/device.h
2036--- linux-2.6.32.43/arch/powerpc/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
2037+++ linux-2.6.32.43/arch/powerpc/include/asm/device.h 2011-04-17 15:56:45.000000000 -0400
2038@@ -14,7 +14,7 @@ struct dev_archdata {
2039 struct device_node *of_node;
2040
2041 /* DMA operations on that device */
2042- struct dma_map_ops *dma_ops;
2043+ const struct dma_map_ops *dma_ops;
2044
2045 /*
2046 * When an iommu is in use, dma_data is used as a ptr to the base of the
2047diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/dma-mapping.h linux-2.6.32.43/arch/powerpc/include/asm/dma-mapping.h
2048--- linux-2.6.32.43/arch/powerpc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
2049+++ linux-2.6.32.43/arch/powerpc/include/asm/dma-mapping.h 2011-04-17 15:56:45.000000000 -0400
2050@@ -69,9 +69,9 @@ static inline unsigned long device_to_ma
2051 #ifdef CONFIG_PPC64
2052 extern struct dma_map_ops dma_iommu_ops;
2053 #endif
2054-extern struct dma_map_ops dma_direct_ops;
2055+extern const struct dma_map_ops dma_direct_ops;
2056
2057-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
2058+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
2059 {
2060 /* We don't handle the NULL dev case for ISA for now. We could
2061 * do it via an out of line call but it is not needed for now. The
2062@@ -84,7 +84,7 @@ static inline struct dma_map_ops *get_dm
2063 return dev->archdata.dma_ops;
2064 }
2065
2066-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
2067+static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops)
2068 {
2069 dev->archdata.dma_ops = ops;
2070 }
2071@@ -118,7 +118,7 @@ static inline void set_dma_offset(struct
2072
2073 static inline int dma_supported(struct device *dev, u64 mask)
2074 {
2075- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2076+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2077
2078 if (unlikely(dma_ops == NULL))
2079 return 0;
2080@@ -132,7 +132,7 @@ static inline int dma_supported(struct d
2081
2082 static inline int dma_set_mask(struct device *dev, u64 dma_mask)
2083 {
2084- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2085+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2086
2087 if (unlikely(dma_ops == NULL))
2088 return -EIO;
2089@@ -147,7 +147,7 @@ static inline int dma_set_mask(struct de
2090 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
2091 dma_addr_t *dma_handle, gfp_t flag)
2092 {
2093- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2094+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2095 void *cpu_addr;
2096
2097 BUG_ON(!dma_ops);
2098@@ -162,7 +162,7 @@ static inline void *dma_alloc_coherent(s
2099 static inline void dma_free_coherent(struct device *dev, size_t size,
2100 void *cpu_addr, dma_addr_t dma_handle)
2101 {
2102- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2103+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2104
2105 BUG_ON(!dma_ops);
2106
2107@@ -173,7 +173,7 @@ static inline void dma_free_coherent(str
2108
2109 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
2110 {
2111- struct dma_map_ops *dma_ops = get_dma_ops(dev);
2112+ const struct dma_map_ops *dma_ops = get_dma_ops(dev);
2113
2114 if (dma_ops->mapping_error)
2115 return dma_ops->mapping_error(dev, dma_addr);
2116diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/elf.h linux-2.6.32.43/arch/powerpc/include/asm/elf.h
2117--- linux-2.6.32.43/arch/powerpc/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
2118+++ linux-2.6.32.43/arch/powerpc/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
2119@@ -179,8 +179,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E
2120 the loader. We need to make sure that it is out of the way of the program
2121 that it will "exec", and that there is sufficient room for the brk. */
2122
2123-extern unsigned long randomize_et_dyn(unsigned long base);
2124-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
2125+#define ELF_ET_DYN_BASE (0x20000000)
2126+
2127+#ifdef CONFIG_PAX_ASLR
2128+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
2129+
2130+#ifdef __powerpc64__
2131+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2132+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28)
2133+#else
2134+#define PAX_DELTA_MMAP_LEN 15
2135+#define PAX_DELTA_STACK_LEN 15
2136+#endif
2137+#endif
2138
2139 /*
2140 * Our registers are always unsigned longs, whether we're a 32 bit
2141@@ -275,9 +286,6 @@ extern int arch_setup_additional_pages(s
2142 (0x7ff >> (PAGE_SHIFT - 12)) : \
2143 (0x3ffff >> (PAGE_SHIFT - 12)))
2144
2145-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
2146-#define arch_randomize_brk arch_randomize_brk
2147-
2148 #endif /* __KERNEL__ */
2149
2150 /*
2151diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/iommu.h linux-2.6.32.43/arch/powerpc/include/asm/iommu.h
2152--- linux-2.6.32.43/arch/powerpc/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
2153+++ linux-2.6.32.43/arch/powerpc/include/asm/iommu.h 2011-04-17 15:56:45.000000000 -0400
2154@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi
2155 extern void iommu_init_early_dart(void);
2156 extern void iommu_init_early_pasemi(void);
2157
2158+/* dma-iommu.c */
2159+extern int dma_iommu_dma_supported(struct device *dev, u64 mask);
2160+
2161 #ifdef CONFIG_PCI
2162 extern void pci_iommu_init(void);
2163 extern void pci_direct_iommu_init(void);
2164diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/kmap_types.h linux-2.6.32.43/arch/powerpc/include/asm/kmap_types.h
2165--- linux-2.6.32.43/arch/powerpc/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
2166+++ linux-2.6.32.43/arch/powerpc/include/asm/kmap_types.h 2011-04-17 15:56:45.000000000 -0400
2167@@ -26,6 +26,7 @@ enum km_type {
2168 KM_SOFTIRQ1,
2169 KM_PPC_SYNC_PAGE,
2170 KM_PPC_SYNC_ICACHE,
2171+ KM_CLEARPAGE,
2172 KM_TYPE_NR
2173 };
2174
2175diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/page_64.h linux-2.6.32.43/arch/powerpc/include/asm/page_64.h
2176--- linux-2.6.32.43/arch/powerpc/include/asm/page_64.h 2011-03-27 14:31:47.000000000 -0400
2177+++ linux-2.6.32.43/arch/powerpc/include/asm/page_64.h 2011-04-17 15:56:45.000000000 -0400
2178@@ -180,15 +180,18 @@ do { \
2179 * stack by default, so in the absense of a PT_GNU_STACK program header
2180 * we turn execute permission off.
2181 */
2182-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2183- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2184+#define VM_STACK_DEFAULT_FLAGS32 \
2185+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2186+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2187
2188 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2189 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2190
2191+#ifndef CONFIG_PAX_PAGEEXEC
2192 #define VM_STACK_DEFAULT_FLAGS \
2193 (test_thread_flag(TIF_32BIT) ? \
2194 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
2195+#endif
2196
2197 #include <asm-generic/getorder.h>
2198
2199diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/page.h linux-2.6.32.43/arch/powerpc/include/asm/page.h
2200--- linux-2.6.32.43/arch/powerpc/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
2201+++ linux-2.6.32.43/arch/powerpc/include/asm/page.h 2011-04-17 15:56:45.000000000 -0400
2202@@ -116,8 +116,9 @@ extern phys_addr_t kernstart_addr;
2203 * and needs to be executable. This means the whole heap ends
2204 * up being executable.
2205 */
2206-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
2207- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2208+#define VM_DATA_DEFAULT_FLAGS32 \
2209+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
2210+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2211
2212 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
2213 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
2214@@ -145,6 +146,9 @@ extern phys_addr_t kernstart_addr;
2215 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
2216 #endif
2217
2218+#define ktla_ktva(addr) (addr)
2219+#define ktva_ktla(addr) (addr)
2220+
2221 #ifndef __ASSEMBLY__
2222
2223 #undef STRICT_MM_TYPECHECKS
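In both page_64.h and page.h above, the 32-bit default stack and data flags stop including VM_EXEC unconditionally: executable permission is now granted only when the ELF loader set READ_IMPLIES_EXEC for the process, and the combined VM_STACK_DEFAULT_FLAGS definition is left out under CONFIG_PAX_PAGEEXEC, presumably so a PaX-aware definition elsewhere takes over (that definition is not in this section). The ktla_ktva()/ktva_ktla() macros are identity mappings on PowerPC. A minimal sketch of the flag selection:

/* Illustrative sketch only: stand-in flag values, not the kernel's. */
#define VM_EXEC_MODEL	0x4UL
#define VM_BASE_MODEL	0x3UL		/* stands in for VM_READ | VM_WRITE | VM_MAY* */

unsigned long default_flags32(int read_implies_exec)
{
	return (read_implies_exec ? VM_EXEC_MODEL : 0) | VM_BASE_MODEL;
}
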
2224diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/pci.h linux-2.6.32.43/arch/powerpc/include/asm/pci.h
2225--- linux-2.6.32.43/arch/powerpc/include/asm/pci.h 2011-03-27 14:31:47.000000000 -0400
2226+++ linux-2.6.32.43/arch/powerpc/include/asm/pci.h 2011-04-17 15:56:45.000000000 -0400
2227@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq
2228 }
2229
2230 #ifdef CONFIG_PCI
2231-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops);
2232-extern struct dma_map_ops *get_pci_dma_ops(void);
2233+extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops);
2234+extern const struct dma_map_ops *get_pci_dma_ops(void);
2235 #else /* CONFIG_PCI */
2236 #define set_pci_dma_ops(d)
2237 #define get_pci_dma_ops() NULL
2238diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/pgtable.h linux-2.6.32.43/arch/powerpc/include/asm/pgtable.h
2239--- linux-2.6.32.43/arch/powerpc/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
2240+++ linux-2.6.32.43/arch/powerpc/include/asm/pgtable.h 2011-04-17 15:56:45.000000000 -0400
2241@@ -2,6 +2,7 @@
2242 #define _ASM_POWERPC_PGTABLE_H
2243 #ifdef __KERNEL__
2244
2245+#include <linux/const.h>
2246 #ifndef __ASSEMBLY__
2247 #include <asm/processor.h> /* For TASK_SIZE */
2248 #include <asm/mmu.h>
2249diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/pte-hash32.h linux-2.6.32.43/arch/powerpc/include/asm/pte-hash32.h
2250--- linux-2.6.32.43/arch/powerpc/include/asm/pte-hash32.h 2011-03-27 14:31:47.000000000 -0400
2251+++ linux-2.6.32.43/arch/powerpc/include/asm/pte-hash32.h 2011-04-17 15:56:45.000000000 -0400
2252@@ -21,6 +21,7 @@
2253 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
2254 #define _PAGE_USER 0x004 /* usermode access allowed */
2255 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
2256+#define _PAGE_EXEC _PAGE_GUARDED
2257 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
2258 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
2259 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
2260diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/reg.h linux-2.6.32.43/arch/powerpc/include/asm/reg.h
2261--- linux-2.6.32.43/arch/powerpc/include/asm/reg.h 2011-03-27 14:31:47.000000000 -0400
2262+++ linux-2.6.32.43/arch/powerpc/include/asm/reg.h 2011-04-17 15:56:45.000000000 -0400
2263@@ -191,6 +191,7 @@
2264 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
2265 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
2266 #define DSISR_NOHPTE 0x40000000 /* no translation found */
2267+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
2268 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
2269 #define DSISR_ISSTORE 0x02000000 /* access was a store */
2270 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
2271diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/swiotlb.h linux-2.6.32.43/arch/powerpc/include/asm/swiotlb.h
2272--- linux-2.6.32.43/arch/powerpc/include/asm/swiotlb.h 2011-03-27 14:31:47.000000000 -0400
2273+++ linux-2.6.32.43/arch/powerpc/include/asm/swiotlb.h 2011-04-17 15:56:45.000000000 -0400
2274@@ -13,7 +13,7 @@
2275
2276 #include <linux/swiotlb.h>
2277
2278-extern struct dma_map_ops swiotlb_dma_ops;
2279+extern const struct dma_map_ops swiotlb_dma_ops;
2280
2281 static inline void dma_mark_clean(void *addr, size_t size) {}
2282
2283diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/system.h linux-2.6.32.43/arch/powerpc/include/asm/system.h
2284--- linux-2.6.32.43/arch/powerpc/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
2285+++ linux-2.6.32.43/arch/powerpc/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
2286@@ -531,7 +531,7 @@ __cmpxchg_local(volatile void *ptr, unsi
2287 #define cmpxchg64_local(ptr, o, n) __cmpxchg64_local_generic((ptr), (o), (n))
2288 #endif
2289
2290-extern unsigned long arch_align_stack(unsigned long sp);
2291+#define arch_align_stack(x) ((x) & ~0xfUL)
2292
2293 /* Used in very early kernel initialization. */
2294 extern unsigned long reloc_offset(void);
2295diff -urNp linux-2.6.32.43/arch/powerpc/include/asm/uaccess.h linux-2.6.32.43/arch/powerpc/include/asm/uaccess.h
2296--- linux-2.6.32.43/arch/powerpc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
2297+++ linux-2.6.32.43/arch/powerpc/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
2298@@ -13,6 +13,8 @@
2299 #define VERIFY_READ 0
2300 #define VERIFY_WRITE 1
2301
2302+extern void check_object_size(const void *ptr, unsigned long n, bool to);
2303+
2304 /*
2305 * The fs value determines whether argument validity checking should be
2306 * performed or not. If get_fs() == USER_DS, checking is performed, with
2307@@ -327,52 +329,6 @@ do { \
2308 extern unsigned long __copy_tofrom_user(void __user *to,
2309 const void __user *from, unsigned long size);
2310
2311-#ifndef __powerpc64__
2312-
2313-static inline unsigned long copy_from_user(void *to,
2314- const void __user *from, unsigned long n)
2315-{
2316- unsigned long over;
2317-
2318- if (access_ok(VERIFY_READ, from, n))
2319- return __copy_tofrom_user((__force void __user *)to, from, n);
2320- if ((unsigned long)from < TASK_SIZE) {
2321- over = (unsigned long)from + n - TASK_SIZE;
2322- return __copy_tofrom_user((__force void __user *)to, from,
2323- n - over) + over;
2324- }
2325- return n;
2326-}
2327-
2328-static inline unsigned long copy_to_user(void __user *to,
2329- const void *from, unsigned long n)
2330-{
2331- unsigned long over;
2332-
2333- if (access_ok(VERIFY_WRITE, to, n))
2334- return __copy_tofrom_user(to, (__force void __user *)from, n);
2335- if ((unsigned long)to < TASK_SIZE) {
2336- over = (unsigned long)to + n - TASK_SIZE;
2337- return __copy_tofrom_user(to, (__force void __user *)from,
2338- n - over) + over;
2339- }
2340- return n;
2341-}
2342-
2343-#else /* __powerpc64__ */
2344-
2345-#define __copy_in_user(to, from, size) \
2346- __copy_tofrom_user((to), (from), (size))
2347-
2348-extern unsigned long copy_from_user(void *to, const void __user *from,
2349- unsigned long n);
2350-extern unsigned long copy_to_user(void __user *to, const void *from,
2351- unsigned long n);
2352-extern unsigned long copy_in_user(void __user *to, const void __user *from,
2353- unsigned long n);
2354-
2355-#endif /* __powerpc64__ */
2356-
2357 static inline unsigned long __copy_from_user_inatomic(void *to,
2358 const void __user *from, unsigned long n)
2359 {
2360@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_
2361 if (ret == 0)
2362 return 0;
2363 }
2364+
2365+ if (!__builtin_constant_p(n))
2366+ check_object_size(to, n, false);
2367+
2368 return __copy_tofrom_user((__force void __user *)to, from, n);
2369 }
2370
2371@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us
2372 if (ret == 0)
2373 return 0;
2374 }
2375+
2376+ if (!__builtin_constant_p(n))
2377+ check_object_size(from, n, true);
2378+
2379 return __copy_tofrom_user(to, (__force const void __user *)from, n);
2380 }
2381
2382@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us
2383 return __copy_to_user_inatomic(to, from, size);
2384 }
2385
2386+#ifndef __powerpc64__
2387+
2388+static inline unsigned long __must_check copy_from_user(void *to,
2389+ const void __user *from, unsigned long n)
2390+{
2391+ unsigned long over;
2392+
2393+ if ((long)n < 0)
2394+ return n;
2395+
2396+ if (access_ok(VERIFY_READ, from, n)) {
2397+ if (!__builtin_constant_p(n))
2398+ check_object_size(to, n, false);
2399+ return __copy_tofrom_user((__force void __user *)to, from, n);
2400+ }
2401+ if ((unsigned long)from < TASK_SIZE) {
2402+ over = (unsigned long)from + n - TASK_SIZE;
2403+ if (!__builtin_constant_p(n - over))
2404+ check_object_size(to, n - over, false);
2405+ return __copy_tofrom_user((__force void __user *)to, from,
2406+ n - over) + over;
2407+ }
2408+ return n;
2409+}
2410+
2411+static inline unsigned long __must_check copy_to_user(void __user *to,
2412+ const void *from, unsigned long n)
2413+{
2414+ unsigned long over;
2415+
2416+ if ((long)n < 0)
2417+ return n;
2418+
2419+ if (access_ok(VERIFY_WRITE, to, n)) {
2420+ if (!__builtin_constant_p(n))
2421+ check_object_size(from, n, true);
2422+ return __copy_tofrom_user(to, (__force void __user *)from, n);
2423+ }
2424+ if ((unsigned long)to < TASK_SIZE) {
2425+ over = (unsigned long)to + n - TASK_SIZE;
2426+ if (!__builtin_constant_p(n))
2427+ check_object_size(from, n - over, true);
2428+ return __copy_tofrom_user(to, (__force void __user *)from,
2429+ n - over) + over;
2430+ }
2431+ return n;
2432+}
2433+
2434+#else /* __powerpc64__ */
2435+
2436+#define __copy_in_user(to, from, size) \
2437+ __copy_tofrom_user((to), (from), (size))
2438+
2439+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2440+{
2441+ if ((long)n < 0 || n > INT_MAX)
2442+ return n;
2443+
2444+ if (!__builtin_constant_p(n))
2445+ check_object_size(to, n, false);
2446+
2447+ if (likely(access_ok(VERIFY_READ, from, n)))
2448+ n = __copy_from_user(to, from, n);
2449+ else
2450+ memset(to, 0, n);
2451+ return n;
2452+}
2453+
2454+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2455+{
2456+ if ((long)n < 0 || n > INT_MAX)
2457+ return n;
2458+
2459+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
2460+ if (!__builtin_constant_p(n))
2461+ check_object_size(from, n, true);
2462+ n = __copy_to_user(to, from, n);
2463+ }
2464+ return n;
2465+}
2466+
2467+extern unsigned long copy_in_user(void __user *to, const void __user *from,
2468+ unsigned long n);
2469+
2470+#endif /* __powerpc64__ */
2471+
2472 extern unsigned long __clear_user(void __user *addr, unsigned long size);
2473
2474 static inline unsigned long clear_user(void __user *addr, unsigned long size)
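copy_from_user()/copy_to_user() move out of lib/usercopy_64.c (removed later in this section) and into the header so that every copy with a non-constant length can be routed through check_object_size(), declared at the top of this hunk. The 32-bit versions also gain the negative-length guard seen earlier, and the 64-bit versions additionally cap the length at INT_MAX and zero the destination when the read is not permitted. check_object_size() itself is not shown in this section; the sketch below is only a userspace model of the kind of bounds check it stands for, not its real slab- and stack-aware implementation.

/* Illustrative sketch only -- a userspace model of the bounds check
 * check_object_size() represents: a copy that starts inside a kernel
 * object must not run past that object's end. */
#include <stdio.h>

struct obj_span { const char *start; unsigned long size; };

static int copy_fits_object(const struct obj_span *o,
			    const char *ptr, unsigned long n)
{
	return ptr >= o->start &&
	       n <= o->size - (unsigned long)(ptr - o->start);
}

int main(void)
{
	char buf[64];
	struct obj_span span = { buf, sizeof(buf) };

	printf("%d %d\n",
	       copy_fits_object(&span, buf + 8, 32),	/* 1: inside  */
	       copy_fits_object(&span, buf + 8, 100));	/* 0: overrun */
	return 0;
}
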
2475diff -urNp linux-2.6.32.43/arch/powerpc/kernel/cacheinfo.c linux-2.6.32.43/arch/powerpc/kernel/cacheinfo.c
2476--- linux-2.6.32.43/arch/powerpc/kernel/cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
2477+++ linux-2.6.32.43/arch/powerpc/kernel/cacheinfo.c 2011-04-17 15:56:45.000000000 -0400
2478@@ -642,7 +642,7 @@ static struct kobj_attribute *cache_inde
2479 &cache_assoc_attr,
2480 };
2481
2482-static struct sysfs_ops cache_index_ops = {
2483+static const struct sysfs_ops cache_index_ops = {
2484 .show = cache_index_show,
2485 };
2486
2487diff -urNp linux-2.6.32.43/arch/powerpc/kernel/dma.c linux-2.6.32.43/arch/powerpc/kernel/dma.c
2488--- linux-2.6.32.43/arch/powerpc/kernel/dma.c 2011-03-27 14:31:47.000000000 -0400
2489+++ linux-2.6.32.43/arch/powerpc/kernel/dma.c 2011-04-17 15:56:45.000000000 -0400
2490@@ -134,7 +134,7 @@ static inline void dma_direct_sync_singl
2491 }
2492 #endif
2493
2494-struct dma_map_ops dma_direct_ops = {
2495+const struct dma_map_ops dma_direct_ops = {
2496 .alloc_coherent = dma_direct_alloc_coherent,
2497 .free_coherent = dma_direct_free_coherent,
2498 .map_sg = dma_direct_map_sg,
2499diff -urNp linux-2.6.32.43/arch/powerpc/kernel/dma-iommu.c linux-2.6.32.43/arch/powerpc/kernel/dma-iommu.c
2500--- linux-2.6.32.43/arch/powerpc/kernel/dma-iommu.c 2011-03-27 14:31:47.000000000 -0400
2501+++ linux-2.6.32.43/arch/powerpc/kernel/dma-iommu.c 2011-04-17 15:56:45.000000000 -0400
2502@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de
2503 }
2504
2505 /* We support DMA to/from any memory page via the iommu */
2506-static int dma_iommu_dma_supported(struct device *dev, u64 mask)
2507+int dma_iommu_dma_supported(struct device *dev, u64 mask)
2508 {
2509 struct iommu_table *tbl = get_iommu_table_base(dev);
2510
2511diff -urNp linux-2.6.32.43/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.32.43/arch/powerpc/kernel/dma-swiotlb.c
2512--- linux-2.6.32.43/arch/powerpc/kernel/dma-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
2513+++ linux-2.6.32.43/arch/powerpc/kernel/dma-swiotlb.c 2011-04-17 15:56:45.000000000 -0400
2514@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable;
2515 * map_page, and unmap_page on highmem, use normal dma_ops
2516 * for everything else.
2517 */
2518-struct dma_map_ops swiotlb_dma_ops = {
2519+const struct dma_map_ops swiotlb_dma_ops = {
2520 .alloc_coherent = dma_direct_alloc_coherent,
2521 .free_coherent = dma_direct_free_coherent,
2522 .map_sg = swiotlb_map_sg_attrs,
2523diff -urNp linux-2.6.32.43/arch/powerpc/kernel/exceptions-64e.S linux-2.6.32.43/arch/powerpc/kernel/exceptions-64e.S
2524--- linux-2.6.32.43/arch/powerpc/kernel/exceptions-64e.S 2011-03-27 14:31:47.000000000 -0400
2525+++ linux-2.6.32.43/arch/powerpc/kernel/exceptions-64e.S 2011-04-17 15:56:45.000000000 -0400
2526@@ -455,6 +455,7 @@ storage_fault_common:
2527 std r14,_DAR(r1)
2528 std r15,_DSISR(r1)
2529 addi r3,r1,STACK_FRAME_OVERHEAD
2530+ bl .save_nvgprs
2531 mr r4,r14
2532 mr r5,r15
2533 ld r14,PACA_EXGEN+EX_R14(r13)
2534@@ -464,8 +465,7 @@ storage_fault_common:
2535 cmpdi r3,0
2536 bne- 1f
2537 b .ret_from_except_lite
2538-1: bl .save_nvgprs
2539- mr r5,r3
2540+1: mr r5,r3
2541 addi r3,r1,STACK_FRAME_OVERHEAD
2542 ld r4,_DAR(r1)
2543 bl .bad_page_fault
2544diff -urNp linux-2.6.32.43/arch/powerpc/kernel/exceptions-64s.S linux-2.6.32.43/arch/powerpc/kernel/exceptions-64s.S
2545--- linux-2.6.32.43/arch/powerpc/kernel/exceptions-64s.S 2011-03-27 14:31:47.000000000 -0400
2546+++ linux-2.6.32.43/arch/powerpc/kernel/exceptions-64s.S 2011-04-17 15:56:45.000000000 -0400
2547@@ -818,10 +818,10 @@ handle_page_fault:
2548 11: ld r4,_DAR(r1)
2549 ld r5,_DSISR(r1)
2550 addi r3,r1,STACK_FRAME_OVERHEAD
2551+ bl .save_nvgprs
2552 bl .do_page_fault
2553 cmpdi r3,0
2554 beq+ 13f
2555- bl .save_nvgprs
2556 mr r5,r3
2557 addi r3,r1,STACK_FRAME_OVERHEAD
2558 lwz r4,_DAR(r1)
2559diff -urNp linux-2.6.32.43/arch/powerpc/kernel/ibmebus.c linux-2.6.32.43/arch/powerpc/kernel/ibmebus.c
2560--- linux-2.6.32.43/arch/powerpc/kernel/ibmebus.c 2011-03-27 14:31:47.000000000 -0400
2561+++ linux-2.6.32.43/arch/powerpc/kernel/ibmebus.c 2011-04-17 15:56:45.000000000 -0400
2562@@ -127,7 +127,7 @@ static int ibmebus_dma_supported(struct
2563 return 1;
2564 }
2565
2566-static struct dma_map_ops ibmebus_dma_ops = {
2567+static const struct dma_map_ops ibmebus_dma_ops = {
2568 .alloc_coherent = ibmebus_alloc_coherent,
2569 .free_coherent = ibmebus_free_coherent,
2570 .map_sg = ibmebus_map_sg,
2571diff -urNp linux-2.6.32.43/arch/powerpc/kernel/kgdb.c linux-2.6.32.43/arch/powerpc/kernel/kgdb.c
2572--- linux-2.6.32.43/arch/powerpc/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
2573+++ linux-2.6.32.43/arch/powerpc/kernel/kgdb.c 2011-04-17 15:56:45.000000000 -0400
2574@@ -126,7 +126,7 @@ static int kgdb_handle_breakpoint(struct
2575 if (kgdb_handle_exception(0, SIGTRAP, 0, regs) != 0)
2576 return 0;
2577
2578- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2579+ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
2580 regs->nip += 4;
2581
2582 return 1;
2583@@ -353,7 +353,7 @@ int kgdb_arch_handle_exception(int vecto
2584 /*
2585 * Global data
2586 */
2587-struct kgdb_arch arch_kgdb_ops = {
2588+const struct kgdb_arch arch_kgdb_ops = {
2589 .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08},
2590 };
2591
2592diff -urNp linux-2.6.32.43/arch/powerpc/kernel/module_32.c linux-2.6.32.43/arch/powerpc/kernel/module_32.c
2593--- linux-2.6.32.43/arch/powerpc/kernel/module_32.c 2011-03-27 14:31:47.000000000 -0400
2594+++ linux-2.6.32.43/arch/powerpc/kernel/module_32.c 2011-04-17 15:56:45.000000000 -0400
2595@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr
2596 me->arch.core_plt_section = i;
2597 }
2598 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
2599- printk("Module doesn't contain .plt or .init.plt sections.\n");
2600+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
2601 return -ENOEXEC;
2602 }
2603
2604@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati
2605
2606 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
2607 /* Init, or core PLT? */
2608- if (location >= mod->module_core
2609- && location < mod->module_core + mod->core_size)
2610+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
2611+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
2612 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
2613- else
2614+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
2615+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
2616 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
2617+ else {
2618+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
2619+ return ~0UL;
2620+ }
2621
2622 /* Find this entry, or if that fails, the next avail. entry */
2623 while (entry->jump[0]) {
2624diff -urNp linux-2.6.32.43/arch/powerpc/kernel/module.c linux-2.6.32.43/arch/powerpc/kernel/module.c
2625--- linux-2.6.32.43/arch/powerpc/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
2626+++ linux-2.6.32.43/arch/powerpc/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
2627@@ -31,11 +31,24 @@
2628
2629 LIST_HEAD(module_bug_list);
2630
2631+#ifdef CONFIG_PAX_KERNEXEC
2632 void *module_alloc(unsigned long size)
2633 {
2634 if (size == 0)
2635 return NULL;
2636
2637+ return vmalloc(size);
2638+}
2639+
2640+void *module_alloc_exec(unsigned long size)
2641+#else
2642+void *module_alloc(unsigned long size)
2643+#endif
2644+
2645+{
2646+ if (size == 0)
2647+ return NULL;
2648+
2649 return vmalloc_exec(size);
2650 }
2651
2652@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi
2653 vfree(module_region);
2654 }
2655
2656+#ifdef CONFIG_PAX_KERNEXEC
2657+void module_free_exec(struct module *mod, void *module_region)
2658+{
2659+ module_free(mod, module_region);
2660+}
2661+#endif
2662+
2663 static const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
2664 const Elf_Shdr *sechdrs,
2665 const char *name)
2666diff -urNp linux-2.6.32.43/arch/powerpc/kernel/pci-common.c linux-2.6.32.43/arch/powerpc/kernel/pci-common.c
2667--- linux-2.6.32.43/arch/powerpc/kernel/pci-common.c 2011-03-27 14:31:47.000000000 -0400
2668+++ linux-2.6.32.43/arch/powerpc/kernel/pci-common.c 2011-04-17 15:56:45.000000000 -0400
2669@@ -50,14 +50,14 @@ resource_size_t isa_mem_base;
2670 unsigned int ppc_pci_flags = 0;
2671
2672
2673-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2674+static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops;
2675
2676-void set_pci_dma_ops(struct dma_map_ops *dma_ops)
2677+void set_pci_dma_ops(const struct dma_map_ops *dma_ops)
2678 {
2679 pci_dma_ops = dma_ops;
2680 }
2681
2682-struct dma_map_ops *get_pci_dma_ops(void)
2683+const struct dma_map_ops *get_pci_dma_ops(void)
2684 {
2685 return pci_dma_ops;
2686 }
2687diff -urNp linux-2.6.32.43/arch/powerpc/kernel/process.c linux-2.6.32.43/arch/powerpc/kernel/process.c
2688--- linux-2.6.32.43/arch/powerpc/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
2689+++ linux-2.6.32.43/arch/powerpc/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
2690@@ -539,8 +539,8 @@ void show_regs(struct pt_regs * regs)
2691 * Lookup NIP late so we have the best change of getting the
2692 * above info out without failing
2693 */
2694- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
2695- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
2696+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
2697+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
2698 #endif
2699 show_stack(current, (unsigned long *) regs->gpr[1]);
2700 if (!user_mode(regs))
2701@@ -1034,10 +1034,10 @@ void show_stack(struct task_struct *tsk,
2702 newsp = stack[0];
2703 ip = stack[STACK_FRAME_LR_SAVE];
2704 if (!firstframe || ip != lr) {
2705- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
2706+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
2707 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2708 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
2709- printk(" (%pS)",
2710+ printk(" (%pA)",
2711 (void *)current->ret_stack[curr_frame].ret);
2712 curr_frame--;
2713 }
2714@@ -1057,7 +1057,7 @@ void show_stack(struct task_struct *tsk,
2715 struct pt_regs *regs = (struct pt_regs *)
2716 (sp + STACK_FRAME_OVERHEAD);
2717 lr = regs->link;
2718- printk("--- Exception: %lx at %pS\n LR = %pS\n",
2719+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
2720 regs->trap, (void *)regs->nip, (void *)lr);
2721 firstframe = 1;
2722 }
2723@@ -1134,58 +1134,3 @@ void thread_info_cache_init(void)
2724 }
2725
2726 #endif /* THREAD_SHIFT < PAGE_SHIFT */
2727-
2728-unsigned long arch_align_stack(unsigned long sp)
2729-{
2730- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
2731- sp -= get_random_int() & ~PAGE_MASK;
2732- return sp & ~0xf;
2733-}
2734-
2735-static inline unsigned long brk_rnd(void)
2736-{
2737- unsigned long rnd = 0;
2738-
2739- /* 8MB for 32bit, 1GB for 64bit */
2740- if (is_32bit_task())
2741- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
2742- else
2743- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
2744-
2745- return rnd << PAGE_SHIFT;
2746-}
2747-
2748-unsigned long arch_randomize_brk(struct mm_struct *mm)
2749-{
2750- unsigned long base = mm->brk;
2751- unsigned long ret;
2752-
2753-#ifdef CONFIG_PPC_STD_MMU_64
2754- /*
2755- * If we are using 1TB segments and we are allowed to randomise
2756- * the heap, we can put it above 1TB so it is backed by a 1TB
2757- * segment. Otherwise the heap will be in the bottom 1TB
2758- * which always uses 256MB segments and this may result in a
2759- * performance penalty.
2760- */
2761- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
2762- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
2763-#endif
2764-
2765- ret = PAGE_ALIGN(base + brk_rnd());
2766-
2767- if (ret < mm->brk)
2768- return mm->brk;
2769-
2770- return ret;
2771-}
2772-
2773-unsigned long randomize_et_dyn(unsigned long base)
2774-{
2775- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
2776-
2777- if (ret < base)
2778- return base;
2779-
2780- return ret;
2781-}
2782diff -urNp linux-2.6.32.43/arch/powerpc/kernel/signal_32.c linux-2.6.32.43/arch/powerpc/kernel/signal_32.c
2783--- linux-2.6.32.43/arch/powerpc/kernel/signal_32.c 2011-03-27 14:31:47.000000000 -0400
2784+++ linux-2.6.32.43/arch/powerpc/kernel/signal_32.c 2011-04-17 15:56:45.000000000 -0400
2785@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig
2786 /* Save user registers on the stack */
2787 frame = &rt_sf->uc.uc_mcontext;
2788 addr = frame;
2789- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
2790+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2791 if (save_user_regs(regs, frame, 0, 1))
2792 goto badframe;
2793 regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp;
2794diff -urNp linux-2.6.32.43/arch/powerpc/kernel/signal_64.c linux-2.6.32.43/arch/powerpc/kernel/signal_64.c
2795--- linux-2.6.32.43/arch/powerpc/kernel/signal_64.c 2011-03-27 14:31:47.000000000 -0400
2796+++ linux-2.6.32.43/arch/powerpc/kernel/signal_64.c 2011-04-17 15:56:45.000000000 -0400
2797@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct
2798 current->thread.fpscr.val = 0;
2799
2800 /* Set up to return from userspace. */
2801- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
2802+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
2803 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
2804 } else {
2805 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
2806diff -urNp linux-2.6.32.43/arch/powerpc/kernel/sys_ppc32.c linux-2.6.32.43/arch/powerpc/kernel/sys_ppc32.c
2807--- linux-2.6.32.43/arch/powerpc/kernel/sys_ppc32.c 2011-03-27 14:31:47.000000000 -0400
2808+++ linux-2.6.32.43/arch/powerpc/kernel/sys_ppc32.c 2011-04-17 15:56:45.000000000 -0400
2809@@ -563,10 +563,10 @@ asmlinkage long compat_sys_sysctl(struct
2810 if (oldlenp) {
2811 if (!error) {
2812 if (get_user(oldlen, oldlenp) ||
2813- put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)))
2814+ put_user(oldlen, (compat_size_t __user *)compat_ptr(tmp.oldlenp)) ||
2815+ copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused)))
2816 error = -EFAULT;
2817 }
2818- copy_to_user(args->__unused, tmp.__unused, sizeof(tmp.__unused));
2819 }
2820 return error;
2821 }
2822diff -urNp linux-2.6.32.43/arch/powerpc/kernel/traps.c linux-2.6.32.43/arch/powerpc/kernel/traps.c
2823--- linux-2.6.32.43/arch/powerpc/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
2824+++ linux-2.6.32.43/arch/powerpc/kernel/traps.c 2011-06-13 21:33:37.000000000 -0400
2825@@ -99,6 +99,8 @@ static void pmac_backlight_unblank(void)
2826 static inline void pmac_backlight_unblank(void) { }
2827 #endif
2828
2829+extern void gr_handle_kernel_exploit(void);
2830+
2831 int die(const char *str, struct pt_regs *regs, long err)
2832 {
2833 static struct {
2834@@ -168,6 +170,8 @@ int die(const char *str, struct pt_regs
2835 if (panic_on_oops)
2836 panic("Fatal exception");
2837
2838+ gr_handle_kernel_exploit();
2839+
2840 oops_exit();
2841 do_exit(err);
2842
2843diff -urNp linux-2.6.32.43/arch/powerpc/kernel/vdso.c linux-2.6.32.43/arch/powerpc/kernel/vdso.c
2844--- linux-2.6.32.43/arch/powerpc/kernel/vdso.c 2011-03-27 14:31:47.000000000 -0400
2845+++ linux-2.6.32.43/arch/powerpc/kernel/vdso.c 2011-04-17 15:56:45.000000000 -0400
2846@@ -36,6 +36,7 @@
2847 #include <asm/firmware.h>
2848 #include <asm/vdso.h>
2849 #include <asm/vdso_datapage.h>
2850+#include <asm/mman.h>
2851
2852 #include "setup.h"
2853
2854@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l
2855 vdso_base = VDSO32_MBASE;
2856 #endif
2857
2858- current->mm->context.vdso_base = 0;
2859+ current->mm->context.vdso_base = ~0UL;
2860
2861 /* vDSO has a problem and was disabled, just don't "enable" it for the
2862 * process
2863@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l
2864 vdso_base = get_unmapped_area(NULL, vdso_base,
2865 (vdso_pages << PAGE_SHIFT) +
2866 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
2867- 0, 0);
2868+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
2869 if (IS_ERR_VALUE(vdso_base)) {
2870 rc = vdso_base;
2871 goto fail_mmapsem;
2872diff -urNp linux-2.6.32.43/arch/powerpc/kernel/vio.c linux-2.6.32.43/arch/powerpc/kernel/vio.c
2873--- linux-2.6.32.43/arch/powerpc/kernel/vio.c 2011-03-27 14:31:47.000000000 -0400
2874+++ linux-2.6.32.43/arch/powerpc/kernel/vio.c 2011-04-17 15:56:45.000000000 -0400
2875@@ -601,11 +601,12 @@ static void vio_dma_iommu_unmap_sg(struc
2876 vio_cmo_dealloc(viodev, alloc_size);
2877 }
2878
2879-struct dma_map_ops vio_dma_mapping_ops = {
2880+static const struct dma_map_ops vio_dma_mapping_ops = {
2881 .alloc_coherent = vio_dma_iommu_alloc_coherent,
2882 .free_coherent = vio_dma_iommu_free_coherent,
2883 .map_sg = vio_dma_iommu_map_sg,
2884 .unmap_sg = vio_dma_iommu_unmap_sg,
2885+ .dma_supported = dma_iommu_dma_supported,
2886 .map_page = vio_dma_iommu_map_page,
2887 .unmap_page = vio_dma_iommu_unmap_page,
2888
2889@@ -857,7 +858,6 @@ static void vio_cmo_bus_remove(struct vi
2890
2891 static void vio_cmo_set_dma_ops(struct vio_dev *viodev)
2892 {
2893- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported;
2894 viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops;
2895 }
2896
2897diff -urNp linux-2.6.32.43/arch/powerpc/lib/usercopy_64.c linux-2.6.32.43/arch/powerpc/lib/usercopy_64.c
2898--- linux-2.6.32.43/arch/powerpc/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
2899+++ linux-2.6.32.43/arch/powerpc/lib/usercopy_64.c 2011-04-17 15:56:45.000000000 -0400
2900@@ -9,22 +9,6 @@
2901 #include <linux/module.h>
2902 #include <asm/uaccess.h>
2903
2904-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
2905-{
2906- if (likely(access_ok(VERIFY_READ, from, n)))
2907- n = __copy_from_user(to, from, n);
2908- else
2909- memset(to, 0, n);
2910- return n;
2911-}
2912-
2913-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
2914-{
2915- if (likely(access_ok(VERIFY_WRITE, to, n)))
2916- n = __copy_to_user(to, from, n);
2917- return n;
2918-}
2919-
2920 unsigned long copy_in_user(void __user *to, const void __user *from,
2921 unsigned long n)
2922 {
2923@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *
2924 return n;
2925 }
2926
2927-EXPORT_SYMBOL(copy_from_user);
2928-EXPORT_SYMBOL(copy_to_user);
2929 EXPORT_SYMBOL(copy_in_user);
2930
2931diff -urNp linux-2.6.32.43/arch/powerpc/mm/fault.c linux-2.6.32.43/arch/powerpc/mm/fault.c
2932--- linux-2.6.32.43/arch/powerpc/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
2933+++ linux-2.6.32.43/arch/powerpc/mm/fault.c 2011-04-17 15:56:45.000000000 -0400
2934@@ -30,6 +30,10 @@
2935 #include <linux/kprobes.h>
2936 #include <linux/kdebug.h>
2937 #include <linux/perf_event.h>
2938+#include <linux/slab.h>
2939+#include <linux/pagemap.h>
2940+#include <linux/compiler.h>
2941+#include <linux/unistd.h>
2942
2943 #include <asm/firmware.h>
2944 #include <asm/page.h>
2945@@ -40,6 +44,7 @@
2946 #include <asm/uaccess.h>
2947 #include <asm/tlbflush.h>
2948 #include <asm/siginfo.h>
2949+#include <asm/ptrace.h>
2950
2951
2952 #ifdef CONFIG_KPROBES
2953@@ -64,6 +69,33 @@ static inline int notify_page_fault(stru
2954 }
2955 #endif
2956
2957+#ifdef CONFIG_PAX_PAGEEXEC
2958+/*
2959+ * PaX: decide what to do with offenders (regs->nip = fault address)
2960+ *
2961+ * returns 1 when task should be killed
2962+ */
2963+static int pax_handle_fetch_fault(struct pt_regs *regs)
2964+{
2965+ return 1;
2966+}
2967+
2968+void pax_report_insns(void *pc, void *sp)
2969+{
2970+ unsigned long i;
2971+
2972+ printk(KERN_ERR "PAX: bytes at PC: ");
2973+ for (i = 0; i < 5; i++) {
2974+ unsigned int c;
2975+ if (get_user(c, (unsigned int __user *)pc+i))
2976+ printk(KERN_CONT "???????? ");
2977+ else
2978+ printk(KERN_CONT "%08x ", c);
2979+ }
2980+ printk("\n");
2981+}
2982+#endif
2983+
2984 /*
2985 * Check whether the instruction at regs->nip is a store using
2986 * an update addressing form which will update r1.
2987@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re
2988 * indicate errors in DSISR but can validly be set in SRR1.
2989 */
2990 if (trap == 0x400)
2991- error_code &= 0x48200000;
2992+ error_code &= 0x58200000;
2993 else
2994 is_write = error_code & DSISR_ISSTORE;
2995 #else
2996@@ -250,7 +282,7 @@ good_area:
2997 * "undefined". Of those that can be set, this is the only
2998 * one which seems bad.
2999 */
3000- if (error_code & 0x10000000)
3001+ if (error_code & DSISR_GUARDED)
3002 /* Guarded storage error. */
3003 goto bad_area;
3004 #endif /* CONFIG_8xx */
3005@@ -265,7 +297,7 @@ good_area:
3006 * processors use the same I/D cache coherency mechanism
3007 * as embedded.
3008 */
3009- if (error_code & DSISR_PROTFAULT)
3010+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
3011 goto bad_area;
3012 #endif /* CONFIG_PPC_STD_MMU */
3013
3014@@ -335,6 +367,23 @@ bad_area:
3015 bad_area_nosemaphore:
3016 /* User mode accesses cause a SIGSEGV */
3017 if (user_mode(regs)) {
3018+
3019+#ifdef CONFIG_PAX_PAGEEXEC
3020+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
3021+#ifdef CONFIG_PPC_STD_MMU
3022+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
3023+#else
3024+ if (is_exec && regs->nip == address) {
3025+#endif
3026+ switch (pax_handle_fetch_fault(regs)) {
3027+ }
3028+
3029+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
3030+ do_group_exit(SIGKILL);
3031+ }
3032+ }
3033+#endif
3034+
3035 _exception(SIGSEGV, regs, code, address);
3036 return 0;
3037 }
3038diff -urNp linux-2.6.32.43/arch/powerpc/mm/mmap_64.c linux-2.6.32.43/arch/powerpc/mm/mmap_64.c
3039--- linux-2.6.32.43/arch/powerpc/mm/mmap_64.c 2011-03-27 14:31:47.000000000 -0400
3040+++ linux-2.6.32.43/arch/powerpc/mm/mmap_64.c 2011-04-17 15:56:45.000000000 -0400
3041@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str
3042 */
3043 if (mmap_is_legacy()) {
3044 mm->mmap_base = TASK_UNMAPPED_BASE;
3045+
3046+#ifdef CONFIG_PAX_RANDMMAP
3047+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3048+ mm->mmap_base += mm->delta_mmap;
3049+#endif
3050+
3051 mm->get_unmapped_area = arch_get_unmapped_area;
3052 mm->unmap_area = arch_unmap_area;
3053 } else {
3054 mm->mmap_base = mmap_base();
3055+
3056+#ifdef CONFIG_PAX_RANDMMAP
3057+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3058+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3059+#endif
3060+
3061 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3062 mm->unmap_area = arch_unmap_area_topdown;
3063 }
3064diff -urNp linux-2.6.32.43/arch/powerpc/mm/slice.c linux-2.6.32.43/arch/powerpc/mm/slice.c
3065--- linux-2.6.32.43/arch/powerpc/mm/slice.c 2011-03-27 14:31:47.000000000 -0400
3066+++ linux-2.6.32.43/arch/powerpc/mm/slice.c 2011-04-17 15:56:45.000000000 -0400
3067@@ -98,7 +98,7 @@ static int slice_area_is_free(struct mm_
3068 if ((mm->task_size - len) < addr)
3069 return 0;
3070 vma = find_vma(mm, addr);
3071- return (!vma || (addr + len) <= vma->vm_start);
3072+ return check_heap_stack_gap(vma, addr, len);
3073 }
3074
3075 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
3076@@ -256,7 +256,7 @@ full_search:
3077 addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT);
3078 continue;
3079 }
3080- if (!vma || addr + len <= vma->vm_start) {
3081+ if (check_heap_stack_gap(vma, addr, len)) {
3082 /*
3083 * Remember the place where we stopped the search:
3084 */
3085@@ -313,10 +313,14 @@ static unsigned long slice_find_area_top
3086 }
3087 }
3088
3089- addr = mm->mmap_base;
3090- while (addr > len) {
3091+ if (mm->mmap_base < len)
3092+ addr = -ENOMEM;
3093+ else
3094+ addr = mm->mmap_base - len;
3095+
3096+ while (!IS_ERR_VALUE(addr)) {
3097 /* Go down by chunk size */
3098- addr = _ALIGN_DOWN(addr - len, 1ul << pshift);
3099+ addr = _ALIGN_DOWN(addr, 1ul << pshift);
3100
3101 /* Check for hit with different page size */
3102 mask = slice_range_to_mask(addr, len);
3103@@ -336,7 +340,7 @@ static unsigned long slice_find_area_top
3104 * return with success:
3105 */
3106 vma = find_vma(mm, addr);
3107- if (!vma || (addr + len) <= vma->vm_start) {
3108+ if (check_heap_stack_gap(vma, addr, len)) {
3109 /* remember the address as a hint for next time */
3110 if (use_cache)
3111 mm->free_area_cache = addr;
3112@@ -348,7 +352,7 @@ static unsigned long slice_find_area_top
3113 mm->cached_hole_size = vma->vm_start - addr;
3114
3115 /* try just below the current vma->vm_start */
3116- addr = vma->vm_start;
3117+ addr = skip_heap_stack_gap(vma, len);
3118 }
3119
3120 /*
3121@@ -426,6 +430,11 @@ unsigned long slice_get_unmapped_area(un
3122 if (fixed && addr > (mm->task_size - len))
3123 return -EINVAL;
3124
3125+#ifdef CONFIG_PAX_RANDMMAP
3126+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
3127+ addr = 0;
3128+#endif
3129+
3130 /* If hint, make sure it matches our alignment restrictions */
3131 if (!fixed && addr) {
3132 addr = _ALIGN_UP(addr, 1ul << pshift);
3133diff -urNp linux-2.6.32.43/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.32.43/arch/powerpc/platforms/52xx/lite5200_pm.c
3134--- linux-2.6.32.43/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-03-27 14:31:47.000000000 -0400
3135+++ linux-2.6.32.43/arch/powerpc/platforms/52xx/lite5200_pm.c 2011-04-17 15:56:45.000000000 -0400
3136@@ -235,7 +235,7 @@ static void lite5200_pm_end(void)
3137 lite5200_pm_target_state = PM_SUSPEND_ON;
3138 }
3139
3140-static struct platform_suspend_ops lite5200_pm_ops = {
3141+static const struct platform_suspend_ops lite5200_pm_ops = {
3142 .valid = lite5200_pm_valid,
3143 .begin = lite5200_pm_begin,
3144 .prepare = lite5200_pm_prepare,
3145diff -urNp linux-2.6.32.43/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.32.43/arch/powerpc/platforms/52xx/mpc52xx_pm.c
3146--- linux-2.6.32.43/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-03-27 14:31:47.000000000 -0400
3147+++ linux-2.6.32.43/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2011-04-17 15:56:45.000000000 -0400
3148@@ -180,7 +180,7 @@ void mpc52xx_pm_finish(void)
3149 iounmap(mbar);
3150 }
3151
3152-static struct platform_suspend_ops mpc52xx_pm_ops = {
3153+static const struct platform_suspend_ops mpc52xx_pm_ops = {
3154 .valid = mpc52xx_pm_valid,
3155 .prepare = mpc52xx_pm_prepare,
3156 .enter = mpc52xx_pm_enter,
3157diff -urNp linux-2.6.32.43/arch/powerpc/platforms/83xx/suspend.c linux-2.6.32.43/arch/powerpc/platforms/83xx/suspend.c
3158--- linux-2.6.32.43/arch/powerpc/platforms/83xx/suspend.c 2011-03-27 14:31:47.000000000 -0400
3159+++ linux-2.6.32.43/arch/powerpc/platforms/83xx/suspend.c 2011-04-17 15:56:45.000000000 -0400
3160@@ -273,7 +273,7 @@ static int mpc83xx_is_pci_agent(void)
3161 return ret;
3162 }
3163
3164-static struct platform_suspend_ops mpc83xx_suspend_ops = {
3165+static const struct platform_suspend_ops mpc83xx_suspend_ops = {
3166 .valid = mpc83xx_suspend_valid,
3167 .begin = mpc83xx_suspend_begin,
3168 .enter = mpc83xx_suspend_enter,
3169diff -urNp linux-2.6.32.43/arch/powerpc/platforms/cell/iommu.c linux-2.6.32.43/arch/powerpc/platforms/cell/iommu.c
3170--- linux-2.6.32.43/arch/powerpc/platforms/cell/iommu.c 2011-03-27 14:31:47.000000000 -0400
3171+++ linux-2.6.32.43/arch/powerpc/platforms/cell/iommu.c 2011-04-17 15:56:45.000000000 -0400
3172@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc
3173
3174 static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask);
3175
3176-struct dma_map_ops dma_iommu_fixed_ops = {
3177+const struct dma_map_ops dma_iommu_fixed_ops = {
3178 .alloc_coherent = dma_fixed_alloc_coherent,
3179 .free_coherent = dma_fixed_free_coherent,
3180 .map_sg = dma_fixed_map_sg,
3181diff -urNp linux-2.6.32.43/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.32.43/arch/powerpc/platforms/ps3/system-bus.c
3182--- linux-2.6.32.43/arch/powerpc/platforms/ps3/system-bus.c 2011-03-27 14:31:47.000000000 -0400
3183+++ linux-2.6.32.43/arch/powerpc/platforms/ps3/system-bus.c 2011-04-17 15:56:45.000000000 -0400
3184@@ -694,7 +694,7 @@ static int ps3_dma_supported(struct devi
3185 return mask >= DMA_BIT_MASK(32);
3186 }
3187
3188-static struct dma_map_ops ps3_sb_dma_ops = {
3189+static const struct dma_map_ops ps3_sb_dma_ops = {
3190 .alloc_coherent = ps3_alloc_coherent,
3191 .free_coherent = ps3_free_coherent,
3192 .map_sg = ps3_sb_map_sg,
3193@@ -704,7 +704,7 @@ static struct dma_map_ops ps3_sb_dma_ops
3194 .unmap_page = ps3_unmap_page,
3195 };
3196
3197-static struct dma_map_ops ps3_ioc0_dma_ops = {
3198+static const struct dma_map_ops ps3_ioc0_dma_ops = {
3199 .alloc_coherent = ps3_alloc_coherent,
3200 .free_coherent = ps3_free_coherent,
3201 .map_sg = ps3_ioc0_map_sg,
3202diff -urNp linux-2.6.32.43/arch/powerpc/platforms/pseries/Kconfig linux-2.6.32.43/arch/powerpc/platforms/pseries/Kconfig
3203--- linux-2.6.32.43/arch/powerpc/platforms/pseries/Kconfig 2011-03-27 14:31:47.000000000 -0400
3204+++ linux-2.6.32.43/arch/powerpc/platforms/pseries/Kconfig 2011-04-17 15:56:45.000000000 -0400
3205@@ -2,6 +2,8 @@ config PPC_PSERIES
3206 depends on PPC64 && PPC_BOOK3S
3207 bool "IBM pSeries & new (POWER5-based) iSeries"
3208 select MPIC
3209+ select PCI_MSI
3210+ select XICS
3211 select PPC_I8259
3212 select PPC_RTAS
3213 select RTAS_ERROR_LOGGING
3214diff -urNp linux-2.6.32.43/arch/s390/include/asm/elf.h linux-2.6.32.43/arch/s390/include/asm/elf.h
3215--- linux-2.6.32.43/arch/s390/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
3216+++ linux-2.6.32.43/arch/s390/include/asm/elf.h 2011-04-17 15:56:45.000000000 -0400
3217@@ -164,6 +164,13 @@ extern unsigned int vdso_enabled;
3218 that it will "exec", and that there is sufficient room for the brk. */
3219 #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
3220
3221+#ifdef CONFIG_PAX_ASLR
3222+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
3223+
3224+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3225+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 )
3226+#endif
3227+
3228 /* This yields a mask that user programs can use to figure out what
3229 instruction set this CPU supports. */
3230
3231diff -urNp linux-2.6.32.43/arch/s390/include/asm/setup.h linux-2.6.32.43/arch/s390/include/asm/setup.h
3232--- linux-2.6.32.43/arch/s390/include/asm/setup.h 2011-03-27 14:31:47.000000000 -0400
3233+++ linux-2.6.32.43/arch/s390/include/asm/setup.h 2011-04-17 15:56:45.000000000 -0400
3234@@ -50,13 +50,13 @@ extern unsigned long memory_end;
3235 void detect_memory_layout(struct mem_chunk chunk[]);
3236
3237 #ifdef CONFIG_S390_SWITCH_AMODE
3238-extern unsigned int switch_amode;
3239+#define switch_amode (1)
3240 #else
3241 #define switch_amode (0)
3242 #endif
3243
3244 #ifdef CONFIG_S390_EXEC_PROTECT
3245-extern unsigned int s390_noexec;
3246+#define s390_noexec (1)
3247 #else
3248 #define s390_noexec (0)
3249 #endif
3250diff -urNp linux-2.6.32.43/arch/s390/include/asm/uaccess.h linux-2.6.32.43/arch/s390/include/asm/uaccess.h
3251--- linux-2.6.32.43/arch/s390/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
3252+++ linux-2.6.32.43/arch/s390/include/asm/uaccess.h 2011-04-17 15:56:45.000000000 -0400
3253@@ -232,6 +232,10 @@ static inline unsigned long __must_check
3254 copy_to_user(void __user *to, const void *from, unsigned long n)
3255 {
3256 might_fault();
3257+
3258+ if ((long)n < 0)
3259+ return n;
3260+
3261 if (access_ok(VERIFY_WRITE, to, n))
3262 n = __copy_to_user(to, from, n);
3263 return n;
3264@@ -257,6 +261,9 @@ copy_to_user(void __user *to, const void
3265 static inline unsigned long __must_check
3266 __copy_from_user(void *to, const void __user *from, unsigned long n)
3267 {
3268+ if ((long)n < 0)
3269+ return n;
3270+
3271 if (__builtin_constant_p(n) && (n <= 256))
3272 return uaccess.copy_from_user_small(n, from, to);
3273 else
3274@@ -283,6 +290,10 @@ static inline unsigned long __must_check
3275 copy_from_user(void *to, const void __user *from, unsigned long n)
3276 {
3277 might_fault();
3278+
3279+ if ((long)n < 0)
3280+ return n;
3281+
3282 if (access_ok(VERIFY_READ, from, n))
3283 n = __copy_from_user(to, from, n);
3284 else
3285diff -urNp linux-2.6.32.43/arch/s390/Kconfig linux-2.6.32.43/arch/s390/Kconfig
3286--- linux-2.6.32.43/arch/s390/Kconfig 2011-03-27 14:31:47.000000000 -0400
3287+++ linux-2.6.32.43/arch/s390/Kconfig 2011-04-17 15:56:45.000000000 -0400
3288@@ -194,28 +194,26 @@ config AUDIT_ARCH
3289
3290 config S390_SWITCH_AMODE
3291 bool "Switch kernel/user addressing modes"
3292+ default y
3293 help
3294 This option allows to switch the addressing modes of kernel and user
3295- space. The kernel parameter switch_amode=on will enable this feature,
3296- default is disabled. Enabling this (via kernel parameter) on machines
3297- earlier than IBM System z9-109 EC/BC will reduce system performance.
3298+ space. Enabling this on machines earlier than IBM System z9-109 EC/BC
3299+ will reduce system performance.
3300
3301 Note that this option will also be selected by selecting the execute
3302- protection option below. Enabling the execute protection via the
3303- noexec kernel parameter will also switch the addressing modes,
3304- independent of the switch_amode kernel parameter.
3305+ protection option below. Enabling the execute protection will also
3306+ switch the addressing modes, independent of this option.
3307
3308
3309 config S390_EXEC_PROTECT
3310 bool "Data execute protection"
3311+ default y
3312 select S390_SWITCH_AMODE
3313 help
3314 This option allows to enable a buffer overflow protection for user
3315 space programs and it also selects the addressing mode option above.
3316- The kernel parameter noexec=on will enable this feature and also
3317- switch the addressing modes, default is disabled. Enabling this (via
3318- kernel parameter) on machines earlier than IBM System z9-109 EC/BC
3319- will reduce system performance.
3320+ Enabling this on machines earlier than IBM System z9-109 EC/BC will
3321+ reduce system performance.
3322
3323 comment "Code generation options"
3324
3325diff -urNp linux-2.6.32.43/arch/s390/kernel/module.c linux-2.6.32.43/arch/s390/kernel/module.c
3326--- linux-2.6.32.43/arch/s390/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
3327+++ linux-2.6.32.43/arch/s390/kernel/module.c 2011-04-17 15:56:45.000000000 -0400
3328@@ -166,11 +166,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr,
3329
3330 /* Increase core size by size of got & plt and set start
3331 offsets for got and plt. */
3332- me->core_size = ALIGN(me->core_size, 4);
3333- me->arch.got_offset = me->core_size;
3334- me->core_size += me->arch.got_size;
3335- me->arch.plt_offset = me->core_size;
3336- me->core_size += me->arch.plt_size;
3337+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
3338+ me->arch.got_offset = me->core_size_rw;
3339+ me->core_size_rw += me->arch.got_size;
3340+ me->arch.plt_offset = me->core_size_rx;
3341+ me->core_size_rx += me->arch.plt_size;
3342 return 0;
3343 }
3344
3345@@ -256,7 +256,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3346 if (info->got_initialized == 0) {
3347 Elf_Addr *gotent;
3348
3349- gotent = me->module_core + me->arch.got_offset +
3350+ gotent = me->module_core_rw + me->arch.got_offset +
3351 info->got_offset;
3352 *gotent = val;
3353 info->got_initialized = 1;
3354@@ -280,7 +280,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3355 else if (r_type == R_390_GOTENT ||
3356 r_type == R_390_GOTPLTENT)
3357 *(unsigned int *) loc =
3358- (val + (Elf_Addr) me->module_core - loc) >> 1;
3359+ (val + (Elf_Addr) me->module_core_rw - loc) >> 1;
3360 else if (r_type == R_390_GOT64 ||
3361 r_type == R_390_GOTPLT64)
3362 *(unsigned long *) loc = val;
3363@@ -294,7 +294,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3364 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
3365 if (info->plt_initialized == 0) {
3366 unsigned int *ip;
3367- ip = me->module_core + me->arch.plt_offset +
3368+ ip = me->module_core_rx + me->arch.plt_offset +
3369 info->plt_offset;
3370 #ifndef CONFIG_64BIT
3371 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
3372@@ -319,7 +319,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3373 val - loc + 0xffffUL < 0x1ffffeUL) ||
3374 (r_type == R_390_PLT32DBL &&
3375 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
3376- val = (Elf_Addr) me->module_core +
3377+ val = (Elf_Addr) me->module_core_rx +
3378 me->arch.plt_offset +
3379 info->plt_offset;
3380 val += rela->r_addend - loc;
3381@@ -341,7 +341,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3382 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
3383 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
3384 val = val + rela->r_addend -
3385- ((Elf_Addr) me->module_core + me->arch.got_offset);
3386+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
3387 if (r_type == R_390_GOTOFF16)
3388 *(unsigned short *) loc = val;
3389 else if (r_type == R_390_GOTOFF32)
3390@@ -351,7 +351,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base
3391 break;
3392 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
3393 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
3394- val = (Elf_Addr) me->module_core + me->arch.got_offset +
3395+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
3396 rela->r_addend - loc;
3397 if (r_type == R_390_GOTPC)
3398 *(unsigned int *) loc = val;
3399diff -urNp linux-2.6.32.43/arch/s390/kernel/setup.c linux-2.6.32.43/arch/s390/kernel/setup.c
3400--- linux-2.6.32.43/arch/s390/kernel/setup.c 2011-03-27 14:31:47.000000000 -0400
3401+++ linux-2.6.32.43/arch/s390/kernel/setup.c 2011-04-17 15:56:45.000000000 -0400
3402@@ -306,9 +306,6 @@ static int __init early_parse_mem(char *
3403 early_param("mem", early_parse_mem);
3404
3405 #ifdef CONFIG_S390_SWITCH_AMODE
3406-unsigned int switch_amode = 0;
3407-EXPORT_SYMBOL_GPL(switch_amode);
3408-
3409 static int set_amode_and_uaccess(unsigned long user_amode,
3410 unsigned long user32_amode)
3411 {
3412@@ -334,17 +331,6 @@ static int set_amode_and_uaccess(unsigne
3413 return 0;
3414 }
3415 }
3416-
3417-/*
3418- * Switch kernel/user addressing modes?
3419- */
3420-static int __init early_parse_switch_amode(char *p)
3421-{
3422- switch_amode = 1;
3423- return 0;
3424-}
3425-early_param("switch_amode", early_parse_switch_amode);
3426-
3427 #else /* CONFIG_S390_SWITCH_AMODE */
3428 static inline int set_amode_and_uaccess(unsigned long user_amode,
3429 unsigned long user32_amode)
3430@@ -353,24 +339,6 @@ static inline int set_amode_and_uaccess(
3431 }
3432 #endif /* CONFIG_S390_SWITCH_AMODE */
3433
3434-#ifdef CONFIG_S390_EXEC_PROTECT
3435-unsigned int s390_noexec = 0;
3436-EXPORT_SYMBOL_GPL(s390_noexec);
3437-
3438-/*
3439- * Enable execute protection?
3440- */
3441-static int __init early_parse_noexec(char *p)
3442-{
3443- if (!strncmp(p, "off", 3))
3444- return 0;
3445- switch_amode = 1;
3446- s390_noexec = 1;
3447- return 0;
3448-}
3449-early_param("noexec", early_parse_noexec);
3450-#endif /* CONFIG_S390_EXEC_PROTECT */
3451-
3452 static void setup_addressing_mode(void)
3453 {
3454 if (s390_noexec) {
3455diff -urNp linux-2.6.32.43/arch/s390/mm/mmap.c linux-2.6.32.43/arch/s390/mm/mmap.c
3456--- linux-2.6.32.43/arch/s390/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3457+++ linux-2.6.32.43/arch/s390/mm/mmap.c 2011-04-17 15:56:45.000000000 -0400
3458@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str
3459 */
3460 if (mmap_is_legacy()) {
3461 mm->mmap_base = TASK_UNMAPPED_BASE;
3462+
3463+#ifdef CONFIG_PAX_RANDMMAP
3464+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3465+ mm->mmap_base += mm->delta_mmap;
3466+#endif
3467+
3468 mm->get_unmapped_area = arch_get_unmapped_area;
3469 mm->unmap_area = arch_unmap_area;
3470 } else {
3471 mm->mmap_base = mmap_base();
3472+
3473+#ifdef CONFIG_PAX_RANDMMAP
3474+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3475+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3476+#endif
3477+
3478 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3479 mm->unmap_area = arch_unmap_area_topdown;
3480 }
3481@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str
3482 */
3483 if (mmap_is_legacy()) {
3484 mm->mmap_base = TASK_UNMAPPED_BASE;
3485+
3486+#ifdef CONFIG_PAX_RANDMMAP
3487+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3488+ mm->mmap_base += mm->delta_mmap;
3489+#endif
3490+
3491 mm->get_unmapped_area = s390_get_unmapped_area;
3492 mm->unmap_area = arch_unmap_area;
3493 } else {
3494 mm->mmap_base = mmap_base();
3495+
3496+#ifdef CONFIG_PAX_RANDMMAP
3497+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3498+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3499+#endif
3500+
3501 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
3502 mm->unmap_area = arch_unmap_area_topdown;
3503 }
3504diff -urNp linux-2.6.32.43/arch/score/include/asm/system.h linux-2.6.32.43/arch/score/include/asm/system.h
3505--- linux-2.6.32.43/arch/score/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
3506+++ linux-2.6.32.43/arch/score/include/asm/system.h 2011-04-17 15:56:45.000000000 -0400
3507@@ -17,7 +17,7 @@ do { \
3508 #define finish_arch_switch(prev) do {} while (0)
3509
3510 typedef void (*vi_handler_t)(void);
3511-extern unsigned long arch_align_stack(unsigned long sp);
3512+#define arch_align_stack(x) (x)
3513
3514 #define mb() barrier()
3515 #define rmb() barrier()
3516diff -urNp linux-2.6.32.43/arch/score/kernel/process.c linux-2.6.32.43/arch/score/kernel/process.c
3517--- linux-2.6.32.43/arch/score/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
3518+++ linux-2.6.32.43/arch/score/kernel/process.c 2011-04-17 15:56:45.000000000 -0400
3519@@ -161,8 +161,3 @@ unsigned long get_wchan(struct task_stru
3520
3521 return task_pt_regs(task)->cp0_epc;
3522 }
3523-
3524-unsigned long arch_align_stack(unsigned long sp)
3525-{
3526- return sp;
3527-}
3528diff -urNp linux-2.6.32.43/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.32.43/arch/sh/boards/mach-hp6xx/pm.c
3529--- linux-2.6.32.43/arch/sh/boards/mach-hp6xx/pm.c 2011-03-27 14:31:47.000000000 -0400
3530+++ linux-2.6.32.43/arch/sh/boards/mach-hp6xx/pm.c 2011-04-17 15:56:45.000000000 -0400
3531@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_
3532 return 0;
3533 }
3534
3535-static struct platform_suspend_ops hp6x0_pm_ops = {
3536+static const struct platform_suspend_ops hp6x0_pm_ops = {
3537 .enter = hp6x0_pm_enter,
3538 .valid = suspend_valid_only_mem,
3539 };
3540diff -urNp linux-2.6.32.43/arch/sh/kernel/cpu/sh4/sq.c linux-2.6.32.43/arch/sh/kernel/cpu/sh4/sq.c
3541--- linux-2.6.32.43/arch/sh/kernel/cpu/sh4/sq.c 2011-03-27 14:31:47.000000000 -0400
3542+++ linux-2.6.32.43/arch/sh/kernel/cpu/sh4/sq.c 2011-04-17 15:56:46.000000000 -0400
3543@@ -327,7 +327,7 @@ static struct attribute *sq_sysfs_attrs[
3544 NULL,
3545 };
3546
3547-static struct sysfs_ops sq_sysfs_ops = {
3548+static const struct sysfs_ops sq_sysfs_ops = {
3549 .show = sq_sysfs_show,
3550 .store = sq_sysfs_store,
3551 };
3552diff -urNp linux-2.6.32.43/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.32.43/arch/sh/kernel/cpu/shmobile/pm.c
3553--- linux-2.6.32.43/arch/sh/kernel/cpu/shmobile/pm.c 2011-03-27 14:31:47.000000000 -0400
3554+++ linux-2.6.32.43/arch/sh/kernel/cpu/shmobile/pm.c 2011-04-17 15:56:46.000000000 -0400
3555@@ -58,7 +58,7 @@ static int sh_pm_enter(suspend_state_t s
3556 return 0;
3557 }
3558
3559-static struct platform_suspend_ops sh_pm_ops = {
3560+static const struct platform_suspend_ops sh_pm_ops = {
3561 .enter = sh_pm_enter,
3562 .valid = suspend_valid_only_mem,
3563 };
3564diff -urNp linux-2.6.32.43/arch/sh/kernel/kgdb.c linux-2.6.32.43/arch/sh/kernel/kgdb.c
3565--- linux-2.6.32.43/arch/sh/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
3566+++ linux-2.6.32.43/arch/sh/kernel/kgdb.c 2011-04-17 15:56:46.000000000 -0400
3567@@ -271,7 +271,7 @@ void kgdb_arch_exit(void)
3568 {
3569 }
3570
3571-struct kgdb_arch arch_kgdb_ops = {
3572+const struct kgdb_arch arch_kgdb_ops = {
3573 /* Breakpoint instruction: trapa #0x3c */
3574 #ifdef CONFIG_CPU_LITTLE_ENDIAN
3575 .gdb_bpt_instr = { 0x3c, 0xc3 },
3576diff -urNp linux-2.6.32.43/arch/sh/mm/mmap.c linux-2.6.32.43/arch/sh/mm/mmap.c
3577--- linux-2.6.32.43/arch/sh/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
3578+++ linux-2.6.32.43/arch/sh/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
3579@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str
3580 addr = PAGE_ALIGN(addr);
3581
3582 vma = find_vma(mm, addr);
3583- if (TASK_SIZE - len >= addr &&
3584- (!vma || addr + len <= vma->vm_start))
3585+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3586 return addr;
3587 }
3588
3589@@ -106,7 +105,7 @@ full_search:
3590 }
3591 return -ENOMEM;
3592 }
3593- if (likely(!vma || addr + len <= vma->vm_start)) {
3594+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3595 /*
3596 * Remember the place where we stopped the search:
3597 */
3598@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi
3599 addr = PAGE_ALIGN(addr);
3600
3601 vma = find_vma(mm, addr);
3602- if (TASK_SIZE - len >= addr &&
3603- (!vma || addr + len <= vma->vm_start))
3604+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len))
3605 return addr;
3606 }
3607
3608@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi
3609 /* make sure it can fit in the remaining address space */
3610 if (likely(addr > len)) {
3611 vma = find_vma(mm, addr-len);
3612- if (!vma || addr <= vma->vm_start) {
3613+ if (check_heap_stack_gap(vma, addr - len, len)) {
3614 /* remember the address as a hint for next time */
3615 return (mm->free_area_cache = addr-len);
3616 }
3617@@ -188,18 +186,18 @@ arch_get_unmapped_area_topdown(struct fi
3618 if (unlikely(mm->mmap_base < len))
3619 goto bottomup;
3620
3621- addr = mm->mmap_base-len;
3622- if (do_colour_align)
3623- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3624+ addr = mm->mmap_base - len;
3625
3626 do {
3627+ if (do_colour_align)
3628+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3629 /*
3630 * Lookup failure means no vma is above this address,
3631 * else if new region fits below vma->vm_start,
3632 * return with success:
3633 */
3634 vma = find_vma(mm, addr);
3635- if (likely(!vma || addr+len <= vma->vm_start)) {
3636+ if (likely(check_heap_stack_gap(vma, addr, len))) {
3637 /* remember the address as a hint for next time */
3638 return (mm->free_area_cache = addr);
3639 }
3640@@ -209,10 +207,8 @@ arch_get_unmapped_area_topdown(struct fi
3641 mm->cached_hole_size = vma->vm_start - addr;
3642
3643 /* try just below the current vma->vm_start */
3644- addr = vma->vm_start-len;
3645- if (do_colour_align)
3646- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
3647- } while (likely(len < vma->vm_start));
3648+ addr = skip_heap_stack_gap(vma, len);
3649+ } while (!IS_ERR_VALUE(addr));
3650
3651 bottomup:
3652 /*
3653diff -urNp linux-2.6.32.43/arch/sparc/include/asm/atomic_64.h linux-2.6.32.43/arch/sparc/include/asm/atomic_64.h
3654--- linux-2.6.32.43/arch/sparc/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
3655+++ linux-2.6.32.43/arch/sparc/include/asm/atomic_64.h 2011-07-13 22:22:56.000000000 -0400
3656@@ -14,18 +14,40 @@
3657 #define ATOMIC64_INIT(i) { (i) }
3658
3659 #define atomic_read(v) ((v)->counter)
3660+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
3661+{
3662+ return v->counter;
3663+}
3664 #define atomic64_read(v) ((v)->counter)
3665+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
3666+{
3667+ return v->counter;
3668+}
3669
3670 #define atomic_set(v, i) (((v)->counter) = i)
3671+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
3672+{
3673+ v->counter = i;
3674+}
3675 #define atomic64_set(v, i) (((v)->counter) = i)
3676+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
3677+{
3678+ v->counter = i;
3679+}
3680
3681 extern void atomic_add(int, atomic_t *);
3682+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
3683 extern void atomic64_add(long, atomic64_t *);
3684+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
3685 extern void atomic_sub(int, atomic_t *);
3686+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
3687 extern void atomic64_sub(long, atomic64_t *);
3688+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
3689
3690 extern int atomic_add_ret(int, atomic_t *);
3691+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
3692 extern long atomic64_add_ret(long, atomic64_t *);
3693+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
3694 extern int atomic_sub_ret(int, atomic_t *);
3695 extern long atomic64_sub_ret(long, atomic64_t *);
3696
3697@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomi
3698 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
3699
3700 #define atomic_inc_return(v) atomic_add_ret(1, v)
3701+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
3702+{
3703+ return atomic_add_ret_unchecked(1, v);
3704+}
3705 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
3706+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
3707+{
3708+ return atomic64_add_ret_unchecked(1, v);
3709+}
3710
3711 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
3712 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
3713
3714 #define atomic_add_return(i, v) atomic_add_ret(i, v)
3715+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
3716+{
3717+ return atomic_add_ret_unchecked(i, v);
3718+}
3719 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
3720+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
3721+{
3722+ return atomic64_add_ret_unchecked(i, v);
3723+}
3724
3725 /*
3726 * atomic_inc_and_test - increment and test
3727@@ -50,6 +88,7 @@ extern long atomic64_sub_ret(long, atomi
3728 * other cases.
3729 */
3730 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
3731+#define atomic_inc_and_test_unchecked(v) (atomic_inc_return_unchecked(v) == 0)
3732 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
3733
3734 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
3735@@ -59,30 +98,59 @@ extern long atomic64_sub_ret(long, atomi
3736 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
3737
3738 #define atomic_inc(v) atomic_add(1, v)
3739+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
3740+{
3741+ atomic_add_unchecked(1, v);
3742+}
3743 #define atomic64_inc(v) atomic64_add(1, v)
3744+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
3745+{
3746+ atomic64_add_unchecked(1, v);
3747+}
3748
3749 #define atomic_dec(v) atomic_sub(1, v)
3750+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
3751+{
3752+ atomic_sub_unchecked(1, v);
3753+}
3754 #define atomic64_dec(v) atomic64_sub(1, v)
3755+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
3756+{
3757+ atomic64_sub_unchecked(1, v);
3758+}
3759
3760 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
3761 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
3762
3763 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3764+#define atomic_cmpxchg_unchecked(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
3765 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
3766+#define atomic_xchg_unchecked(v, new) (xchg(&((v)->counter), new))
3767
3768 static inline int atomic_add_unless(atomic_t *v, int a, int u)
3769 {
3770- int c, old;
3771+ int c, old, new;
3772 c = atomic_read(v);
3773 for (;;) {
3774- if (unlikely(c == (u)))
3775+ if (unlikely(c == u))
3776 break;
3777- old = atomic_cmpxchg((v), c, c + (a));
3778+
3779+ asm volatile("addcc %2, %0, %0\n"
3780+
3781+#ifdef CONFIG_PAX_REFCOUNT
3782+ "tvs %%icc, 6\n"
3783+#endif
3784+
3785+ : "=r" (new)
3786+ : "0" (c), "ir" (a)
3787+ : "cc");
3788+
3789+ old = atomic_cmpxchg(v, c, new);
3790 if (likely(old == c))
3791 break;
3792 c = old;
3793 }
3794- return c != (u);
3795+ return c != u;
3796 }
3797
3798 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
3799@@ -93,17 +161,28 @@ static inline int atomic_add_unless(atom
3800
3801 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
3802 {
3803- long c, old;
3804+ long c, old, new;
3805 c = atomic64_read(v);
3806 for (;;) {
3807- if (unlikely(c == (u)))
3808+ if (unlikely(c == u))
3809 break;
3810- old = atomic64_cmpxchg((v), c, c + (a));
3811+
3812+ asm volatile("addcc %2, %0, %0\n"
3813+
3814+#ifdef CONFIG_PAX_REFCOUNT
3815+ "tvs %%xcc, 6\n"
3816+#endif
3817+
3818+ : "=r" (new)
3819+ : "0" (c), "ir" (a)
3820+ : "cc");
3821+
3822+ old = atomic64_cmpxchg(v, c, new);
3823 if (likely(old == c))
3824 break;
3825 c = old;
3826 }
3827- return c != (u);
3828+ return c != u;
3829 }
3830
3831 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
3832diff -urNp linux-2.6.32.43/arch/sparc/include/asm/cache.h linux-2.6.32.43/arch/sparc/include/asm/cache.h
3833--- linux-2.6.32.43/arch/sparc/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
3834+++ linux-2.6.32.43/arch/sparc/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
3835@@ -8,7 +8,7 @@
3836 #define _SPARC_CACHE_H
3837
3838 #define L1_CACHE_SHIFT 5
3839-#define L1_CACHE_BYTES 32
3840+#define L1_CACHE_BYTES 32UL
3841 #define L1_CACHE_ALIGN(x) ((((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1)))
3842
3843 #ifdef CONFIG_SPARC32
3844diff -urNp linux-2.6.32.43/arch/sparc/include/asm/dma-mapping.h linux-2.6.32.43/arch/sparc/include/asm/dma-mapping.h
3845--- linux-2.6.32.43/arch/sparc/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
3846+++ linux-2.6.32.43/arch/sparc/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
3847@@ -14,10 +14,10 @@ extern int dma_set_mask(struct device *d
3848 #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
3849 #define dma_is_consistent(d, h) (1)
3850
3851-extern struct dma_map_ops *dma_ops, pci32_dma_ops;
3852+extern const struct dma_map_ops *dma_ops, pci32_dma_ops;
3853 extern struct bus_type pci_bus_type;
3854
3855-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
3856+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
3857 {
3858 #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
3859 if (dev->bus == &pci_bus_type)
3860@@ -31,7 +31,7 @@ static inline struct dma_map_ops *get_dm
3861 static inline void *dma_alloc_coherent(struct device *dev, size_t size,
3862 dma_addr_t *dma_handle, gfp_t flag)
3863 {
3864- struct dma_map_ops *ops = get_dma_ops(dev);
3865+ const struct dma_map_ops *ops = get_dma_ops(dev);
3866 void *cpu_addr;
3867
3868 cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag);
3869@@ -42,7 +42,7 @@ static inline void *dma_alloc_coherent(s
3870 static inline void dma_free_coherent(struct device *dev, size_t size,
3871 void *cpu_addr, dma_addr_t dma_handle)
3872 {
3873- struct dma_map_ops *ops = get_dma_ops(dev);
3874+ const struct dma_map_ops *ops = get_dma_ops(dev);
3875
3876 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
3877 ops->free_coherent(dev, size, cpu_addr, dma_handle);
3878diff -urNp linux-2.6.32.43/arch/sparc/include/asm/elf_32.h linux-2.6.32.43/arch/sparc/include/asm/elf_32.h
3879--- linux-2.6.32.43/arch/sparc/include/asm/elf_32.h 2011-03-27 14:31:47.000000000 -0400
3880+++ linux-2.6.32.43/arch/sparc/include/asm/elf_32.h 2011-04-17 15:56:46.000000000 -0400
3881@@ -116,6 +116,13 @@ typedef struct {
3882
3883 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
3884
3885+#ifdef CONFIG_PAX_ASLR
3886+#define PAX_ELF_ET_DYN_BASE 0x10000UL
3887+
3888+#define PAX_DELTA_MMAP_LEN 16
3889+#define PAX_DELTA_STACK_LEN 16
3890+#endif
3891+
3892 /* This yields a mask that user programs can use to figure out what
3893 instruction set this cpu supports. This can NOT be done in userspace
3894 on Sparc. */
3895diff -urNp linux-2.6.32.43/arch/sparc/include/asm/elf_64.h linux-2.6.32.43/arch/sparc/include/asm/elf_64.h
3896--- linux-2.6.32.43/arch/sparc/include/asm/elf_64.h 2011-03-27 14:31:47.000000000 -0400
3897+++ linux-2.6.32.43/arch/sparc/include/asm/elf_64.h 2011-04-17 15:56:46.000000000 -0400
3898@@ -163,6 +163,12 @@ typedef struct {
3899 #define ELF_ET_DYN_BASE 0x0000010000000000UL
3900 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
3901
3902+#ifdef CONFIG_PAX_ASLR
3903+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
3904+
3905+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
3906+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
3907+#endif
3908
3909 /* This yields a mask that user programs can use to figure out what
3910 instruction set this cpu supports. */
3911diff -urNp linux-2.6.32.43/arch/sparc/include/asm/pgtable_32.h linux-2.6.32.43/arch/sparc/include/asm/pgtable_32.h
3912--- linux-2.6.32.43/arch/sparc/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
3913+++ linux-2.6.32.43/arch/sparc/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
3914@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd)
3915 BTFIXUPDEF_INT(page_none)
3916 BTFIXUPDEF_INT(page_copy)
3917 BTFIXUPDEF_INT(page_readonly)
3918+
3919+#ifdef CONFIG_PAX_PAGEEXEC
3920+BTFIXUPDEF_INT(page_shared_noexec)
3921+BTFIXUPDEF_INT(page_copy_noexec)
3922+BTFIXUPDEF_INT(page_readonly_noexec)
3923+#endif
3924+
3925 BTFIXUPDEF_INT(page_kernel)
3926
3927 #define PMD_SHIFT SUN4C_PMD_SHIFT
3928@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED;
3929 #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
3930 #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
3931
3932+#ifdef CONFIG_PAX_PAGEEXEC
3933+extern pgprot_t PAGE_SHARED_NOEXEC;
3934+# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
3935+# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
3936+#else
3937+# define PAGE_SHARED_NOEXEC PAGE_SHARED
3938+# define PAGE_COPY_NOEXEC PAGE_COPY
3939+# define PAGE_READONLY_NOEXEC PAGE_READONLY
3940+#endif
3941+
3942 extern unsigned long page_kernel;
3943
3944 #ifdef MODULE
3945diff -urNp linux-2.6.32.43/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.32.43/arch/sparc/include/asm/pgtsrmmu.h
3946--- linux-2.6.32.43/arch/sparc/include/asm/pgtsrmmu.h 2011-03-27 14:31:47.000000000 -0400
3947+++ linux-2.6.32.43/arch/sparc/include/asm/pgtsrmmu.h 2011-04-17 15:56:46.000000000 -0400
3948@@ -115,6 +115,13 @@
3949 SRMMU_EXEC | SRMMU_REF)
3950 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
3951 SRMMU_EXEC | SRMMU_REF)
3952+
3953+#ifdef CONFIG_PAX_PAGEEXEC
3954+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
3955+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3956+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
3957+#endif
3958+
3959 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
3960 SRMMU_DIRTY | SRMMU_REF)
3961
3962diff -urNp linux-2.6.32.43/arch/sparc/include/asm/spinlock_64.h linux-2.6.32.43/arch/sparc/include/asm/spinlock_64.h
3963--- linux-2.6.32.43/arch/sparc/include/asm/spinlock_64.h 2011-03-27 14:31:47.000000000 -0400
3964+++ linux-2.6.32.43/arch/sparc/include/asm/spinlock_64.h 2011-05-04 17:56:20.000000000 -0400
3965@@ -92,14 +92,19 @@ static inline void __raw_spin_lock_flags
3966
3967 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
3968
3969-static void inline arch_read_lock(raw_rwlock_t *lock)
3970+static inline void arch_read_lock(raw_rwlock_t *lock)
3971 {
3972 unsigned long tmp1, tmp2;
3973
3974 __asm__ __volatile__ (
3975 "1: ldsw [%2], %0\n"
3976 " brlz,pn %0, 2f\n"
3977-"4: add %0, 1, %1\n"
3978+"4: addcc %0, 1, %1\n"
3979+
3980+#ifdef CONFIG_PAX_REFCOUNT
3981+" tvs %%icc, 6\n"
3982+#endif
3983+
3984 " cas [%2], %0, %1\n"
3985 " cmp %0, %1\n"
3986 " bne,pn %%icc, 1b\n"
3987@@ -112,7 +117,7 @@ static void inline arch_read_lock(raw_rw
3988 " .previous"
3989 : "=&r" (tmp1), "=&r" (tmp2)
3990 : "r" (lock)
3991- : "memory");
3992+ : "memory", "cc");
3993 }
3994
3995 static int inline arch_read_trylock(raw_rwlock_t *lock)
3996@@ -123,7 +128,12 @@ static int inline arch_read_trylock(raw_
3997 "1: ldsw [%2], %0\n"
3998 " brlz,a,pn %0, 2f\n"
3999 " mov 0, %0\n"
4000-" add %0, 1, %1\n"
4001+" addcc %0, 1, %1\n"
4002+
4003+#ifdef CONFIG_PAX_REFCOUNT
4004+" tvs %%icc, 6\n"
4005+#endif
4006+
4007 " cas [%2], %0, %1\n"
4008 " cmp %0, %1\n"
4009 " bne,pn %%icc, 1b\n"
4010@@ -136,13 +146,18 @@ static int inline arch_read_trylock(raw_
4011 return tmp1;
4012 }
4013
4014-static void inline arch_read_unlock(raw_rwlock_t *lock)
4015+static inline void arch_read_unlock(raw_rwlock_t *lock)
4016 {
4017 unsigned long tmp1, tmp2;
4018
4019 __asm__ __volatile__(
4020 "1: lduw [%2], %0\n"
4021-" sub %0, 1, %1\n"
4022+" subcc %0, 1, %1\n"
4023+
4024+#ifdef CONFIG_PAX_REFCOUNT
4025+" tvs %%icc, 6\n"
4026+#endif
4027+
4028 " cas [%2], %0, %1\n"
4029 " cmp %0, %1\n"
4030 " bne,pn %%xcc, 1b\n"
4031@@ -152,7 +167,7 @@ static void inline arch_read_unlock(raw_
4032 : "memory");
4033 }
4034
4035-static void inline arch_write_lock(raw_rwlock_t *lock)
4036+static inline void arch_write_lock(raw_rwlock_t *lock)
4037 {
4038 unsigned long mask, tmp1, tmp2;
4039
4040@@ -177,7 +192,7 @@ static void inline arch_write_lock(raw_r
4041 : "memory");
4042 }
4043
4044-static void inline arch_write_unlock(raw_rwlock_t *lock)
4045+static inline void arch_write_unlock(raw_rwlock_t *lock)
4046 {
4047 __asm__ __volatile__(
4048 " stw %%g0, [%0]"
4049diff -urNp linux-2.6.32.43/arch/sparc/include/asm/thread_info_32.h linux-2.6.32.43/arch/sparc/include/asm/thread_info_32.h
4050--- linux-2.6.32.43/arch/sparc/include/asm/thread_info_32.h 2011-03-27 14:31:47.000000000 -0400
4051+++ linux-2.6.32.43/arch/sparc/include/asm/thread_info_32.h 2011-06-04 20:46:01.000000000 -0400
4052@@ -50,6 +50,8 @@ struct thread_info {
4053 unsigned long w_saved;
4054
4055 struct restart_block restart_block;
4056+
4057+ unsigned long lowest_stack;
4058 };
4059
4060 /*
4061diff -urNp linux-2.6.32.43/arch/sparc/include/asm/thread_info_64.h linux-2.6.32.43/arch/sparc/include/asm/thread_info_64.h
4062--- linux-2.6.32.43/arch/sparc/include/asm/thread_info_64.h 2011-03-27 14:31:47.000000000 -0400
4063+++ linux-2.6.32.43/arch/sparc/include/asm/thread_info_64.h 2011-06-04 20:46:21.000000000 -0400
4064@@ -68,6 +68,8 @@ struct thread_info {
4065 struct pt_regs *kern_una_regs;
4066 unsigned int kern_una_insn;
4067
4068+ unsigned long lowest_stack;
4069+
4070 unsigned long fpregs[0] __attribute__ ((aligned(64)));
4071 };
4072
4073diff -urNp linux-2.6.32.43/arch/sparc/include/asm/uaccess_32.h linux-2.6.32.43/arch/sparc/include/asm/uaccess_32.h
4074--- linux-2.6.32.43/arch/sparc/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
4075+++ linux-2.6.32.43/arch/sparc/include/asm/uaccess_32.h 2011-04-17 15:56:46.000000000 -0400
4076@@ -249,27 +249,46 @@ extern unsigned long __copy_user(void __
4077
4078 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
4079 {
4080- if (n && __access_ok((unsigned long) to, n))
4081+ if ((long)n < 0)
4082+ return n;
4083+
4084+ if (n && __access_ok((unsigned long) to, n)) {
4085+ if (!__builtin_constant_p(n))
4086+ check_object_size(from, n, true);
4087 return __copy_user(to, (__force void __user *) from, n);
4088- else
4089+ } else
4090 return n;
4091 }
4092
4093 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
4094 {
4095+ if ((long)n < 0)
4096+ return n;
4097+
4098+ if (!__builtin_constant_p(n))
4099+ check_object_size(from, n, true);
4100+
4101 return __copy_user(to, (__force void __user *) from, n);
4102 }
4103
4104 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
4105 {
4106- if (n && __access_ok((unsigned long) from, n))
4107+ if ((long)n < 0)
4108+ return n;
4109+
4110+ if (n && __access_ok((unsigned long) from, n)) {
4111+ if (!__builtin_constant_p(n))
4112+ check_object_size(to, n, false);
4113 return __copy_user((__force void __user *) to, from, n);
4114- else
4115+ } else
4116 return n;
4117 }
4118
4119 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
4120 {
4121+ if ((long)n < 0)
4122+ return n;
4123+
4124 return __copy_user((__force void __user *) to, from, n);
4125 }
4126
4127diff -urNp linux-2.6.32.43/arch/sparc/include/asm/uaccess_64.h linux-2.6.32.43/arch/sparc/include/asm/uaccess_64.h
4128--- linux-2.6.32.43/arch/sparc/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
4129+++ linux-2.6.32.43/arch/sparc/include/asm/uaccess_64.h 2011-04-17 15:56:46.000000000 -0400
4130@@ -9,6 +9,7 @@
4131 #include <linux/compiler.h>
4132 #include <linux/string.h>
4133 #include <linux/thread_info.h>
4134+#include <linux/kernel.h>
4135 #include <asm/asi.h>
4136 #include <asm/system.h>
4137 #include <asm/spitfire.h>
4138@@ -212,8 +213,15 @@ extern unsigned long copy_from_user_fixu
4139 static inline unsigned long __must_check
4140 copy_from_user(void *to, const void __user *from, unsigned long size)
4141 {
4142- unsigned long ret = ___copy_from_user(to, from, size);
4143+ unsigned long ret;
4144
4145+ if ((long)size < 0 || size > INT_MAX)
4146+ return size;
4147+
4148+ if (!__builtin_constant_p(size))
4149+ check_object_size(to, size, false);
4150+
4151+ ret = ___copy_from_user(to, from, size);
4152 if (unlikely(ret))
4153 ret = copy_from_user_fixup(to, from, size);
4154 return ret;
4155@@ -228,8 +236,15 @@ extern unsigned long copy_to_user_fixup(
4156 static inline unsigned long __must_check
4157 copy_to_user(void __user *to, const void *from, unsigned long size)
4158 {
4159- unsigned long ret = ___copy_to_user(to, from, size);
4160+ unsigned long ret;
4161+
4162+ if ((long)size < 0 || size > INT_MAX)
4163+ return size;
4164+
4165+ if (!__builtin_constant_p(size))
4166+ check_object_size(from, size, true);
4167
4168+ ret = ___copy_to_user(to, from, size);
4169 if (unlikely(ret))
4170 ret = copy_to_user_fixup(to, from, size);
4171 return ret;
4172diff -urNp linux-2.6.32.43/arch/sparc/include/asm/uaccess.h linux-2.6.32.43/arch/sparc/include/asm/uaccess.h
4173--- linux-2.6.32.43/arch/sparc/include/asm/uaccess.h 2011-03-27 14:31:47.000000000 -0400
4174+++ linux-2.6.32.43/arch/sparc/include/asm/uaccess.h 2011-04-17 15:56:46.000000000 -0400
4175@@ -1,5 +1,13 @@
4176 #ifndef ___ASM_SPARC_UACCESS_H
4177 #define ___ASM_SPARC_UACCESS_H
4178+
4179+#ifdef __KERNEL__
4180+#ifndef __ASSEMBLY__
4181+#include <linux/types.h>
4182+extern void check_object_size(const void *ptr, unsigned long n, bool to);
4183+#endif
4184+#endif
4185+
4186 #if defined(__sparc__) && defined(__arch64__)
4187 #include <asm/uaccess_64.h>
4188 #else
4189diff -urNp linux-2.6.32.43/arch/sparc/kernel/iommu.c linux-2.6.32.43/arch/sparc/kernel/iommu.c
4190--- linux-2.6.32.43/arch/sparc/kernel/iommu.c 2011-03-27 14:31:47.000000000 -0400
4191+++ linux-2.6.32.43/arch/sparc/kernel/iommu.c 2011-04-17 15:56:46.000000000 -0400
4192@@ -826,7 +826,7 @@ static void dma_4u_sync_sg_for_cpu(struc
4193 spin_unlock_irqrestore(&iommu->lock, flags);
4194 }
4195
4196-static struct dma_map_ops sun4u_dma_ops = {
4197+static const struct dma_map_ops sun4u_dma_ops = {
4198 .alloc_coherent = dma_4u_alloc_coherent,
4199 .free_coherent = dma_4u_free_coherent,
4200 .map_page = dma_4u_map_page,
4201@@ -837,7 +837,7 @@ static struct dma_map_ops sun4u_dma_ops
4202 .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu,
4203 };
4204
4205-struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4206+const struct dma_map_ops *dma_ops = &sun4u_dma_ops;
4207 EXPORT_SYMBOL(dma_ops);
4208
4209 extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask);
4210diff -urNp linux-2.6.32.43/arch/sparc/kernel/ioport.c linux-2.6.32.43/arch/sparc/kernel/ioport.c
4211--- linux-2.6.32.43/arch/sparc/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
4212+++ linux-2.6.32.43/arch/sparc/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
4213@@ -392,7 +392,7 @@ static void sbus_sync_sg_for_device(stru
4214 BUG();
4215 }
4216
4217-struct dma_map_ops sbus_dma_ops = {
4218+const struct dma_map_ops sbus_dma_ops = {
4219 .alloc_coherent = sbus_alloc_coherent,
4220 .free_coherent = sbus_free_coherent,
4221 .map_page = sbus_map_page,
4222@@ -403,7 +403,7 @@ struct dma_map_ops sbus_dma_ops = {
4223 .sync_sg_for_device = sbus_sync_sg_for_device,
4224 };
4225
4226-struct dma_map_ops *dma_ops = &sbus_dma_ops;
4227+const struct dma_map_ops *dma_ops = &sbus_dma_ops;
4228 EXPORT_SYMBOL(dma_ops);
4229
4230 static int __init sparc_register_ioport(void)
4231@@ -640,7 +640,7 @@ static void pci32_sync_sg_for_device(str
4232 }
4233 }
4234
4235-struct dma_map_ops pci32_dma_ops = {
4236+const struct dma_map_ops pci32_dma_ops = {
4237 .alloc_coherent = pci32_alloc_coherent,
4238 .free_coherent = pci32_free_coherent,
4239 .map_page = pci32_map_page,
4240diff -urNp linux-2.6.32.43/arch/sparc/kernel/kgdb_32.c linux-2.6.32.43/arch/sparc/kernel/kgdb_32.c
4241--- linux-2.6.32.43/arch/sparc/kernel/kgdb_32.c 2011-03-27 14:31:47.000000000 -0400
4242+++ linux-2.6.32.43/arch/sparc/kernel/kgdb_32.c 2011-04-17 15:56:46.000000000 -0400
4243@@ -158,7 +158,7 @@ void kgdb_arch_exit(void)
4244 {
4245 }
4246
4247-struct kgdb_arch arch_kgdb_ops = {
4248+const struct kgdb_arch arch_kgdb_ops = {
4249 /* Breakpoint instruction: ta 0x7d */
4250 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d },
4251 };
4252diff -urNp linux-2.6.32.43/arch/sparc/kernel/kgdb_64.c linux-2.6.32.43/arch/sparc/kernel/kgdb_64.c
4253--- linux-2.6.32.43/arch/sparc/kernel/kgdb_64.c 2011-03-27 14:31:47.000000000 -0400
4254+++ linux-2.6.32.43/arch/sparc/kernel/kgdb_64.c 2011-04-17 15:56:46.000000000 -0400
4255@@ -180,7 +180,7 @@ void kgdb_arch_exit(void)
4256 {
4257 }
4258
4259-struct kgdb_arch arch_kgdb_ops = {
4260+const struct kgdb_arch arch_kgdb_ops = {
4261 /* Breakpoint instruction: ta 0x72 */
4262 .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 },
4263 };
4264diff -urNp linux-2.6.32.43/arch/sparc/kernel/Makefile linux-2.6.32.43/arch/sparc/kernel/Makefile
4265--- linux-2.6.32.43/arch/sparc/kernel/Makefile 2011-03-27 14:31:47.000000000 -0400
4266+++ linux-2.6.32.43/arch/sparc/kernel/Makefile 2011-04-17 15:56:46.000000000 -0400
4267@@ -3,7 +3,7 @@
4268 #
4269
4270 asflags-y := -ansi
4271-ccflags-y := -Werror
4272+#ccflags-y := -Werror
4273
4274 extra-y := head_$(BITS).o
4275 extra-y += init_task.o
4276diff -urNp linux-2.6.32.43/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.43/arch/sparc/kernel/pci_sun4v.c
4277--- linux-2.6.32.43/arch/sparc/kernel/pci_sun4v.c 2011-03-27 14:31:47.000000000 -0400
4278+++ linux-2.6.32.43/arch/sparc/kernel/pci_sun4v.c 2011-04-17 15:56:46.000000000 -0400
4279@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic
4280 spin_unlock_irqrestore(&iommu->lock, flags);
4281 }
4282
4283-static struct dma_map_ops sun4v_dma_ops = {
4284+static const struct dma_map_ops sun4v_dma_ops = {
4285 .alloc_coherent = dma_4v_alloc_coherent,
4286 .free_coherent = dma_4v_free_coherent,
4287 .map_page = dma_4v_map_page,
4288diff -urNp linux-2.6.32.43/arch/sparc/kernel/process_32.c linux-2.6.32.43/arch/sparc/kernel/process_32.c
4289--- linux-2.6.32.43/arch/sparc/kernel/process_32.c 2011-03-27 14:31:47.000000000 -0400
4290+++ linux-2.6.32.43/arch/sparc/kernel/process_32.c 2011-04-17 15:56:46.000000000 -0400
4291@@ -196,7 +196,7 @@ void __show_backtrace(unsigned long fp)
4292 rw->ins[4], rw->ins[5],
4293 rw->ins[6],
4294 rw->ins[7]);
4295- printk("%pS\n", (void *) rw->ins[7]);
4296+ printk("%pA\n", (void *) rw->ins[7]);
4297 rw = (struct reg_window32 *) rw->ins[6];
4298 }
4299 spin_unlock_irqrestore(&sparc_backtrace_lock, flags);
4300@@ -263,14 +263,14 @@ void show_regs(struct pt_regs *r)
4301
4302 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
4303 r->psr, r->pc, r->npc, r->y, print_tainted());
4304- printk("PC: <%pS>\n", (void *) r->pc);
4305+ printk("PC: <%pA>\n", (void *) r->pc);
4306 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4307 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
4308 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
4309 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4310 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
4311 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
4312- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
4313+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
4314
4315 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
4316 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
4317@@ -305,7 +305,7 @@ void show_stack(struct task_struct *tsk,
4318 rw = (struct reg_window32 *) fp;
4319 pc = rw->ins[7];
4320 printk("[%08lx : ", pc);
4321- printk("%pS ] ", (void *) pc);
4322+ printk("%pA ] ", (void *) pc);
4323 fp = rw->ins[6];
4324 } while (++count < 16);
4325 printk("\n");
4326diff -urNp linux-2.6.32.43/arch/sparc/kernel/process_64.c linux-2.6.32.43/arch/sparc/kernel/process_64.c
4327--- linux-2.6.32.43/arch/sparc/kernel/process_64.c 2011-03-27 14:31:47.000000000 -0400
4328+++ linux-2.6.32.43/arch/sparc/kernel/process_64.c 2011-04-17 15:56:46.000000000 -0400
4329@@ -180,14 +180,14 @@ static void show_regwindow(struct pt_reg
4330 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
4331 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
4332 if (regs->tstate & TSTATE_PRIV)
4333- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
4334+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
4335 }
4336
4337 void show_regs(struct pt_regs *regs)
4338 {
4339 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
4340 regs->tpc, regs->tnpc, regs->y, print_tainted());
4341- printk("TPC: <%pS>\n", (void *) regs->tpc);
4342+ printk("TPC: <%pA>\n", (void *) regs->tpc);
4343 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
4344 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
4345 regs->u_regs[3]);
4346@@ -200,7 +200,7 @@ void show_regs(struct pt_regs *regs)
4347 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
4348 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
4349 regs->u_regs[15]);
4350- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
4351+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
4352 show_regwindow(regs);
4353 }
4354
4355@@ -284,7 +284,7 @@ void arch_trigger_all_cpu_backtrace(void
4356 ((tp && tp->task) ? tp->task->pid : -1));
4357
4358 if (gp->tstate & TSTATE_PRIV) {
4359- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
4360+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
4361 (void *) gp->tpc,
4362 (void *) gp->o7,
4363 (void *) gp->i7,
4364diff -urNp linux-2.6.32.43/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.43/arch/sparc/kernel/sys_sparc_32.c
4365--- linux-2.6.32.43/arch/sparc/kernel/sys_sparc_32.c 2011-03-27 14:31:47.000000000 -0400
4366+++ linux-2.6.32.43/arch/sparc/kernel/sys_sparc_32.c 2011-04-17 15:56:46.000000000 -0400
4367@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str
4368 if (ARCH_SUN4C && len > 0x20000000)
4369 return -ENOMEM;
4370 if (!addr)
4371- addr = TASK_UNMAPPED_BASE;
4372+ addr = current->mm->mmap_base;
4373
4374 if (flags & MAP_SHARED)
4375 addr = COLOUR_ALIGN(addr);
4376@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str
4377 }
4378 if (TASK_SIZE - PAGE_SIZE - len < addr)
4379 return -ENOMEM;
4380- if (!vmm || addr + len <= vmm->vm_start)
4381+ if (check_heap_stack_gap(vmm, addr, len))
4382 return addr;
4383 addr = vmm->vm_end;
4384 if (flags & MAP_SHARED)
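
The arch_get_unmapped_area() hunk above replaces the open-coded "!vmm || addr + len <= vmm->vm_start" test with check_heap_stack_gap(), a PaX helper defined elsewhere in this patch. Roughly, it accepts a candidate range only if it fits below the next vma and, when that vma is a downward-growing stack, also leaves a guard gap so new mappings cannot be placed flush against the stack. A rough user-space model of that check follows; the field names and the gap size are assumptions for illustration, not the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy stand-in for struct vm_area_struct; only what the check needs. */
    struct toy_vma {
        unsigned long vm_start;
        unsigned long vm_end;
        bool grows_down;                    /* stands in for VM_GROWSDOWN */
    };

    #define TOY_HEAP_STACK_GAP (64UL << 10) /* assumed guard size, illustrative */

    static bool toy_check_heap_stack_gap(const struct toy_vma *vma,
                                         unsigned long addr, unsigned long len)
    {
        if (!vma)                           /* nothing above: always fits */
            return true;
        if (addr + len > vma->vm_start)     /* overlaps the next vma */
            return false;
        if (vma->grows_down)                /* keep a gap below a stack vma */
            return addr + len + TOY_HEAP_STACK_GAP <= vma->vm_start;
        return true;
    }

    int main(void)
    {
        struct toy_vma stack = { 0x7f0000, 0x800000, true };

        printf("%d\n", toy_check_heap_stack_gap(&stack, 0x700000, 0x1000)); /* 1 */
        printf("%d\n", toy_check_heap_stack_gap(&stack, 0x7e0000, 0x8000)); /* 0 */
        return 0;
    }
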
4385diff -urNp linux-2.6.32.43/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.43/arch/sparc/kernel/sys_sparc_64.c
4386--- linux-2.6.32.43/arch/sparc/kernel/sys_sparc_64.c 2011-03-27 14:31:47.000000000 -0400
4387+++ linux-2.6.32.43/arch/sparc/kernel/sys_sparc_64.c 2011-04-17 15:56:46.000000000 -0400
4388@@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str
4389 /* We do not accept a shared mapping if it would violate
4390 * cache aliasing constraints.
4391 */
4392- if ((flags & MAP_SHARED) &&
4393+ if ((filp || (flags & MAP_SHARED)) &&
4394 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4395 return -EINVAL;
4396 return addr;
4397@@ -140,6 +140,10 @@ unsigned long arch_get_unmapped_area(str
4398 if (filp || (flags & MAP_SHARED))
4399 do_color_align = 1;
4400
4401+#ifdef CONFIG_PAX_RANDMMAP
4402+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
4403+#endif
4404+
4405 if (addr) {
4406 if (do_color_align)
4407 addr = COLOUR_ALIGN(addr, pgoff);
4408@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str
4409 addr = PAGE_ALIGN(addr);
4410
4411 vma = find_vma(mm, addr);
4412- if (task_size - len >= addr &&
4413- (!vma || addr + len <= vma->vm_start))
4414+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4415 return addr;
4416 }
4417
4418 if (len > mm->cached_hole_size) {
4419- start_addr = addr = mm->free_area_cache;
4420+ start_addr = addr = mm->free_area_cache;
4421 } else {
4422- start_addr = addr = TASK_UNMAPPED_BASE;
4423+ start_addr = addr = mm->mmap_base;
4424 mm->cached_hole_size = 0;
4425 }
4426
4427@@ -175,14 +178,14 @@ full_search:
4428 vma = find_vma(mm, VA_EXCLUDE_END);
4429 }
4430 if (unlikely(task_size < addr)) {
4431- if (start_addr != TASK_UNMAPPED_BASE) {
4432- start_addr = addr = TASK_UNMAPPED_BASE;
4433+ if (start_addr != mm->mmap_base) {
4434+ start_addr = addr = mm->mmap_base;
4435 mm->cached_hole_size = 0;
4436 goto full_search;
4437 }
4438 return -ENOMEM;
4439 }
4440- if (likely(!vma || addr + len <= vma->vm_start)) {
4441+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4442 /*
4443 * Remember the place where we stopped the search:
4444 */
4445@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi
4446 /* We do not accept a shared mapping if it would violate
4447 * cache aliasing constraints.
4448 */
4449- if ((flags & MAP_SHARED) &&
4450+ if ((filp || (flags & MAP_SHARED)) &&
4451 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
4452 return -EINVAL;
4453 return addr;
4454@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi
4455 addr = PAGE_ALIGN(addr);
4456
4457 vma = find_vma(mm, addr);
4458- if (task_size - len >= addr &&
4459- (!vma || addr + len <= vma->vm_start))
4460+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
4461 return addr;
4462 }
4463
4464@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi
4465 /* make sure it can fit in the remaining address space */
4466 if (likely(addr > len)) {
4467 vma = find_vma(mm, addr-len);
4468- if (!vma || addr <= vma->vm_start) {
4469+ if (check_heap_stack_gap(vma, addr - len, len)) {
4470 /* remember the address as a hint for next time */
4471 return (mm->free_area_cache = addr-len);
4472 }
4473@@ -268,18 +270,18 @@ arch_get_unmapped_area_topdown(struct fi
4474 if (unlikely(mm->mmap_base < len))
4475 goto bottomup;
4476
4477- addr = mm->mmap_base-len;
4478- if (do_color_align)
4479- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4480+ addr = mm->mmap_base - len;
4481
4482 do {
4483+ if (do_color_align)
4484+ addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4485 /*
4486 * Lookup failure means no vma is above this address,
4487 * else if new region fits below vma->vm_start,
4488 * return with success:
4489 */
4490 vma = find_vma(mm, addr);
4491- if (likely(!vma || addr+len <= vma->vm_start)) {
4492+ if (likely(check_heap_stack_gap(vma, addr, len))) {
4493 /* remember the address as a hint for next time */
4494 return (mm->free_area_cache = addr);
4495 }
4496@@ -289,10 +291,8 @@ arch_get_unmapped_area_topdown(struct fi
4497 mm->cached_hole_size = vma->vm_start - addr;
4498
4499 /* try just below the current vma->vm_start */
4500- addr = vma->vm_start-len;
4501- if (do_color_align)
4502- addr = COLOUR_ALIGN_DOWN(addr, pgoff);
4503- } while (likely(len < vma->vm_start));
4504+ addr = skip_heap_stack_gap(vma, len);
4505+ } while (!IS_ERR_VALUE(addr));
4506
4507 bottomup:
4508 /*
4509@@ -384,6 +384,12 @@ void arch_pick_mmap_layout(struct mm_str
4510 current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY ||
4511 sysctl_legacy_va_layout) {
4512 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
4513+
4514+#ifdef CONFIG_PAX_RANDMMAP
4515+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4516+ mm->mmap_base += mm->delta_mmap;
4517+#endif
4518+
4519 mm->get_unmapped_area = arch_get_unmapped_area;
4520 mm->unmap_area = arch_unmap_area;
4521 } else {
4522@@ -398,6 +404,12 @@ void arch_pick_mmap_layout(struct mm_str
4523 gap = (task_size / 6 * 5);
4524
4525 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
4526+
4527+#ifdef CONFIG_PAX_RANDMMAP
4528+ if (mm->pax_flags & MF_PAX_RANDMMAP)
4529+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
4530+#endif
4531+
4532 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
4533 mm->unmap_area = arch_unmap_area_topdown;
4534 }
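
Besides switching to the same gap check, the sys_sparc_64.c hunks start the free-area search at mm->mmap_base instead of the fixed TASK_UNMAPPED_BASE, and arch_pick_mmap_layout() shifts that base by mm->delta_mmap (plus delta_stack for the top-down layout) when PAX_RANDMMAP is enabled. A toy sketch of the effect, using illustrative constants that are not the kernel's values:

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    #define TOY_TASK_UNMAPPED_BASE 0x70000000UL /* illustrative value only */
    #define TOY_PAGE_SHIFT         13           /* sparc64 8K pages */

    /* Toy model: the randomized delta is picked once per exec and folded into
     * mmap_base, so every later search starts from the shifted base.  Field
     * names and the delta width are stand-ins, not the kernel's. */
    struct toy_mm {
        unsigned long mmap_base;
        unsigned long delta_mmap;
    };

    static void toy_pick_mmap_layout(struct toy_mm *mm, int randmmap)
    {
        mm->delta_mmap = randmmap
            ? ((unsigned long)rand() & 0x3FFFUL) << TOY_PAGE_SHIFT
            : 0;
        mm->mmap_base = TOY_TASK_UNMAPPED_BASE + mm->delta_mmap;
    }

    int main(void)
    {
        struct toy_mm mm;

        srand((unsigned)time(NULL));
        toy_pick_mmap_layout(&mm, 1);
        printf("mmap search starts at %#lx (delta %#lx)\n",
               mm.mmap_base, mm.delta_mmap);
        return 0;
    }
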
4535diff -urNp linux-2.6.32.43/arch/sparc/kernel/traps_32.c linux-2.6.32.43/arch/sparc/kernel/traps_32.c
4536--- linux-2.6.32.43/arch/sparc/kernel/traps_32.c 2011-03-27 14:31:47.000000000 -0400
4537+++ linux-2.6.32.43/arch/sparc/kernel/traps_32.c 2011-06-13 21:25:39.000000000 -0400
4538@@ -44,6 +44,8 @@ static void instruction_dump(unsigned lo
4539 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
4540 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
4541
4542+extern void gr_handle_kernel_exploit(void);
4543+
4544 void die_if_kernel(char *str, struct pt_regs *regs)
4545 {
4546 static int die_counter;
4547@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_
4548 count++ < 30 &&
4549 (((unsigned long) rw) >= PAGE_OFFSET) &&
4550 !(((unsigned long) rw) & 0x7)) {
4551- printk("Caller[%08lx]: %pS\n", rw->ins[7],
4552+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
4553 (void *) rw->ins[7]);
4554 rw = (struct reg_window32 *)rw->ins[6];
4555 }
4556 }
4557 printk("Instruction DUMP:");
4558 instruction_dump ((unsigned long *) regs->pc);
4559- if(regs->psr & PSR_PS)
4560+ if(regs->psr & PSR_PS) {
4561+ gr_handle_kernel_exploit();
4562 do_exit(SIGKILL);
4563+ }
4564 do_exit(SIGSEGV);
4565 }
4566
4567diff -urNp linux-2.6.32.43/arch/sparc/kernel/traps_64.c linux-2.6.32.43/arch/sparc/kernel/traps_64.c
4568--- linux-2.6.32.43/arch/sparc/kernel/traps_64.c 2011-03-27 14:31:47.000000000 -0400
4569+++ linux-2.6.32.43/arch/sparc/kernel/traps_64.c 2011-06-13 21:24:11.000000000 -0400
4570@@ -73,7 +73,7 @@ static void dump_tl1_traplog(struct tl1_
4571 i + 1,
4572 p->trapstack[i].tstate, p->trapstack[i].tpc,
4573 p->trapstack[i].tnpc, p->trapstack[i].tt);
4574- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
4575+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
4576 }
4577 }
4578
4579@@ -93,6 +93,12 @@ void bad_trap(struct pt_regs *regs, long
4580
4581 lvl -= 0x100;
4582 if (regs->tstate & TSTATE_PRIV) {
4583+
4584+#ifdef CONFIG_PAX_REFCOUNT
4585+ if (lvl == 6)
4586+ pax_report_refcount_overflow(regs);
4587+#endif
4588+
4589 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
4590 die_if_kernel(buffer, regs);
4591 }
4592@@ -111,11 +117,16 @@ void bad_trap(struct pt_regs *regs, long
4593 void bad_trap_tl1(struct pt_regs *regs, long lvl)
4594 {
4595 char buffer[32];
4596-
4597+
4598 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
4599 0, lvl, SIGTRAP) == NOTIFY_STOP)
4600 return;
4601
4602+#ifdef CONFIG_PAX_REFCOUNT
4603+ if (lvl == 6)
4604+ pax_report_refcount_overflow(regs);
4605+#endif
4606+
4607 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
4608
4609 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
4610@@ -1139,7 +1150,7 @@ static void cheetah_log_errors(struct pt
4611 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
4612 printk("%s" "ERROR(%d): ",
4613 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
4614- printk("TPC<%pS>\n", (void *) regs->tpc);
4615+ printk("TPC<%pA>\n", (void *) regs->tpc);
4616 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
4617 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
4618 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
4619@@ -1746,7 +1757,7 @@ void cheetah_plus_parity_error(int type,
4620 smp_processor_id(),
4621 (type & 0x1) ? 'I' : 'D',
4622 regs->tpc);
4623- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
4624+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
4625 panic("Irrecoverable Cheetah+ parity error.");
4626 }
4627
4628@@ -1754,7 +1765,7 @@ void cheetah_plus_parity_error(int type,
4629 smp_processor_id(),
4630 (type & 0x1) ? 'I' : 'D',
4631 regs->tpc);
4632- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
4633+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
4634 }
4635
4636 struct sun4v_error_entry {
4637@@ -1961,9 +1972,9 @@ void sun4v_itlb_error_report(struct pt_r
4638
4639 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
4640 regs->tpc, tl);
4641- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
4642+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
4643 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4644- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
4645+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
4646 (void *) regs->u_regs[UREG_I7]);
4647 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
4648 "pte[%lx] error[%lx]\n",
4649@@ -1985,9 +1996,9 @@ void sun4v_dtlb_error_report(struct pt_r
4650
4651 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
4652 regs->tpc, tl);
4653- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
4654+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
4655 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
4656- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
4657+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
4658 (void *) regs->u_regs[UREG_I7]);
4659 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
4660 "pte[%lx] error[%lx]\n",
4661@@ -2191,7 +2202,7 @@ void show_stack(struct task_struct *tsk,
4662 fp = (unsigned long)sf->fp + STACK_BIAS;
4663 }
4664
4665- printk(" [%016lx] %pS\n", pc, (void *) pc);
4666+ printk(" [%016lx] %pA\n", pc, (void *) pc);
4667 } while (++count < 16);
4668 }
4669
4670@@ -2233,6 +2244,8 @@ static inline struct reg_window *kernel_
4671 return (struct reg_window *) (fp + STACK_BIAS);
4672 }
4673
4674+extern void gr_handle_kernel_exploit(void);
4675+
4676 void die_if_kernel(char *str, struct pt_regs *regs)
4677 {
4678 static int die_counter;
4679@@ -2260,7 +2273,7 @@ void die_if_kernel(char *str, struct pt_
4680 while (rw &&
4681 count++ < 30&&
4682 is_kernel_stack(current, rw)) {
4683- printk("Caller[%016lx]: %pS\n", rw->ins[7],
4684+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
4685 (void *) rw->ins[7]);
4686
4687 rw = kernel_stack_up(rw);
4688@@ -2273,8 +2286,11 @@ void die_if_kernel(char *str, struct pt_
4689 }
4690 user_instruction_dump ((unsigned int __user *) regs->tpc);
4691 }
4692- if (regs->tstate & TSTATE_PRIV)
4693+ if (regs->tstate & TSTATE_PRIV) {
4694+ gr_handle_kernel_exploit();
4695 do_exit(SIGKILL);
4696+ }
4697+
4698 do_exit(SIGSEGV);
4699 }
4700 EXPORT_SYMBOL(die_if_kernel);
4701diff -urNp linux-2.6.32.43/arch/sparc/kernel/una_asm_64.S linux-2.6.32.43/arch/sparc/kernel/una_asm_64.S
4702--- linux-2.6.32.43/arch/sparc/kernel/una_asm_64.S 2011-03-27 14:31:47.000000000 -0400
4703+++ linux-2.6.32.43/arch/sparc/kernel/una_asm_64.S 2011-07-13 22:20:05.000000000 -0400
4704@@ -127,7 +127,7 @@ do_int_load:
4705 wr %o5, 0x0, %asi
4706 retl
4707 mov 0, %o0
4708- .size __do_int_load, .-__do_int_load
4709+ .size do_int_load, .-do_int_load
4710
4711 .section __ex_table,"a"
4712 .word 4b, __retl_efault
4713diff -urNp linux-2.6.32.43/arch/sparc/kernel/unaligned_64.c linux-2.6.32.43/arch/sparc/kernel/unaligned_64.c
4714--- linux-2.6.32.43/arch/sparc/kernel/unaligned_64.c 2011-03-27 14:31:47.000000000 -0400
4715+++ linux-2.6.32.43/arch/sparc/kernel/unaligned_64.c 2011-04-17 15:56:46.000000000 -0400
4716@@ -288,7 +288,7 @@ static void log_unaligned(struct pt_regs
4717 if (count < 5) {
4718 last_time = jiffies;
4719 count++;
4720- printk("Kernel unaligned access at TPC[%lx] %pS\n",
4721+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
4722 regs->tpc, (void *) regs->tpc);
4723 }
4724 }
4725diff -urNp linux-2.6.32.43/arch/sparc/lib/atomic_64.S linux-2.6.32.43/arch/sparc/lib/atomic_64.S
4726--- linux-2.6.32.43/arch/sparc/lib/atomic_64.S 2011-03-27 14:31:47.000000000 -0400
4727+++ linux-2.6.32.43/arch/sparc/lib/atomic_64.S 2011-04-17 15:56:46.000000000 -0400
4728@@ -18,7 +18,12 @@
4729 atomic_add: /* %o0 = increment, %o1 = atomic_ptr */
4730 BACKOFF_SETUP(%o2)
4731 1: lduw [%o1], %g1
4732- add %g1, %o0, %g7
4733+ addcc %g1, %o0, %g7
4734+
4735+#ifdef CONFIG_PAX_REFCOUNT
4736+ tvs %icc, 6
4737+#endif
4738+
4739 cas [%o1], %g1, %g7
4740 cmp %g1, %g7
4741 bne,pn %icc, 2f
4742@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at
4743 2: BACKOFF_SPIN(%o2, %o3, 1b)
4744 .size atomic_add, .-atomic_add
4745
4746+ .globl atomic_add_unchecked
4747+ .type atomic_add_unchecked,#function
4748+atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4749+ BACKOFF_SETUP(%o2)
4750+1: lduw [%o1], %g1
4751+ add %g1, %o0, %g7
4752+ cas [%o1], %g1, %g7
4753+ cmp %g1, %g7
4754+ bne,pn %icc, 2f
4755+ nop
4756+ retl
4757+ nop
4758+2: BACKOFF_SPIN(%o2, %o3, 1b)
4759+ .size atomic_add_unchecked, .-atomic_add_unchecked
4760+
4761 .globl atomic_sub
4762 .type atomic_sub,#function
4763 atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4764 BACKOFF_SETUP(%o2)
4765 1: lduw [%o1], %g1
4766- sub %g1, %o0, %g7
4767+ subcc %g1, %o0, %g7
4768+
4769+#ifdef CONFIG_PAX_REFCOUNT
4770+ tvs %icc, 6
4771+#endif
4772+
4773 cas [%o1], %g1, %g7
4774 cmp %g1, %g7
4775 bne,pn %icc, 2f
4776@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at
4777 2: BACKOFF_SPIN(%o2, %o3, 1b)
4778 .size atomic_sub, .-atomic_sub
4779
4780+ .globl atomic_sub_unchecked
4781+ .type atomic_sub_unchecked,#function
4782+atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4783+ BACKOFF_SETUP(%o2)
4784+1: lduw [%o1], %g1
4785+ sub %g1, %o0, %g7
4786+ cas [%o1], %g1, %g7
4787+ cmp %g1, %g7
4788+ bne,pn %icc, 2f
4789+ nop
4790+ retl
4791+ nop
4792+2: BACKOFF_SPIN(%o2, %o3, 1b)
4793+ .size atomic_sub_unchecked, .-atomic_sub_unchecked
4794+
4795 .globl atomic_add_ret
4796 .type atomic_add_ret,#function
4797 atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4798 BACKOFF_SETUP(%o2)
4799 1: lduw [%o1], %g1
4800- add %g1, %o0, %g7
4801+ addcc %g1, %o0, %g7
4802+
4803+#ifdef CONFIG_PAX_REFCOUNT
4804+ tvs %icc, 6
4805+#endif
4806+
4807 cas [%o1], %g1, %g7
4808 cmp %g1, %g7
4809 bne,pn %icc, 2f
4810@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1
4811 2: BACKOFF_SPIN(%o2, %o3, 1b)
4812 .size atomic_add_ret, .-atomic_add_ret
4813
4814+ .globl atomic_add_ret_unchecked
4815+ .type atomic_add_ret_unchecked,#function
4816+atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4817+ BACKOFF_SETUP(%o2)
4818+1: lduw [%o1], %g1
4819+ addcc %g1, %o0, %g7
4820+ cas [%o1], %g1, %g7
4821+ cmp %g1, %g7
4822+ bne,pn %icc, 2f
4823+ add %g7, %o0, %g7
4824+ sra %g7, 0, %o0
4825+ retl
4826+ nop
4827+2: BACKOFF_SPIN(%o2, %o3, 1b)
4828+ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked
4829+
4830 .globl atomic_sub_ret
4831 .type atomic_sub_ret,#function
4832 atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4833 BACKOFF_SETUP(%o2)
4834 1: lduw [%o1], %g1
4835- sub %g1, %o0, %g7
4836+ subcc %g1, %o0, %g7
4837+
4838+#ifdef CONFIG_PAX_REFCOUNT
4839+ tvs %icc, 6
4840+#endif
4841+
4842 cas [%o1], %g1, %g7
4843 cmp %g1, %g7
4844 bne,pn %icc, 2f
4845@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1
4846 atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */
4847 BACKOFF_SETUP(%o2)
4848 1: ldx [%o1], %g1
4849- add %g1, %o0, %g7
4850+ addcc %g1, %o0, %g7
4851+
4852+#ifdef CONFIG_PAX_REFCOUNT
4853+ tvs %xcc, 6
4854+#endif
4855+
4856 casx [%o1], %g1, %g7
4857 cmp %g1, %g7
4858 bne,pn %xcc, 2f
4859@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 =
4860 2: BACKOFF_SPIN(%o2, %o3, 1b)
4861 .size atomic64_add, .-atomic64_add
4862
4863+ .globl atomic64_add_unchecked
4864+ .type atomic64_add_unchecked,#function
4865+atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4866+ BACKOFF_SETUP(%o2)
4867+1: ldx [%o1], %g1
4868+ addcc %g1, %o0, %g7
4869+ casx [%o1], %g1, %g7
4870+ cmp %g1, %g7
4871+ bne,pn %xcc, 2f
4872+ nop
4873+ retl
4874+ nop
4875+2: BACKOFF_SPIN(%o2, %o3, 1b)
4876+ .size atomic64_add_unchecked, .-atomic64_add_unchecked
4877+
4878 .globl atomic64_sub
4879 .type atomic64_sub,#function
4880 atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */
4881 BACKOFF_SETUP(%o2)
4882 1: ldx [%o1], %g1
4883- sub %g1, %o0, %g7
4884+ subcc %g1, %o0, %g7
4885+
4886+#ifdef CONFIG_PAX_REFCOUNT
4887+ tvs %xcc, 6
4888+#endif
4889+
4890 casx [%o1], %g1, %g7
4891 cmp %g1, %g7
4892 bne,pn %xcc, 2f
4893@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 =
4894 2: BACKOFF_SPIN(%o2, %o3, 1b)
4895 .size atomic64_sub, .-atomic64_sub
4896
4897+ .globl atomic64_sub_unchecked
4898+ .type atomic64_sub_unchecked,#function
4899+atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */
4900+ BACKOFF_SETUP(%o2)
4901+1: ldx [%o1], %g1
4902+ subcc %g1, %o0, %g7
4903+ casx [%o1], %g1, %g7
4904+ cmp %g1, %g7
4905+ bne,pn %xcc, 2f
4906+ nop
4907+ retl
4908+ nop
4909+2: BACKOFF_SPIN(%o2, %o3, 1b)
4910+ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked
4911+
4912 .globl atomic64_add_ret
4913 .type atomic64_add_ret,#function
4914 atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */
4915 BACKOFF_SETUP(%o2)
4916 1: ldx [%o1], %g1
4917- add %g1, %o0, %g7
4918+ addcc %g1, %o0, %g7
4919+
4920+#ifdef CONFIG_PAX_REFCOUNT
4921+ tvs %xcc, 6
4922+#endif
4923+
4924 casx [%o1], %g1, %g7
4925 cmp %g1, %g7
4926 bne,pn %xcc, 2f
4927@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o
4928 2: BACKOFF_SPIN(%o2, %o3, 1b)
4929 .size atomic64_add_ret, .-atomic64_add_ret
4930
4931+ .globl atomic64_add_ret_unchecked
4932+ .type atomic64_add_ret_unchecked,#function
4933+atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */
4934+ BACKOFF_SETUP(%o2)
4935+1: ldx [%o1], %g1
4936+ addcc %g1, %o0, %g7
4937+ casx [%o1], %g1, %g7
4938+ cmp %g1, %g7
4939+ bne,pn %xcc, 2f
4940+ add %g7, %o0, %g7
4941+ mov %g7, %o0
4942+ retl
4943+ nop
4944+2: BACKOFF_SPIN(%o2, %o3, 1b)
4945+ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked
4946+
4947 .globl atomic64_sub_ret
4948 .type atomic64_sub_ret,#function
4949 atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */
4950 BACKOFF_SETUP(%o2)
4951 1: ldx [%o1], %g1
4952- sub %g1, %o0, %g7
4953+ subcc %g1, %o0, %g7
4954+
4955+#ifdef CONFIG_PAX_REFCOUNT
4956+ tvs %xcc, 6
4957+#endif
4958+
4959 casx [%o1], %g1, %g7
4960 cmp %g1, %g7
4961 bne,pn %xcc, 2f
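
Under PAX_REFCOUNT the plain add/sub in the checked atomic paths above becomes addcc/subcc, so the condition codes are updated, and "tvs %icc, 6" (or %xcc in the 64-bit variants) raises software trap 6 when the operation overflows; the traps_64.c hunk earlier in this patch routes that trap level to pax_report_refcount_overflow(). The new *_unchecked variants keep the old wrapping behaviour for counters that may legitimately wrap. A user-space analogue of the checked path, using a compiler builtin instead of a trap:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Do the add, detect signed overflow, and abort instead of letting the
     * counter wrap.  The kernel patch gets the same effect in two extra
     * instructions: addcc sets the overflow flag and tvs traps on it. */
    static int checked_inc(int *counter)
    {
        int next;

        if (__builtin_add_overflow(*counter, 1, &next)) {
            fprintf(stderr, "refcount overflow detected\n");
            abort();                /* stand-in for the trap handler */
        }
        *counter = next;
        return next;
    }

    int main(void)
    {
        int refs = INT_MAX - 1;

        checked_inc(&refs);         /* fine: reaches INT_MAX */
        checked_inc(&refs);         /* overflows: aborts */
        return 0;
    }
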
4962diff -urNp linux-2.6.32.43/arch/sparc/lib/ksyms.c linux-2.6.32.43/arch/sparc/lib/ksyms.c
4963--- linux-2.6.32.43/arch/sparc/lib/ksyms.c 2011-03-27 14:31:47.000000000 -0400
4964+++ linux-2.6.32.43/arch/sparc/lib/ksyms.c 2011-04-17 15:56:46.000000000 -0400
4965@@ -144,12 +144,17 @@ EXPORT_SYMBOL(__downgrade_write);
4966
4967 /* Atomic counter implementation. */
4968 EXPORT_SYMBOL(atomic_add);
4969+EXPORT_SYMBOL(atomic_add_unchecked);
4970 EXPORT_SYMBOL(atomic_add_ret);
4971 EXPORT_SYMBOL(atomic_sub);
4972+EXPORT_SYMBOL(atomic_sub_unchecked);
4973 EXPORT_SYMBOL(atomic_sub_ret);
4974 EXPORT_SYMBOL(atomic64_add);
4975+EXPORT_SYMBOL(atomic64_add_unchecked);
4976 EXPORT_SYMBOL(atomic64_add_ret);
4977+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
4978 EXPORT_SYMBOL(atomic64_sub);
4979+EXPORT_SYMBOL(atomic64_sub_unchecked);
4980 EXPORT_SYMBOL(atomic64_sub_ret);
4981
4982 /* Atomic bit operations. */
4983diff -urNp linux-2.6.32.43/arch/sparc/lib/Makefile linux-2.6.32.43/arch/sparc/lib/Makefile
4984--- linux-2.6.32.43/arch/sparc/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
4985+++ linux-2.6.32.43/arch/sparc/lib/Makefile 2011-05-17 19:26:34.000000000 -0400
4986@@ -2,7 +2,7 @@
4987 #
4988
4989 asflags-y := -ansi -DST_DIV0=0x02
4990-ccflags-y := -Werror
4991+#ccflags-y := -Werror
4992
4993 lib-$(CONFIG_SPARC32) += mul.o rem.o sdiv.o udiv.o umul.o urem.o ashrdi3.o
4994 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
4995diff -urNp linux-2.6.32.43/arch/sparc/lib/rwsem_64.S linux-2.6.32.43/arch/sparc/lib/rwsem_64.S
4996--- linux-2.6.32.43/arch/sparc/lib/rwsem_64.S 2011-03-27 14:31:47.000000000 -0400
4997+++ linux-2.6.32.43/arch/sparc/lib/rwsem_64.S 2011-04-17 15:56:46.000000000 -0400
4998@@ -11,7 +11,12 @@
4999 .globl __down_read
5000 __down_read:
5001 1: lduw [%o0], %g1
5002- add %g1, 1, %g7
5003+ addcc %g1, 1, %g7
5004+
5005+#ifdef CONFIG_PAX_REFCOUNT
5006+ tvs %icc, 6
5007+#endif
5008+
5009 cas [%o0], %g1, %g7
5010 cmp %g1, %g7
5011 bne,pn %icc, 1b
5012@@ -33,7 +38,12 @@ __down_read:
5013 .globl __down_read_trylock
5014 __down_read_trylock:
5015 1: lduw [%o0], %g1
5016- add %g1, 1, %g7
5017+ addcc %g1, 1, %g7
5018+
5019+#ifdef CONFIG_PAX_REFCOUNT
5020+ tvs %icc, 6
5021+#endif
5022+
5023 cmp %g7, 0
5024 bl,pn %icc, 2f
5025 mov 0, %o1
5026@@ -51,7 +61,12 @@ __down_write:
5027 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5028 1:
5029 lduw [%o0], %g3
5030- add %g3, %g1, %g7
5031+ addcc %g3, %g1, %g7
5032+
5033+#ifdef CONFIG_PAX_REFCOUNT
5034+ tvs %icc, 6
5035+#endif
5036+
5037 cas [%o0], %g3, %g7
5038 cmp %g3, %g7
5039 bne,pn %icc, 1b
5040@@ -77,7 +92,12 @@ __down_write_trylock:
5041 cmp %g3, 0
5042 bne,pn %icc, 2f
5043 mov 0, %o1
5044- add %g3, %g1, %g7
5045+ addcc %g3, %g1, %g7
5046+
5047+#ifdef CONFIG_PAX_REFCOUNT
5048+ tvs %icc, 6
5049+#endif
5050+
5051 cas [%o0], %g3, %g7
5052 cmp %g3, %g7
5053 bne,pn %icc, 1b
5054@@ -90,7 +110,12 @@ __down_write_trylock:
5055 __up_read:
5056 1:
5057 lduw [%o0], %g1
5058- sub %g1, 1, %g7
5059+ subcc %g1, 1, %g7
5060+
5061+#ifdef CONFIG_PAX_REFCOUNT
5062+ tvs %icc, 6
5063+#endif
5064+
5065 cas [%o0], %g1, %g7
5066 cmp %g1, %g7
5067 bne,pn %icc, 1b
5068@@ -118,7 +143,12 @@ __up_write:
5069 or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1
5070 1:
5071 lduw [%o0], %g3
5072- sub %g3, %g1, %g7
5073+ subcc %g3, %g1, %g7
5074+
5075+#ifdef CONFIG_PAX_REFCOUNT
5076+ tvs %icc, 6
5077+#endif
5078+
5079 cas [%o0], %g3, %g7
5080 cmp %g3, %g7
5081 bne,pn %icc, 1b
5082@@ -143,7 +173,12 @@ __downgrade_write:
5083 or %g1, %lo(RWSEM_WAITING_BIAS), %g1
5084 1:
5085 lduw [%o0], %g3
5086- sub %g3, %g1, %g7
5087+ subcc %g3, %g1, %g7
5088+
5089+#ifdef CONFIG_PAX_REFCOUNT
5090+ tvs %icc, 6
5091+#endif
5092+
5093 cas [%o0], %g3, %g7
5094 cmp %g3, %g7
5095 bne,pn %icc, 1b
5096diff -urNp linux-2.6.32.43/arch/sparc/Makefile linux-2.6.32.43/arch/sparc/Makefile
5097--- linux-2.6.32.43/arch/sparc/Makefile 2011-03-27 14:31:47.000000000 -0400
5098+++ linux-2.6.32.43/arch/sparc/Makefile 2011-04-17 15:56:46.000000000 -0400
5099@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc
5100 # Export what is needed by arch/sparc/boot/Makefile
5101 export VMLINUX_INIT VMLINUX_MAIN
5102 VMLINUX_INIT := $(head-y) $(init-y)
5103-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/
5104+VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
5105 VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y)
5106 VMLINUX_MAIN += $(drivers-y) $(net-y)
5107
5108diff -urNp linux-2.6.32.43/arch/sparc/mm/fault_32.c linux-2.6.32.43/arch/sparc/mm/fault_32.c
5109--- linux-2.6.32.43/arch/sparc/mm/fault_32.c 2011-03-27 14:31:47.000000000 -0400
5110+++ linux-2.6.32.43/arch/sparc/mm/fault_32.c 2011-04-17 15:56:46.000000000 -0400
5111@@ -21,6 +21,9 @@
5112 #include <linux/interrupt.h>
5113 #include <linux/module.h>
5114 #include <linux/kdebug.h>
5115+#include <linux/slab.h>
5116+#include <linux/pagemap.h>
5117+#include <linux/compiler.h>
5118
5119 #include <asm/system.h>
5120 #include <asm/page.h>
5121@@ -167,6 +170,267 @@ static unsigned long compute_si_addr(str
5122 return safe_compute_effective_address(regs, insn);
5123 }
5124
5125+#ifdef CONFIG_PAX_PAGEEXEC
5126+#ifdef CONFIG_PAX_DLRESOLVE
5127+static void pax_emuplt_close(struct vm_area_struct *vma)
5128+{
5129+ vma->vm_mm->call_dl_resolve = 0UL;
5130+}
5131+
5132+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5133+{
5134+ unsigned int *kaddr;
5135+
5136+ vmf->page = alloc_page(GFP_HIGHUSER);
5137+ if (!vmf->page)
5138+ return VM_FAULT_OOM;
5139+
5140+ kaddr = kmap(vmf->page);
5141+ memset(kaddr, 0, PAGE_SIZE);
5142+ kaddr[0] = 0x9DE3BFA8U; /* save */
5143+ flush_dcache_page(vmf->page);
5144+ kunmap(vmf->page);
5145+ return VM_FAULT_MAJOR;
5146+}
5147+
5148+static const struct vm_operations_struct pax_vm_ops = {
5149+ .close = pax_emuplt_close,
5150+ .fault = pax_emuplt_fault
5151+};
5152+
5153+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5154+{
5155+ int ret;
5156+
5157+ vma->vm_mm = current->mm;
5158+ vma->vm_start = addr;
5159+ vma->vm_end = addr + PAGE_SIZE;
5160+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5161+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5162+ vma->vm_ops = &pax_vm_ops;
5163+
5164+ ret = insert_vm_struct(current->mm, vma);
5165+ if (ret)
5166+ return ret;
5167+
5168+ ++current->mm->total_vm;
5169+ return 0;
5170+}
5171+#endif
5172+
5173+/*
5174+ * PaX: decide what to do with offenders (regs->pc = fault address)
5175+ *
5176+ * returns 1 when task should be killed
5177+ * 2 when patched PLT trampoline was detected
5178+ * 3 when unpatched PLT trampoline was detected
5179+ */
5180+static int pax_handle_fetch_fault(struct pt_regs *regs)
5181+{
5182+
5183+#ifdef CONFIG_PAX_EMUPLT
5184+ int err;
5185+
5186+ do { /* PaX: patched PLT emulation #1 */
5187+ unsigned int sethi1, sethi2, jmpl;
5188+
5189+ err = get_user(sethi1, (unsigned int *)regs->pc);
5190+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
5191+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
5192+
5193+ if (err)
5194+ break;
5195+
5196+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5197+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5198+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5199+ {
5200+ unsigned int addr;
5201+
5202+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5203+ addr = regs->u_regs[UREG_G1];
5204+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5205+ regs->pc = addr;
5206+ regs->npc = addr+4;
5207+ return 2;
5208+ }
5209+ } while (0);
5210+
5211+ { /* PaX: patched PLT emulation #2 */
5212+ unsigned int ba;
5213+
5214+ err = get_user(ba, (unsigned int *)regs->pc);
5215+
5216+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5217+ unsigned int addr;
5218+
5219+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5220+ regs->pc = addr;
5221+ regs->npc = addr+4;
5222+ return 2;
5223+ }
5224+ }
5225+
5226+ do { /* PaX: patched PLT emulation #3 */
5227+ unsigned int sethi, jmpl, nop;
5228+
5229+ err = get_user(sethi, (unsigned int *)regs->pc);
5230+ err |= get_user(jmpl, (unsigned int *)(regs->pc+4));
5231+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5232+
5233+ if (err)
5234+ break;
5235+
5236+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5237+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5238+ nop == 0x01000000U)
5239+ {
5240+ unsigned int addr;
5241+
5242+ addr = (sethi & 0x003FFFFFU) << 10;
5243+ regs->u_regs[UREG_G1] = addr;
5244+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5245+ regs->pc = addr;
5246+ regs->npc = addr+4;
5247+ return 2;
5248+ }
5249+ } while (0);
5250+
5251+ do { /* PaX: unpatched PLT emulation step 1 */
5252+ unsigned int sethi, ba, nop;
5253+
5254+ err = get_user(sethi, (unsigned int *)regs->pc);
5255+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
5256+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
5257+
5258+ if (err)
5259+ break;
5260+
5261+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5262+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5263+ nop == 0x01000000U)
5264+ {
5265+ unsigned int addr, save, call;
5266+
5267+ if ((ba & 0xFFC00000U) == 0x30800000U)
5268+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
5269+ else
5270+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
5271+
5272+ err = get_user(save, (unsigned int *)addr);
5273+ err |= get_user(call, (unsigned int *)(addr+4));
5274+ err |= get_user(nop, (unsigned int *)(addr+8));
5275+ if (err)
5276+ break;
5277+
5278+#ifdef CONFIG_PAX_DLRESOLVE
5279+ if (save == 0x9DE3BFA8U &&
5280+ (call & 0xC0000000U) == 0x40000000U &&
5281+ nop == 0x01000000U)
5282+ {
5283+ struct vm_area_struct *vma;
5284+ unsigned long call_dl_resolve;
5285+
5286+ down_read(&current->mm->mmap_sem);
5287+ call_dl_resolve = current->mm->call_dl_resolve;
5288+ up_read(&current->mm->mmap_sem);
5289+ if (likely(call_dl_resolve))
5290+ goto emulate;
5291+
5292+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5293+
5294+ down_write(&current->mm->mmap_sem);
5295+ if (current->mm->call_dl_resolve) {
5296+ call_dl_resolve = current->mm->call_dl_resolve;
5297+ up_write(&current->mm->mmap_sem);
5298+ if (vma)
5299+ kmem_cache_free(vm_area_cachep, vma);
5300+ goto emulate;
5301+ }
5302+
5303+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5304+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5305+ up_write(&current->mm->mmap_sem);
5306+ if (vma)
5307+ kmem_cache_free(vm_area_cachep, vma);
5308+ return 1;
5309+ }
5310+
5311+ if (pax_insert_vma(vma, call_dl_resolve)) {
5312+ up_write(&current->mm->mmap_sem);
5313+ kmem_cache_free(vm_area_cachep, vma);
5314+ return 1;
5315+ }
5316+
5317+ current->mm->call_dl_resolve = call_dl_resolve;
5318+ up_write(&current->mm->mmap_sem);
5319+
5320+emulate:
5321+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5322+ regs->pc = call_dl_resolve;
5323+ regs->npc = addr+4;
5324+ return 3;
5325+ }
5326+#endif
5327+
5328+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5329+ if ((save & 0xFFC00000U) == 0x05000000U &&
5330+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5331+ nop == 0x01000000U)
5332+ {
5333+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5334+ regs->u_regs[UREG_G2] = addr + 4;
5335+ addr = (save & 0x003FFFFFU) << 10;
5336+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
5337+ regs->pc = addr;
5338+ regs->npc = addr+4;
5339+ return 3;
5340+ }
5341+ }
5342+ } while (0);
5343+
5344+ do { /* PaX: unpatched PLT emulation step 2 */
5345+ unsigned int save, call, nop;
5346+
5347+ err = get_user(save, (unsigned int *)(regs->pc-4));
5348+ err |= get_user(call, (unsigned int *)regs->pc);
5349+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
5350+ if (err)
5351+ break;
5352+
5353+ if (save == 0x9DE3BFA8U &&
5354+ (call & 0xC0000000U) == 0x40000000U &&
5355+ nop == 0x01000000U)
5356+ {
5357+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
5358+
5359+ regs->u_regs[UREG_RETPC] = regs->pc;
5360+ regs->pc = dl_resolve;
5361+ regs->npc = dl_resolve+4;
5362+ return 3;
5363+ }
5364+ } while (0);
5365+#endif
5366+
5367+ return 1;
5368+}
5369+
5370+void pax_report_insns(void *pc, void *sp)
5371+{
5372+ unsigned long i;
5373+
5374+ printk(KERN_ERR "PAX: bytes at PC: ");
5375+ for (i = 0; i < 8; i++) {
5376+ unsigned int c;
5377+ if (get_user(c, (unsigned int *)pc+i))
5378+ printk(KERN_CONT "???????? ");
5379+ else
5380+ printk(KERN_CONT "%08x ", c);
5381+ }
5382+ printk("\n");
5383+}
5384+#endif
5385+
5386 asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
5387 unsigned long address)
5388 {
5389@@ -231,6 +495,24 @@ good_area:
5390 if(!(vma->vm_flags & VM_WRITE))
5391 goto bad_area;
5392 } else {
5393+
5394+#ifdef CONFIG_PAX_PAGEEXEC
5395+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
5396+ up_read(&mm->mmap_sem);
5397+ switch (pax_handle_fetch_fault(regs)) {
5398+
5399+#ifdef CONFIG_PAX_EMUPLT
5400+ case 2:
5401+ case 3:
5402+ return;
5403+#endif
5404+
5405+ }
5406+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
5407+ do_group_exit(SIGKILL);
5408+ }
5409+#endif
5410+
5411 /* Allow reads even for write-only mappings */
5412 if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
5413 goto bad_area;
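
The PLT-emulation code added above recognises short sethi/jmpl/ba sequences by masking opcode bits and then rebuilds the branch target from the instruction immediates: sethi carries a 22-bit immediate that lands in bits 31:10 of the destination register, and jmpl-style instructions carry a sign-extended 13-bit immediate (simm13), which the patch sign-extends branch-free with the "((x | mask) ^ bit) + bit" expression. A small stand-alone illustration of those two decodes (the sample encodings are made up):

    #include <stdio.h>

    /* imm22 from a sethi instruction: low 22 bits, shifted into place. */
    static unsigned int sethi_imm(unsigned int insn)
    {
        return (insn & 0x003FFFFFU) << 10;
    }

    /* simm13 from a jmpl-style instruction, using the same branch-free
     * sign-extension expression as the emulation code above. */
    static unsigned int simm13(unsigned int insn)
    {
        return ((insn | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U;
    }

    int main(void)
    {
        /* sethi %hi(0x12345400), %g1 -- hypothetical encoding */
        printf("sethi imm = %#x\n", sethi_imm(0x03048D15U));
        /* jmpl with simm13 = -8 -- hypothetical encoding */
        printf("simm13    = %d\n", (int)simm13(0x81C07FF8U));
        return 0;
    }

The fault_64.c version that follows does the same decoding with 64-bit masks and additionally truncates the computed target to 32 bits for TIF_32BIT (compat) tasks.
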
5414diff -urNp linux-2.6.32.43/arch/sparc/mm/fault_64.c linux-2.6.32.43/arch/sparc/mm/fault_64.c
5415--- linux-2.6.32.43/arch/sparc/mm/fault_64.c 2011-03-27 14:31:47.000000000 -0400
5416+++ linux-2.6.32.43/arch/sparc/mm/fault_64.c 2011-04-17 15:56:46.000000000 -0400
5417@@ -20,6 +20,9 @@
5418 #include <linux/kprobes.h>
5419 #include <linux/kdebug.h>
5420 #include <linux/percpu.h>
5421+#include <linux/slab.h>
5422+#include <linux/pagemap.h>
5423+#include <linux/compiler.h>
5424
5425 #include <asm/page.h>
5426 #include <asm/pgtable.h>
5427@@ -78,7 +81,7 @@ static void bad_kernel_pc(struct pt_regs
5428 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
5429 regs->tpc);
5430 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
5431- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
5432+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
5433 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
5434 dump_stack();
5435 unhandled_fault(regs->tpc, current, regs);
5436@@ -249,6 +252,456 @@ static void noinline bogus_32bit_fault_a
5437 show_regs(regs);
5438 }
5439
5440+#ifdef CONFIG_PAX_PAGEEXEC
5441+#ifdef CONFIG_PAX_DLRESOLVE
5442+static void pax_emuplt_close(struct vm_area_struct *vma)
5443+{
5444+ vma->vm_mm->call_dl_resolve = 0UL;
5445+}
5446+
5447+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
5448+{
5449+ unsigned int *kaddr;
5450+
5451+ vmf->page = alloc_page(GFP_HIGHUSER);
5452+ if (!vmf->page)
5453+ return VM_FAULT_OOM;
5454+
5455+ kaddr = kmap(vmf->page);
5456+ memset(kaddr, 0, PAGE_SIZE);
5457+ kaddr[0] = 0x9DE3BFA8U; /* save */
5458+ flush_dcache_page(vmf->page);
5459+ kunmap(vmf->page);
5460+ return VM_FAULT_MAJOR;
5461+}
5462+
5463+static const struct vm_operations_struct pax_vm_ops = {
5464+ .close = pax_emuplt_close,
5465+ .fault = pax_emuplt_fault
5466+};
5467+
5468+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
5469+{
5470+ int ret;
5471+
5472+ vma->vm_mm = current->mm;
5473+ vma->vm_start = addr;
5474+ vma->vm_end = addr + PAGE_SIZE;
5475+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
5476+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5477+ vma->vm_ops = &pax_vm_ops;
5478+
5479+ ret = insert_vm_struct(current->mm, vma);
5480+ if (ret)
5481+ return ret;
5482+
5483+ ++current->mm->total_vm;
5484+ return 0;
5485+}
5486+#endif
5487+
5488+/*
5489+ * PaX: decide what to do with offenders (regs->tpc = fault address)
5490+ *
5491+ * returns 1 when task should be killed
5492+ * 2 when patched PLT trampoline was detected
5493+ * 3 when unpatched PLT trampoline was detected
5494+ */
5495+static int pax_handle_fetch_fault(struct pt_regs *regs)
5496+{
5497+
5498+#ifdef CONFIG_PAX_EMUPLT
5499+ int err;
5500+
5501+ do { /* PaX: patched PLT emulation #1 */
5502+ unsigned int sethi1, sethi2, jmpl;
5503+
5504+ err = get_user(sethi1, (unsigned int *)regs->tpc);
5505+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
5506+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
5507+
5508+ if (err)
5509+ break;
5510+
5511+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
5512+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
5513+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
5514+ {
5515+ unsigned long addr;
5516+
5517+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
5518+ addr = regs->u_regs[UREG_G1];
5519+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5520+
5521+ if (test_thread_flag(TIF_32BIT))
5522+ addr &= 0xFFFFFFFFUL;
5523+
5524+ regs->tpc = addr;
5525+ regs->tnpc = addr+4;
5526+ return 2;
5527+ }
5528+ } while (0);
5529+
5530+ { /* PaX: patched PLT emulation #2 */
5531+ unsigned int ba;
5532+
5533+ err = get_user(ba, (unsigned int *)regs->tpc);
5534+
5535+ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
5536+ unsigned long addr;
5537+
5538+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5539+
5540+ if (test_thread_flag(TIF_32BIT))
5541+ addr &= 0xFFFFFFFFUL;
5542+
5543+ regs->tpc = addr;
5544+ regs->tnpc = addr+4;
5545+ return 2;
5546+ }
5547+ }
5548+
5549+ do { /* PaX: patched PLT emulation #3 */
5550+ unsigned int sethi, jmpl, nop;
5551+
5552+ err = get_user(sethi, (unsigned int *)regs->tpc);
5553+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4));
5554+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5555+
5556+ if (err)
5557+ break;
5558+
5559+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5560+ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
5561+ nop == 0x01000000U)
5562+ {
5563+ unsigned long addr;
5564+
5565+ addr = (sethi & 0x003FFFFFU) << 10;
5566+ regs->u_regs[UREG_G1] = addr;
5567+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5568+
5569+ if (test_thread_flag(TIF_32BIT))
5570+ addr &= 0xFFFFFFFFUL;
5571+
5572+ regs->tpc = addr;
5573+ regs->tnpc = addr+4;
5574+ return 2;
5575+ }
5576+ } while (0);
5577+
5578+ do { /* PaX: patched PLT emulation #4 */
5579+ unsigned int sethi, mov1, call, mov2;
5580+
5581+ err = get_user(sethi, (unsigned int *)regs->tpc);
5582+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
5583+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
5584+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
5585+
5586+ if (err)
5587+ break;
5588+
5589+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5590+ mov1 == 0x8210000FU &&
5591+ (call & 0xC0000000U) == 0x40000000U &&
5592+ mov2 == 0x9E100001U)
5593+ {
5594+ unsigned long addr;
5595+
5596+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
5597+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5598+
5599+ if (test_thread_flag(TIF_32BIT))
5600+ addr &= 0xFFFFFFFFUL;
5601+
5602+ regs->tpc = addr;
5603+ regs->tnpc = addr+4;
5604+ return 2;
5605+ }
5606+ } while (0);
5607+
5608+ do { /* PaX: patched PLT emulation #5 */
5609+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
5610+
5611+ err = get_user(sethi, (unsigned int *)regs->tpc);
5612+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5613+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5614+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
5615+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
5616+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
5617+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
5618+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
5619+
5620+ if (err)
5621+ break;
5622+
5623+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5624+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5625+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5626+ (or1 & 0xFFFFE000U) == 0x82106000U &&
5627+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5628+ sllx == 0x83287020U &&
5629+ jmpl == 0x81C04005U &&
5630+ nop == 0x01000000U)
5631+ {
5632+ unsigned long addr;
5633+
5634+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5635+ regs->u_regs[UREG_G1] <<= 32;
5636+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5637+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5638+ regs->tpc = addr;
5639+ regs->tnpc = addr+4;
5640+ return 2;
5641+ }
5642+ } while (0);
5643+
5644+ do { /* PaX: patched PLT emulation #6 */
5645+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
5646+
5647+ err = get_user(sethi, (unsigned int *)regs->tpc);
5648+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
5649+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
5650+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
5651+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
5652+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
5653+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
5654+
5655+ if (err)
5656+ break;
5657+
5658+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5659+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
5660+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5661+ sllx == 0x83287020U &&
5662+ (or & 0xFFFFE000U) == 0x8A116000U &&
5663+ jmpl == 0x81C04005U &&
5664+ nop == 0x01000000U)
5665+ {
5666+ unsigned long addr;
5667+
5668+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
5669+ regs->u_regs[UREG_G1] <<= 32;
5670+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
5671+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
5672+ regs->tpc = addr;
5673+ regs->tnpc = addr+4;
5674+ return 2;
5675+ }
5676+ } while (0);
5677+
5678+ do { /* PaX: unpatched PLT emulation step 1 */
5679+ unsigned int sethi, ba, nop;
5680+
5681+ err = get_user(sethi, (unsigned int *)regs->tpc);
5682+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5683+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5684+
5685+ if (err)
5686+ break;
5687+
5688+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5689+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
5690+ nop == 0x01000000U)
5691+ {
5692+ unsigned long addr;
5693+ unsigned int save, call;
5694+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
5695+
5696+ if ((ba & 0xFFC00000U) == 0x30800000U)
5697+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
5698+ else
5699+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5700+
5701+ if (test_thread_flag(TIF_32BIT))
5702+ addr &= 0xFFFFFFFFUL;
5703+
5704+ err = get_user(save, (unsigned int *)addr);
5705+ err |= get_user(call, (unsigned int *)(addr+4));
5706+ err |= get_user(nop, (unsigned int *)(addr+8));
5707+ if (err)
5708+ break;
5709+
5710+#ifdef CONFIG_PAX_DLRESOLVE
5711+ if (save == 0x9DE3BFA8U &&
5712+ (call & 0xC0000000U) == 0x40000000U &&
5713+ nop == 0x01000000U)
5714+ {
5715+ struct vm_area_struct *vma;
5716+ unsigned long call_dl_resolve;
5717+
5718+ down_read(&current->mm->mmap_sem);
5719+ call_dl_resolve = current->mm->call_dl_resolve;
5720+ up_read(&current->mm->mmap_sem);
5721+ if (likely(call_dl_resolve))
5722+ goto emulate;
5723+
5724+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
5725+
5726+ down_write(&current->mm->mmap_sem);
5727+ if (current->mm->call_dl_resolve) {
5728+ call_dl_resolve = current->mm->call_dl_resolve;
5729+ up_write(&current->mm->mmap_sem);
5730+ if (vma)
5731+ kmem_cache_free(vm_area_cachep, vma);
5732+ goto emulate;
5733+ }
5734+
5735+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
5736+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
5737+ up_write(&current->mm->mmap_sem);
5738+ if (vma)
5739+ kmem_cache_free(vm_area_cachep, vma);
5740+ return 1;
5741+ }
5742+
5743+ if (pax_insert_vma(vma, call_dl_resolve)) {
5744+ up_write(&current->mm->mmap_sem);
5745+ kmem_cache_free(vm_area_cachep, vma);
5746+ return 1;
5747+ }
5748+
5749+ current->mm->call_dl_resolve = call_dl_resolve;
5750+ up_write(&current->mm->mmap_sem);
5751+
5752+emulate:
5753+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5754+ regs->tpc = call_dl_resolve;
5755+ regs->tnpc = addr+4;
5756+ return 3;
5757+ }
5758+#endif
5759+
5760+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
5761+ if ((save & 0xFFC00000U) == 0x05000000U &&
5762+ (call & 0xFFFFE000U) == 0x85C0A000U &&
5763+ nop == 0x01000000U)
5764+ {
5765+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5766+ regs->u_regs[UREG_G2] = addr + 4;
5767+ addr = (save & 0x003FFFFFU) << 10;
5768+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
5769+
5770+ if (test_thread_flag(TIF_32BIT))
5771+ addr &= 0xFFFFFFFFUL;
5772+
5773+ regs->tpc = addr;
5774+ regs->tnpc = addr+4;
5775+ return 3;
5776+ }
5777+
5778+ /* PaX: 64-bit PLT stub */
5779+ err = get_user(sethi1, (unsigned int *)addr);
5780+ err |= get_user(sethi2, (unsigned int *)(addr+4));
5781+ err |= get_user(or1, (unsigned int *)(addr+8));
5782+ err |= get_user(or2, (unsigned int *)(addr+12));
5783+ err |= get_user(sllx, (unsigned int *)(addr+16));
5784+ err |= get_user(add, (unsigned int *)(addr+20));
5785+ err |= get_user(jmpl, (unsigned int *)(addr+24));
5786+ err |= get_user(nop, (unsigned int *)(addr+28));
5787+ if (err)
5788+ break;
5789+
5790+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
5791+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
5792+ (or1 & 0xFFFFE000U) == 0x88112000U &&
5793+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
5794+ sllx == 0x89293020U &&
5795+ add == 0x8A010005U &&
5796+ jmpl == 0x89C14000U &&
5797+ nop == 0x01000000U)
5798+ {
5799+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
5800+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
5801+ regs->u_regs[UREG_G4] <<= 32;
5802+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
5803+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
5804+ regs->u_regs[UREG_G4] = addr + 24;
5805+ addr = regs->u_regs[UREG_G5];
5806+ regs->tpc = addr;
5807+ regs->tnpc = addr+4;
5808+ return 3;
5809+ }
5810+ }
5811+ } while (0);
5812+
5813+#ifdef CONFIG_PAX_DLRESOLVE
5814+ do { /* PaX: unpatched PLT emulation step 2 */
5815+ unsigned int save, call, nop;
5816+
5817+ err = get_user(save, (unsigned int *)(regs->tpc-4));
5818+ err |= get_user(call, (unsigned int *)regs->tpc);
5819+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
5820+ if (err)
5821+ break;
5822+
5823+ if (save == 0x9DE3BFA8U &&
5824+ (call & 0xC0000000U) == 0x40000000U &&
5825+ nop == 0x01000000U)
5826+ {
5827+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
5828+
5829+ if (test_thread_flag(TIF_32BIT))
5830+ dl_resolve &= 0xFFFFFFFFUL;
5831+
5832+ regs->u_regs[UREG_RETPC] = regs->tpc;
5833+ regs->tpc = dl_resolve;
5834+ regs->tnpc = dl_resolve+4;
5835+ return 3;
5836+ }
5837+ } while (0);
5838+#endif
5839+
5840+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
5841+ unsigned int sethi, ba, nop;
5842+
5843+ err = get_user(sethi, (unsigned int *)regs->tpc);
5844+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
5845+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
5846+
5847+ if (err)
5848+ break;
5849+
5850+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
5851+ (ba & 0xFFF00000U) == 0x30600000U &&
5852+ nop == 0x01000000U)
5853+ {
5854+ unsigned long addr;
5855+
5856+ addr = (sethi & 0x003FFFFFU) << 10;
5857+ regs->u_regs[UREG_G1] = addr;
5858+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
5859+
5860+ if (test_thread_flag(TIF_32BIT))
5861+ addr &= 0xFFFFFFFFUL;
5862+
5863+ regs->tpc = addr;
5864+ regs->tnpc = addr+4;
5865+ return 2;
5866+ }
5867+ } while (0);
5868+
5869+#endif
5870+
5871+ return 1;
5872+}
5873+
5874+void pax_report_insns(void *pc, void *sp)
5875+{
5876+ unsigned long i;
5877+
5878+ printk(KERN_ERR "PAX: bytes at PC: ");
5879+ for (i = 0; i < 8; i++) {
5880+ unsigned int c;
5881+ if (get_user(c, (unsigned int *)pc+i))
5882+ printk(KERN_CONT "???????? ");
5883+ else
5884+ printk(KERN_CONT "%08x ", c);
5885+ }
5886+ printk("\n");
5887+}
5888+#endif
5889+
5890 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
5891 {
5892 struct mm_struct *mm = current->mm;
5893@@ -315,6 +768,29 @@ asmlinkage void __kprobes do_sparc64_fau
5894 if (!vma)
5895 goto bad_area;
5896
5897+#ifdef CONFIG_PAX_PAGEEXEC
5898+ /* PaX: detect ITLB misses on non-exec pages */
5899+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
5900+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
5901+ {
5902+ if (address != regs->tpc)
5903+ goto good_area;
5904+
5905+ up_read(&mm->mmap_sem);
5906+ switch (pax_handle_fetch_fault(regs)) {
5907+
5908+#ifdef CONFIG_PAX_EMUPLT
5909+ case 2:
5910+ case 3:
5911+ return;
5912+#endif
5913+
5914+ }
5915+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
5916+ do_group_exit(SIGKILL);
5917+ }
5918+#endif
5919+
5920 /* Pure DTLB misses do not tell us whether the fault causing
5921 * load/store/atomic was a write or not, it only says that there
5922 * was no match. So in such a case we (carefully) read the
5923diff -urNp linux-2.6.32.43/arch/sparc/mm/hugetlbpage.c linux-2.6.32.43/arch/sparc/mm/hugetlbpage.c
5924--- linux-2.6.32.43/arch/sparc/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
5925+++ linux-2.6.32.43/arch/sparc/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
5926@@ -69,7 +69,7 @@ full_search:
5927 }
5928 return -ENOMEM;
5929 }
5930- if (likely(!vma || addr + len <= vma->vm_start)) {
5931+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5932 /*
5933 * Remember the place where we stopped the search:
5934 */
5935@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct
5936 /* make sure it can fit in the remaining address space */
5937 if (likely(addr > len)) {
5938 vma = find_vma(mm, addr-len);
5939- if (!vma || addr <= vma->vm_start) {
5940+ if (check_heap_stack_gap(vma, addr - len, len)) {
5941 /* remember the address as a hint for next time */
5942 return (mm->free_area_cache = addr-len);
5943 }
5944@@ -117,16 +117,17 @@ hugetlb_get_unmapped_area_topdown(struct
5945 if (unlikely(mm->mmap_base < len))
5946 goto bottomup;
5947
5948- addr = (mm->mmap_base-len) & HPAGE_MASK;
5949+ addr = mm->mmap_base - len;
5950
5951 do {
5952+ addr &= HPAGE_MASK;
5953 /*
5954 * Lookup failure means no vma is above this address,
5955 * else if new region fits below vma->vm_start,
5956 * return with success:
5957 */
5958 vma = find_vma(mm, addr);
5959- if (likely(!vma || addr+len <= vma->vm_start)) {
5960+ if (likely(check_heap_stack_gap(vma, addr, len))) {
5961 /* remember the address as a hint for next time */
5962 return (mm->free_area_cache = addr);
5963 }
5964@@ -136,8 +137,8 @@ hugetlb_get_unmapped_area_topdown(struct
5965 mm->cached_hole_size = vma->vm_start - addr;
5966
5967 /* try just below the current vma->vm_start */
5968- addr = (vma->vm_start-len) & HPAGE_MASK;
5969- } while (likely(len < vma->vm_start));
5970+ addr = skip_heap_stack_gap(vma, len);
5971+ } while (!IS_ERR_VALUE(addr));
5972
5973 bottomup:
5974 /*
5975@@ -183,8 +184,7 @@ hugetlb_get_unmapped_area(struct file *f
5976 if (addr) {
5977 addr = ALIGN(addr, HPAGE_SIZE);
5978 vma = find_vma(mm, addr);
5979- if (task_size - len >= addr &&
5980- (!vma || addr + len <= vma->vm_start))
5981+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
5982 return addr;
5983 }
5984 if (mm->get_unmapped_area == arch_get_unmapped_area)
5985diff -urNp linux-2.6.32.43/arch/sparc/mm/init_32.c linux-2.6.32.43/arch/sparc/mm/init_32.c
5986--- linux-2.6.32.43/arch/sparc/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
5987+++ linux-2.6.32.43/arch/sparc/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
5988@@ -317,6 +317,9 @@ extern void device_scan(void);
5989 pgprot_t PAGE_SHARED __read_mostly;
5990 EXPORT_SYMBOL(PAGE_SHARED);
5991
5992+pgprot_t PAGE_SHARED_NOEXEC __read_mostly;
5993+EXPORT_SYMBOL(PAGE_SHARED_NOEXEC);
5994+
5995 void __init paging_init(void)
5996 {
5997 switch(sparc_cpu_model) {
5998@@ -345,17 +348,17 @@ void __init paging_init(void)
5999
6000 /* Initialize the protection map with non-constant, MMU dependent values. */
6001 protection_map[0] = PAGE_NONE;
6002- protection_map[1] = PAGE_READONLY;
6003- protection_map[2] = PAGE_COPY;
6004- protection_map[3] = PAGE_COPY;
6005+ protection_map[1] = PAGE_READONLY_NOEXEC;
6006+ protection_map[2] = PAGE_COPY_NOEXEC;
6007+ protection_map[3] = PAGE_COPY_NOEXEC;
6008 protection_map[4] = PAGE_READONLY;
6009 protection_map[5] = PAGE_READONLY;
6010 protection_map[6] = PAGE_COPY;
6011 protection_map[7] = PAGE_COPY;
6012 protection_map[8] = PAGE_NONE;
6013- protection_map[9] = PAGE_READONLY;
6014- protection_map[10] = PAGE_SHARED;
6015- protection_map[11] = PAGE_SHARED;
6016+ protection_map[9] = PAGE_READONLY_NOEXEC;
6017+ protection_map[10] = PAGE_SHARED_NOEXEC;
6018+ protection_map[11] = PAGE_SHARED_NOEXEC;
6019 protection_map[12] = PAGE_READONLY;
6020 protection_map[13] = PAGE_READONLY;
6021 protection_map[14] = PAGE_SHARED;
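
protection_map[] is indexed by the low vm_flags bits (VM_READ=1, VM_WRITE=2, VM_EXEC=4, VM_SHARED=8), so entries 1-3 and 9-11 are exactly the read/write combinations without VM_EXEC; those are the ones the hunk above redirects to the new *_NOEXEC protections when PAGEEXEC is enabled. A quick way to see which index means what:

    #include <stdio.h>

    #define VM_READ   0x1
    #define VM_WRITE  0x2
    #define VM_EXEC   0x4
    #define VM_SHARED 0x8

    int main(void)
    {
        /* Print the permission combination behind each protection_map slot. */
        for (int i = 0; i < 16; i++)
            printf("protection_map[%2d]: %c%c%c %s\n", i,
                   (i & VM_READ)   ? 'r' : '-',
                   (i & VM_WRITE)  ? 'w' : '-',
                   (i & VM_EXEC)   ? 'x' : '-',
                   (i & VM_SHARED) ? "shared" : "private");
        return 0;
    }
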
6022diff -urNp linux-2.6.32.43/arch/sparc/mm/Makefile linux-2.6.32.43/arch/sparc/mm/Makefile
6023--- linux-2.6.32.43/arch/sparc/mm/Makefile 2011-03-27 14:31:47.000000000 -0400
6024+++ linux-2.6.32.43/arch/sparc/mm/Makefile 2011-04-17 15:56:46.000000000 -0400
6025@@ -2,7 +2,7 @@
6026 #
6027
6028 asflags-y := -ansi
6029-ccflags-y := -Werror
6030+#ccflags-y := -Werror
6031
6032 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o
6033 obj-y += fault_$(BITS).o
6034diff -urNp linux-2.6.32.43/arch/sparc/mm/srmmu.c linux-2.6.32.43/arch/sparc/mm/srmmu.c
6035--- linux-2.6.32.43/arch/sparc/mm/srmmu.c 2011-03-27 14:31:47.000000000 -0400
6036+++ linux-2.6.32.43/arch/sparc/mm/srmmu.c 2011-04-17 15:56:46.000000000 -0400
6037@@ -2200,6 +2200,13 @@ void __init ld_mmu_srmmu(void)
6038 PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
6039 BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
6040 BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
6041+
6042+#ifdef CONFIG_PAX_PAGEEXEC
6043+ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC);
6044+ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
6045+ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
6046+#endif
6047+
6048 BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
6049 page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
6050
6051diff -urNp linux-2.6.32.43/arch/um/include/asm/kmap_types.h linux-2.6.32.43/arch/um/include/asm/kmap_types.h
6052--- linux-2.6.32.43/arch/um/include/asm/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
6053+++ linux-2.6.32.43/arch/um/include/asm/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
6054@@ -23,6 +23,7 @@ enum km_type {
6055 KM_IRQ1,
6056 KM_SOFTIRQ0,
6057 KM_SOFTIRQ1,
6058+ KM_CLEARPAGE,
6059 KM_TYPE_NR
6060 };
6061
6062diff -urNp linux-2.6.32.43/arch/um/include/asm/page.h linux-2.6.32.43/arch/um/include/asm/page.h
6063--- linux-2.6.32.43/arch/um/include/asm/page.h 2011-03-27 14:31:47.000000000 -0400
6064+++ linux-2.6.32.43/arch/um/include/asm/page.h 2011-04-17 15:56:46.000000000 -0400
6065@@ -14,6 +14,9 @@
6066 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
6067 #define PAGE_MASK (~(PAGE_SIZE-1))
6068
6069+#define ktla_ktva(addr) (addr)
6070+#define ktva_ktla(addr) (addr)
6071+
6072 #ifndef __ASSEMBLY__
6073
6074 struct page;
6075diff -urNp linux-2.6.32.43/arch/um/kernel/process.c linux-2.6.32.43/arch/um/kernel/process.c
6076--- linux-2.6.32.43/arch/um/kernel/process.c 2011-03-27 14:31:47.000000000 -0400
6077+++ linux-2.6.32.43/arch/um/kernel/process.c 2011-04-17 15:56:46.000000000 -0400
6078@@ -393,22 +393,6 @@ int singlestepping(void * t)
6079 return 2;
6080 }
6081
6082-/*
6083- * Only x86 and x86_64 have an arch_align_stack().
6084- * All other arches have "#define arch_align_stack(x) (x)"
6085- * in their asm/system.h
6086- * As this is included in UML from asm-um/system-generic.h,
6087- * we can use it to behave as the subarch does.
6088- */
6089-#ifndef arch_align_stack
6090-unsigned long arch_align_stack(unsigned long sp)
6091-{
6092- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6093- sp -= get_random_int() % 8192;
6094- return sp & ~0xf;
6095-}
6096-#endif
6097-
6098 unsigned long get_wchan(struct task_struct *p)
6099 {
6100 unsigned long stack_page, sp, ip;
6101diff -urNp linux-2.6.32.43/arch/um/sys-i386/syscalls.c linux-2.6.32.43/arch/um/sys-i386/syscalls.c
6102--- linux-2.6.32.43/arch/um/sys-i386/syscalls.c 2011-03-27 14:31:47.000000000 -0400
6103+++ linux-2.6.32.43/arch/um/sys-i386/syscalls.c 2011-04-17 15:56:46.000000000 -0400
6104@@ -11,6 +11,21 @@
6105 #include "asm/uaccess.h"
6106 #include "asm/unistd.h"
6107
6108+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
6109+{
6110+ unsigned long pax_task_size = TASK_SIZE;
6111+
6112+#ifdef CONFIG_PAX_SEGMEXEC
6113+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
6114+ pax_task_size = SEGMEXEC_TASK_SIZE;
6115+#endif
6116+
6117+ if (len > pax_task_size || addr > pax_task_size - len)
6118+ return -EINVAL;
6119+
6120+ return 0;
6121+}
6122+
6123 /*
6124 * Perform the select(nd, in, out, ex, tv) and mmap() system
6125 * calls. Linux/i386 didn't use to be able to handle more than
6126diff -urNp linux-2.6.32.43/arch/x86/boot/bitops.h linux-2.6.32.43/arch/x86/boot/bitops.h
6127--- linux-2.6.32.43/arch/x86/boot/bitops.h 2011-03-27 14:31:47.000000000 -0400
6128+++ linux-2.6.32.43/arch/x86/boot/bitops.h 2011-04-17 15:56:46.000000000 -0400
6129@@ -26,7 +26,7 @@ static inline int variable_test_bit(int
6130 u8 v;
6131 const u32 *p = (const u32 *)addr;
6132
6133- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6134+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
6135 return v;
6136 }
6137
6138@@ -37,7 +37,7 @@ static inline int variable_test_bit(int
6139
6140 static inline void set_bit(int nr, void *addr)
6141 {
6142- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6143+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
6144 }
6145
6146 #endif /* BOOT_BITOPS_H */
6147diff -urNp linux-2.6.32.43/arch/x86/boot/boot.h linux-2.6.32.43/arch/x86/boot/boot.h
6148--- linux-2.6.32.43/arch/x86/boot/boot.h 2011-03-27 14:31:47.000000000 -0400
6149+++ linux-2.6.32.43/arch/x86/boot/boot.h 2011-04-17 15:56:46.000000000 -0400
6150@@ -82,7 +82,7 @@ static inline void io_delay(void)
6151 static inline u16 ds(void)
6152 {
6153 u16 seg;
6154- asm("movw %%ds,%0" : "=rm" (seg));
6155+ asm volatile("movw %%ds,%0" : "=rm" (seg));
6156 return seg;
6157 }
6158
6159@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t
6160 static inline int memcmp(const void *s1, const void *s2, size_t len)
6161 {
6162 u8 diff;
6163- asm("repe; cmpsb; setnz %0"
6164+ asm volatile("repe; cmpsb; setnz %0"
6165 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
6166 return diff;
6167 }
6168diff -urNp linux-2.6.32.43/arch/x86/boot/compressed/head_32.S linux-2.6.32.43/arch/x86/boot/compressed/head_32.S
6169--- linux-2.6.32.43/arch/x86/boot/compressed/head_32.S 2011-03-27 14:31:47.000000000 -0400
6170+++ linux-2.6.32.43/arch/x86/boot/compressed/head_32.S 2011-04-17 15:56:46.000000000 -0400
6171@@ -76,7 +76,7 @@ ENTRY(startup_32)
6172 notl %eax
6173 andl %eax, %ebx
6174 #else
6175- movl $LOAD_PHYSICAL_ADDR, %ebx
6176+ movl $____LOAD_PHYSICAL_ADDR, %ebx
6177 #endif
6178
6179 /* Target address to relocate to for decompression */
6180@@ -149,7 +149,7 @@ relocated:
6181 * and where it was actually loaded.
6182 */
6183 movl %ebp, %ebx
6184- subl $LOAD_PHYSICAL_ADDR, %ebx
6185+ subl $____LOAD_PHYSICAL_ADDR, %ebx
6186 jz 2f /* Nothing to be done if loaded at compiled addr. */
6187 /*
6188 * Process relocations.
6189@@ -157,8 +157,7 @@ relocated:
6190
6191 1: subl $4, %edi
6192 movl (%edi), %ecx
6193- testl %ecx, %ecx
6194- jz 2f
6195+ jecxz 2f
6196 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
6197 jmp 1b
6198 2:
6199diff -urNp linux-2.6.32.43/arch/x86/boot/compressed/head_64.S linux-2.6.32.43/arch/x86/boot/compressed/head_64.S
6200--- linux-2.6.32.43/arch/x86/boot/compressed/head_64.S 2011-03-27 14:31:47.000000000 -0400
6201+++ linux-2.6.32.43/arch/x86/boot/compressed/head_64.S 2011-07-01 18:53:00.000000000 -0400
6202@@ -91,7 +91,7 @@ ENTRY(startup_32)
6203 notl %eax
6204 andl %eax, %ebx
6205 #else
6206- movl $LOAD_PHYSICAL_ADDR, %ebx
6207+ movl $____LOAD_PHYSICAL_ADDR, %ebx
6208 #endif
6209
6210 /* Target address to relocate to for decompression */
6211@@ -183,7 +183,7 @@ no_longmode:
6212 hlt
6213 jmp 1b
6214
6215-#include "../../kernel/verify_cpu_64.S"
6216+#include "../../kernel/verify_cpu.S"
6217
6218 /*
6219 * Be careful here startup_64 needs to be at a predictable
6220@@ -234,7 +234,7 @@ ENTRY(startup_64)
6221 notq %rax
6222 andq %rax, %rbp
6223 #else
6224- movq $LOAD_PHYSICAL_ADDR, %rbp
6225+ movq $____LOAD_PHYSICAL_ADDR, %rbp
6226 #endif
6227
6228 /* Target address to relocate to for decompression */
6229diff -urNp linux-2.6.32.43/arch/x86/boot/compressed/Makefile linux-2.6.32.43/arch/x86/boot/compressed/Makefile
6230--- linux-2.6.32.43/arch/x86/boot/compressed/Makefile 2011-03-27 14:31:47.000000000 -0400
6231+++ linux-2.6.32.43/arch/x86/boot/compressed/Makefile 2011-08-07 14:38:34.000000000 -0400
6232@@ -13,6 +13,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=smal
6233 KBUILD_CFLAGS += $(cflags-y)
6234 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
6235 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
6236+ifdef CONSTIFY_PLUGIN
6237+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6238+endif
6239
6240 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6241 GCOV_PROFILE := n
6242diff -urNp linux-2.6.32.43/arch/x86/boot/compressed/misc.c linux-2.6.32.43/arch/x86/boot/compressed/misc.c
6243--- linux-2.6.32.43/arch/x86/boot/compressed/misc.c 2011-03-27 14:31:47.000000000 -0400
6244+++ linux-2.6.32.43/arch/x86/boot/compressed/misc.c 2011-04-17 15:56:46.000000000 -0400
6245@@ -288,7 +288,7 @@ static void parse_elf(void *output)
6246 case PT_LOAD:
6247 #ifdef CONFIG_RELOCATABLE
6248 dest = output;
6249- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
6250+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
6251 #else
6252 dest = (void *)(phdr->p_paddr);
6253 #endif
6254@@ -335,7 +335,7 @@ asmlinkage void decompress_kernel(void *
6255 error("Destination address too large");
6256 #endif
6257 #ifndef CONFIG_RELOCATABLE
6258- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
6259+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
6260 error("Wrong destination address");
6261 #endif
6262
6263diff -urNp linux-2.6.32.43/arch/x86/boot/compressed/mkpiggy.c linux-2.6.32.43/arch/x86/boot/compressed/mkpiggy.c
6264--- linux-2.6.32.43/arch/x86/boot/compressed/mkpiggy.c 2011-03-27 14:31:47.000000000 -0400
6265+++ linux-2.6.32.43/arch/x86/boot/compressed/mkpiggy.c 2011-04-17 15:56:46.000000000 -0400
6266@@ -74,7 +74,7 @@ int main(int argc, char *argv[])
6267
6268 offs = (olen > ilen) ? olen - ilen : 0;
6269 offs += olen >> 12; /* Add 8 bytes for each 32K block */
6270- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */
6271+ offs += 64*1024; /* Add 64K bytes slack */
6272 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
6273
6274 printf(".section \".rodata.compressed\",\"a\",@progbits\n");
6275diff -urNp linux-2.6.32.43/arch/x86/boot/compressed/relocs.c linux-2.6.32.43/arch/x86/boot/compressed/relocs.c
6276--- linux-2.6.32.43/arch/x86/boot/compressed/relocs.c 2011-03-27 14:31:47.000000000 -0400
6277+++ linux-2.6.32.43/arch/x86/boot/compressed/relocs.c 2011-04-17 15:56:46.000000000 -0400
6278@@ -10,8 +10,11 @@
6279 #define USE_BSD
6280 #include <endian.h>
6281
6282+#include "../../../../include/linux/autoconf.h"
6283+
6284 #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
6285 static Elf32_Ehdr ehdr;
6286+static Elf32_Phdr *phdr;
6287 static unsigned long reloc_count, reloc_idx;
6288 static unsigned long *relocs;
6289
6290@@ -37,7 +40,7 @@ static const char* safe_abs_relocs[] = {
6291
6292 static int is_safe_abs_reloc(const char* sym_name)
6293 {
6294- int i;
6295+ unsigned int i;
6296
6297 for (i = 0; i < ARRAY_SIZE(safe_abs_relocs); i++) {
6298 if (!strcmp(sym_name, safe_abs_relocs[i]))
6299@@ -245,9 +248,39 @@ static void read_ehdr(FILE *fp)
6300 }
6301 }
6302
6303+static void read_phdrs(FILE *fp)
6304+{
6305+ unsigned int i;
6306+
6307+ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr));
6308+ if (!phdr) {
6309+ die("Unable to allocate %d program headers\n",
6310+ ehdr.e_phnum);
6311+ }
6312+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
6313+ die("Seek to %d failed: %s\n",
6314+ ehdr.e_phoff, strerror(errno));
6315+ }
6316+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
6317+ die("Cannot read ELF program headers: %s\n",
6318+ strerror(errno));
6319+ }
6320+ for(i = 0; i < ehdr.e_phnum; i++) {
6321+ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type);
6322+ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset);
6323+ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr);
6324+ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr);
6325+ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz);
6326+ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz);
6327+ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags);
6328+ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align);
6329+ }
6330+
6331+}
6332+
6333 static void read_shdrs(FILE *fp)
6334 {
6335- int i;
6336+ unsigned int i;
6337 Elf32_Shdr shdr;
6338
6339 secs = calloc(ehdr.e_shnum, sizeof(struct section));
6340@@ -282,7 +315,7 @@ static void read_shdrs(FILE *fp)
6341
6342 static void read_strtabs(FILE *fp)
6343 {
6344- int i;
6345+ unsigned int i;
6346 for (i = 0; i < ehdr.e_shnum; i++) {
6347 struct section *sec = &secs[i];
6348 if (sec->shdr.sh_type != SHT_STRTAB) {
6349@@ -307,7 +340,7 @@ static void read_strtabs(FILE *fp)
6350
6351 static void read_symtabs(FILE *fp)
6352 {
6353- int i,j;
6354+ unsigned int i,j;
6355 for (i = 0; i < ehdr.e_shnum; i++) {
6356 struct section *sec = &secs[i];
6357 if (sec->shdr.sh_type != SHT_SYMTAB) {
6358@@ -340,7 +373,9 @@ static void read_symtabs(FILE *fp)
6359
6360 static void read_relocs(FILE *fp)
6361 {
6362- int i,j;
6363+ unsigned int i,j;
6364+ uint32_t base;
6365+
6366 for (i = 0; i < ehdr.e_shnum; i++) {
6367 struct section *sec = &secs[i];
6368 if (sec->shdr.sh_type != SHT_REL) {
6369@@ -360,9 +395,18 @@ static void read_relocs(FILE *fp)
6370 die("Cannot read symbol table: %s\n",
6371 strerror(errno));
6372 }
6373+ base = 0;
6374+ for (j = 0; j < ehdr.e_phnum; j++) {
6375+ if (phdr[j].p_type != PT_LOAD )
6376+ continue;
6377+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
6378+ continue;
6379+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
6380+ break;
6381+ }
6382 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) {
6383 Elf32_Rel *rel = &sec->reltab[j];
6384- rel->r_offset = elf32_to_cpu(rel->r_offset);
6385+ rel->r_offset = elf32_to_cpu(rel->r_offset) + base;
6386 rel->r_info = elf32_to_cpu(rel->r_info);
6387 }
6388 }
6389@@ -371,14 +415,14 @@ static void read_relocs(FILE *fp)
6390
6391 static void print_absolute_symbols(void)
6392 {
6393- int i;
6394+ unsigned int i;
6395 printf("Absolute symbols\n");
6396 printf(" Num: Value Size Type Bind Visibility Name\n");
6397 for (i = 0; i < ehdr.e_shnum; i++) {
6398 struct section *sec = &secs[i];
6399 char *sym_strtab;
6400 Elf32_Sym *sh_symtab;
6401- int j;
6402+ unsigned int j;
6403
6404 if (sec->shdr.sh_type != SHT_SYMTAB) {
6405 continue;
6406@@ -406,14 +450,14 @@ static void print_absolute_symbols(void)
6407
6408 static void print_absolute_relocs(void)
6409 {
6410- int i, printed = 0;
6411+ unsigned int i, printed = 0;
6412
6413 for (i = 0; i < ehdr.e_shnum; i++) {
6414 struct section *sec = &secs[i];
6415 struct section *sec_applies, *sec_symtab;
6416 char *sym_strtab;
6417 Elf32_Sym *sh_symtab;
6418- int j;
6419+ unsigned int j;
6420 if (sec->shdr.sh_type != SHT_REL) {
6421 continue;
6422 }
6423@@ -474,13 +518,13 @@ static void print_absolute_relocs(void)
6424
6425 static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym))
6426 {
6427- int i;
6428+ unsigned int i;
6429 /* Walk through the relocations */
6430 for (i = 0; i < ehdr.e_shnum; i++) {
6431 char *sym_strtab;
6432 Elf32_Sym *sh_symtab;
6433 struct section *sec_applies, *sec_symtab;
6434- int j;
6435+ unsigned int j;
6436 struct section *sec = &secs[i];
6437
6438 if (sec->shdr.sh_type != SHT_REL) {
6439@@ -504,6 +548,21 @@ static void walk_relocs(void (*visit)(El
6440 if (sym->st_shndx == SHN_ABS) {
6441 continue;
6442 }
6443+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
6444+ if (!strcmp(sec_name(sym->st_shndx), ".data.percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
6445+ continue;
6446+
6447+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32)
6448+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
6449+ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
6450+ continue;
6451+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
6452+ continue;
6453+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
6454+ continue;
6455+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
6456+ continue;
6457+#endif
6458 if (r_type == R_386_NONE || r_type == R_386_PC32) {
6459 /*
6460 * NONE can be ignored and and PC relative
6461@@ -541,7 +600,7 @@ static int cmp_relocs(const void *va, co
6462
6463 static void emit_relocs(int as_text)
6464 {
6465- int i;
6466+ unsigned int i;
6467 /* Count how many relocations I have and allocate space for them. */
6468 reloc_count = 0;
6469 walk_relocs(count_reloc);
6470@@ -634,6 +693,7 @@ int main(int argc, char **argv)
6471 fname, strerror(errno));
6472 }
6473 read_ehdr(fp);
6474+ read_phdrs(fp);
6475 read_shdrs(fp);
6476 read_strtabs(fp);
6477 read_symtabs(fp);
6478diff -urNp linux-2.6.32.43/arch/x86/boot/cpucheck.c linux-2.6.32.43/arch/x86/boot/cpucheck.c
6479--- linux-2.6.32.43/arch/x86/boot/cpucheck.c 2011-03-27 14:31:47.000000000 -0400
6480+++ linux-2.6.32.43/arch/x86/boot/cpucheck.c 2011-04-17 15:56:46.000000000 -0400
6481@@ -74,7 +74,7 @@ static int has_fpu(void)
6482 u16 fcw = -1, fsw = -1;
6483 u32 cr0;
6484
6485- asm("movl %%cr0,%0" : "=r" (cr0));
6486+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
6487 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
6488 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
6489 asm volatile("movl %0,%%cr0" : : "r" (cr0));
6490@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
6491 {
6492 u32 f0, f1;
6493
6494- asm("pushfl ; "
6495+ asm volatile("pushfl ; "
6496 "pushfl ; "
6497 "popl %0 ; "
6498 "movl %0,%1 ; "
6499@@ -115,7 +115,7 @@ static void get_flags(void)
6500 set_bit(X86_FEATURE_FPU, cpu.flags);
6501
6502 if (has_eflag(X86_EFLAGS_ID)) {
6503- asm("cpuid"
6504+ asm volatile("cpuid"
6505 : "=a" (max_intel_level),
6506 "=b" (cpu_vendor[0]),
6507 "=d" (cpu_vendor[1]),
6508@@ -124,7 +124,7 @@ static void get_flags(void)
6509
6510 if (max_intel_level >= 0x00000001 &&
6511 max_intel_level <= 0x0000ffff) {
6512- asm("cpuid"
6513+ asm volatile("cpuid"
6514 : "=a" (tfms),
6515 "=c" (cpu.flags[4]),
6516 "=d" (cpu.flags[0])
6517@@ -136,7 +136,7 @@ static void get_flags(void)
6518 cpu.model += ((tfms >> 16) & 0xf) << 4;
6519 }
6520
6521- asm("cpuid"
6522+ asm volatile("cpuid"
6523 : "=a" (max_amd_level)
6524 : "a" (0x80000000)
6525 : "ebx", "ecx", "edx");
6526@@ -144,7 +144,7 @@ static void get_flags(void)
6527 if (max_amd_level >= 0x80000001 &&
6528 max_amd_level <= 0x8000ffff) {
6529 u32 eax = 0x80000001;
6530- asm("cpuid"
6531+ asm volatile("cpuid"
6532 : "+a" (eax),
6533 "=c" (cpu.flags[6]),
6534 "=d" (cpu.flags[1])
6535@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6536 u32 ecx = MSR_K7_HWCR;
6537 u32 eax, edx;
6538
6539- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6540+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6541 eax &= ~(1 << 15);
6542- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6543+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6544
6545 get_flags(); /* Make sure it really did something */
6546 err = check_flags();
6547@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r
6548 u32 ecx = MSR_VIA_FCR;
6549 u32 eax, edx;
6550
6551- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6552+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6553 eax |= (1<<1)|(1<<7);
6554- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6555+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6556
6557 set_bit(X86_FEATURE_CX8, cpu.flags);
6558 err = check_flags();
6559@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r
6560 u32 eax, edx;
6561 u32 level = 1;
6562
6563- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6564- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6565- asm("cpuid"
6566+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
6567+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
6568+ asm volatile("cpuid"
6569 : "+a" (level), "=d" (cpu.flags[0])
6570 : : "ecx", "ebx");
6571- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6572+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
6573
6574 err = check_flags();
6575 }
6576diff -urNp linux-2.6.32.43/arch/x86/boot/header.S linux-2.6.32.43/arch/x86/boot/header.S
6577--- linux-2.6.32.43/arch/x86/boot/header.S 2011-03-27 14:31:47.000000000 -0400
6578+++ linux-2.6.32.43/arch/x86/boot/header.S 2011-04-17 15:56:46.000000000 -0400
6579@@ -224,7 +224,7 @@ setup_data: .quad 0 # 64-bit physical
6580 # single linked list of
6581 # struct setup_data
6582
6583-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
6584+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
6585
6586 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
6587 #define VO_INIT_SIZE (VO__end - VO__text)
6588diff -urNp linux-2.6.32.43/arch/x86/boot/Makefile linux-2.6.32.43/arch/x86/boot/Makefile
6589--- linux-2.6.32.43/arch/x86/boot/Makefile 2011-03-27 14:31:47.000000000 -0400
6590+++ linux-2.6.32.43/arch/x86/boot/Makefile 2011-08-07 14:38:13.000000000 -0400
6591@@ -69,6 +69,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
6592 $(call cc-option, -fno-stack-protector) \
6593 $(call cc-option, -mpreferred-stack-boundary=2)
6594 KBUILD_CFLAGS += $(call cc-option, -m32)
6595+ifdef CONSTIFY_PLUGIN
6596+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
6597+endif
6598 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
6599 GCOV_PROFILE := n
6600
6601diff -urNp linux-2.6.32.43/arch/x86/boot/memory.c linux-2.6.32.43/arch/x86/boot/memory.c
6602--- linux-2.6.32.43/arch/x86/boot/memory.c 2011-03-27 14:31:47.000000000 -0400
6603+++ linux-2.6.32.43/arch/x86/boot/memory.c 2011-04-17 15:56:46.000000000 -0400
6604@@ -19,7 +19,7 @@
6605
6606 static int detect_memory_e820(void)
6607 {
6608- int count = 0;
6609+ unsigned int count = 0;
6610 struct biosregs ireg, oreg;
6611 struct e820entry *desc = boot_params.e820_map;
6612 static struct e820entry buf; /* static so it is zeroed */
6613diff -urNp linux-2.6.32.43/arch/x86/boot/video.c linux-2.6.32.43/arch/x86/boot/video.c
6614--- linux-2.6.32.43/arch/x86/boot/video.c 2011-03-27 14:31:47.000000000 -0400
6615+++ linux-2.6.32.43/arch/x86/boot/video.c 2011-04-17 15:56:46.000000000 -0400
6616@@ -90,7 +90,7 @@ static void store_mode_params(void)
6617 static unsigned int get_entry(void)
6618 {
6619 char entry_buf[4];
6620- int i, len = 0;
6621+ unsigned int i, len = 0;
6622 int key;
6623 unsigned int v;
6624
6625diff -urNp linux-2.6.32.43/arch/x86/boot/video-vesa.c linux-2.6.32.43/arch/x86/boot/video-vesa.c
6626--- linux-2.6.32.43/arch/x86/boot/video-vesa.c 2011-03-27 14:31:47.000000000 -0400
6627+++ linux-2.6.32.43/arch/x86/boot/video-vesa.c 2011-04-17 15:56:46.000000000 -0400
6628@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
6629
6630 boot_params.screen_info.vesapm_seg = oreg.es;
6631 boot_params.screen_info.vesapm_off = oreg.di;
6632+ boot_params.screen_info.vesapm_size = oreg.cx;
6633 }
6634
6635 /*
6636diff -urNp linux-2.6.32.43/arch/x86/ia32/ia32_aout.c linux-2.6.32.43/arch/x86/ia32/ia32_aout.c
6637--- linux-2.6.32.43/arch/x86/ia32/ia32_aout.c 2011-03-27 14:31:47.000000000 -0400
6638+++ linux-2.6.32.43/arch/x86/ia32/ia32_aout.c 2011-04-17 15:56:46.000000000 -0400
6639@@ -169,6 +169,8 @@ static int aout_core_dump(long signr, st
6640 unsigned long dump_start, dump_size;
6641 struct user32 dump;
6642
6643+ memset(&dump, 0, sizeof(dump));
6644+
6645 fs = get_fs();
6646 set_fs(KERNEL_DS);
6647 has_dumped = 1;
6648@@ -218,12 +220,6 @@ static int aout_core_dump(long signr, st
6649 dump_size = dump.u_ssize << PAGE_SHIFT;
6650 DUMP_WRITE(dump_start, dump_size);
6651 }
6652- /*
6653- * Finally dump the task struct. Not be used by gdb, but
6654- * could be useful
6655- */
6656- set_fs(KERNEL_DS);
6657- DUMP_WRITE(current, sizeof(*current));
6658 end_coredump:
6659 set_fs(fs);
6660 return has_dumped;
6661diff -urNp linux-2.6.32.43/arch/x86/ia32/ia32entry.S linux-2.6.32.43/arch/x86/ia32/ia32entry.S
6662--- linux-2.6.32.43/arch/x86/ia32/ia32entry.S 2011-03-27 14:31:47.000000000 -0400
6663+++ linux-2.6.32.43/arch/x86/ia32/ia32entry.S 2011-06-04 20:29:52.000000000 -0400
6664@@ -13,6 +13,7 @@
6665 #include <asm/thread_info.h>
6666 #include <asm/segment.h>
6667 #include <asm/irqflags.h>
6668+#include <asm/pgtable.h>
6669 #include <linux/linkage.h>
6670
6671 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
6672@@ -93,6 +94,30 @@ ENTRY(native_irq_enable_sysexit)
6673 ENDPROC(native_irq_enable_sysexit)
6674 #endif
6675
6676+ .macro pax_enter_kernel_user
6677+#ifdef CONFIG_PAX_MEMORY_UDEREF
6678+ call pax_enter_kernel_user
6679+#endif
6680+ .endm
6681+
6682+ .macro pax_exit_kernel_user
6683+#ifdef CONFIG_PAX_MEMORY_UDEREF
6684+ call pax_exit_kernel_user
6685+#endif
6686+#ifdef CONFIG_PAX_RANDKSTACK
6687+ pushq %rax
6688+ call pax_randomize_kstack
6689+ popq %rax
6690+#endif
6691+ pax_erase_kstack
6692+ .endm
6693+
6694+.macro pax_erase_kstack
6695+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
6696+ call pax_erase_kstack
6697+#endif
6698+.endm
6699+
6700 /*
6701 * 32bit SYSENTER instruction entry.
6702 *
6703@@ -119,7 +144,7 @@ ENTRY(ia32_sysenter_target)
6704 CFI_REGISTER rsp,rbp
6705 SWAPGS_UNSAFE_STACK
6706 movq PER_CPU_VAR(kernel_stack), %rsp
6707- addq $(KERNEL_STACK_OFFSET),%rsp
6708+ pax_enter_kernel_user
6709 /*
6710 * No need to follow this irqs on/off section: the syscall
6711 * disabled irqs, here we enable it straight after entry:
6712@@ -135,7 +160,8 @@ ENTRY(ia32_sysenter_target)
6713 pushfq
6714 CFI_ADJUST_CFA_OFFSET 8
6715 /*CFI_REL_OFFSET rflags,0*/
6716- movl 8*3-THREAD_SIZE+TI_sysenter_return(%rsp), %r10d
6717+ GET_THREAD_INFO(%r10)
6718+ movl TI_sysenter_return(%r10), %r10d
6719 CFI_REGISTER rip,r10
6720 pushq $__USER32_CS
6721 CFI_ADJUST_CFA_OFFSET 8
6722@@ -150,6 +176,12 @@ ENTRY(ia32_sysenter_target)
6723 SAVE_ARGS 0,0,1
6724 /* no need to do an access_ok check here because rbp has been
6725 32bit zero extended */
6726+
6727+#ifdef CONFIG_PAX_MEMORY_UDEREF
6728+ mov $PAX_USER_SHADOW_BASE,%r10
6729+ add %r10,%rbp
6730+#endif
6731+
6732 1: movl (%rbp),%ebp
6733 .section __ex_table,"a"
6734 .quad 1b,ia32_badarg
6735@@ -172,6 +204,7 @@ sysenter_dispatch:
6736 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6737 jnz sysexit_audit
6738 sysexit_from_sys_call:
6739+ pax_exit_kernel_user
6740 andl $~TS_COMPAT,TI_status(%r10)
6741 /* clear IF, that popfq doesn't enable interrupts early */
6742 andl $~0x200,EFLAGS-R11(%rsp)
6743@@ -200,6 +233,9 @@ sysexit_from_sys_call:
6744 movl %eax,%esi /* 2nd arg: syscall number */
6745 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
6746 call audit_syscall_entry
6747+
6748+ pax_erase_kstack
6749+
6750 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
6751 cmpq $(IA32_NR_syscalls-1),%rax
6752 ja ia32_badsys
6753@@ -252,6 +288,9 @@ sysenter_tracesys:
6754 movq $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
6755 movq %rsp,%rdi /* &pt_regs -> arg1 */
6756 call syscall_trace_enter
6757+
6758+ pax_erase_kstack
6759+
6760 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6761 RESTORE_REST
6762 cmpq $(IA32_NR_syscalls-1),%rax
6763@@ -283,19 +322,24 @@ ENDPROC(ia32_sysenter_target)
6764 ENTRY(ia32_cstar_target)
6765 CFI_STARTPROC32 simple
6766 CFI_SIGNAL_FRAME
6767- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
6768+ CFI_DEF_CFA rsp,0
6769 CFI_REGISTER rip,rcx
6770 /*CFI_REGISTER rflags,r11*/
6771 SWAPGS_UNSAFE_STACK
6772 movl %esp,%r8d
6773 CFI_REGISTER rsp,r8
6774 movq PER_CPU_VAR(kernel_stack),%rsp
6775+
6776+#ifdef CONFIG_PAX_MEMORY_UDEREF
6777+ pax_enter_kernel_user
6778+#endif
6779+
6780 /*
6781 * No need to follow this irqs on/off section: the syscall
6782 * disabled irqs and here we enable it straight after entry:
6783 */
6784 ENABLE_INTERRUPTS(CLBR_NONE)
6785- SAVE_ARGS 8,1,1
6786+ SAVE_ARGS 8*6,1,1
6787 movl %eax,%eax /* zero extension */
6788 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
6789 movq %rcx,RIP-ARGOFFSET(%rsp)
6790@@ -311,6 +355,12 @@ ENTRY(ia32_cstar_target)
6791 /* no need to do an access_ok check here because r8 has been
6792 32bit zero extended */
6793 /* hardware stack frame is complete now */
6794+
6795+#ifdef CONFIG_PAX_MEMORY_UDEREF
6796+ mov $PAX_USER_SHADOW_BASE,%r10
6797+ add %r10,%r8
6798+#endif
6799+
6800 1: movl (%r8),%r9d
6801 .section __ex_table,"a"
6802 .quad 1b,ia32_badarg
6803@@ -333,6 +383,7 @@ cstar_dispatch:
6804 testl $_TIF_ALLWORK_MASK,TI_flags(%r10)
6805 jnz sysretl_audit
6806 sysretl_from_sys_call:
6807+ pax_exit_kernel_user
6808 andl $~TS_COMPAT,TI_status(%r10)
6809 RESTORE_ARGS 1,-ARG_SKIP,1,1,1
6810 movl RIP-ARGOFFSET(%rsp),%ecx
6811@@ -370,6 +421,9 @@ cstar_tracesys:
6812 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6813 movq %rsp,%rdi /* &pt_regs -> arg1 */
6814 call syscall_trace_enter
6815+
6816+ pax_erase_kstack
6817+
6818 LOAD_ARGS32 ARGOFFSET, 1 /* reload args from stack in case ptrace changed it */
6819 RESTORE_REST
6820 xchgl %ebp,%r9d
6821@@ -415,6 +469,7 @@ ENTRY(ia32_syscall)
6822 CFI_REL_OFFSET rip,RIP-RIP
6823 PARAVIRT_ADJUST_EXCEPTION_FRAME
6824 SWAPGS
6825+ pax_enter_kernel_user
6826 /*
6827 * No need to follow this irqs on/off section: the syscall
6828 * disabled irqs and here we enable it straight after entry:
6829@@ -448,6 +503,9 @@ ia32_tracesys:
6830 movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
6831 movq %rsp,%rdi /* &pt_regs -> arg1 */
6832 call syscall_trace_enter
6833+
6834+ pax_erase_kstack
6835+
6836 LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */
6837 RESTORE_REST
6838 cmpq $(IA32_NR_syscalls-1),%rax
6839diff -urNp linux-2.6.32.43/arch/x86/ia32/ia32_signal.c linux-2.6.32.43/arch/x86/ia32/ia32_signal.c
6840--- linux-2.6.32.43/arch/x86/ia32/ia32_signal.c 2011-03-27 14:31:47.000000000 -0400
6841+++ linux-2.6.32.43/arch/x86/ia32/ia32_signal.c 2011-04-17 15:56:46.000000000 -0400
6842@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct
6843 sp -= frame_size;
6844 /* Align the stack pointer according to the i386 ABI,
6845 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
6846- sp = ((sp + 4) & -16ul) - 4;
6847+ sp = ((sp - 12) & -16ul) - 4;
6848 return (void __user *) sp;
6849 }
6850
6851@@ -461,7 +461,7 @@ int ia32_setup_frame(int sig, struct k_s
6852 * These are actually not used anymore, but left because some
6853 * gdb versions depend on them as a marker.
6854 */
6855- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6856+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6857 } put_user_catch(err);
6858
6859 if (err)
6860@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct
6861 0xb8,
6862 __NR_ia32_rt_sigreturn,
6863 0x80cd,
6864- 0,
6865+ 0
6866 };
6867
6868 frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);
6869@@ -533,16 +533,18 @@ int ia32_setup_rt_frame(int sig, struct
6870
6871 if (ka->sa.sa_flags & SA_RESTORER)
6872 restorer = ka->sa.sa_restorer;
6873+ else if (current->mm->context.vdso)
6874+ /* Return stub is in 32bit vsyscall page */
6875+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
6876 else
6877- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
6878- rt_sigreturn);
6879+ restorer = &frame->retcode;
6880 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
6881
6882 /*
6883 * Not actually used anymore, but left because some gdb
6884 * versions need it.
6885 */
6886- put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
6887+ put_user_ex(*((const u64 *)&code), (u64 *)frame->retcode);
6888 } put_user_catch(err);
6889
6890 if (err)
6891diff -urNp linux-2.6.32.43/arch/x86/include/asm/alternative.h linux-2.6.32.43/arch/x86/include/asm/alternative.h
6892--- linux-2.6.32.43/arch/x86/include/asm/alternative.h 2011-03-27 14:31:47.000000000 -0400
6893+++ linux-2.6.32.43/arch/x86/include/asm/alternative.h 2011-04-17 15:56:46.000000000 -0400
6894@@ -85,7 +85,7 @@ static inline void alternatives_smp_swit
6895 " .byte 662b-661b\n" /* sourcelen */ \
6896 " .byte 664f-663f\n" /* replacementlen */ \
6897 ".previous\n" \
6898- ".section .altinstr_replacement, \"ax\"\n" \
6899+ ".section .altinstr_replacement, \"a\"\n" \
6900 "663:\n\t" newinstr "\n664:\n" /* replacement */ \
6901 ".previous"
6902
6903diff -urNp linux-2.6.32.43/arch/x86/include/asm/apm.h linux-2.6.32.43/arch/x86/include/asm/apm.h
6904--- linux-2.6.32.43/arch/x86/include/asm/apm.h 2011-03-27 14:31:47.000000000 -0400
6905+++ linux-2.6.32.43/arch/x86/include/asm/apm.h 2011-04-17 15:56:46.000000000 -0400
6906@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32
6907 __asm__ __volatile__(APM_DO_ZERO_SEGS
6908 "pushl %%edi\n\t"
6909 "pushl %%ebp\n\t"
6910- "lcall *%%cs:apm_bios_entry\n\t"
6911+ "lcall *%%ss:apm_bios_entry\n\t"
6912 "setc %%al\n\t"
6913 "popl %%ebp\n\t"
6914 "popl %%edi\n\t"
6915@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as
6916 __asm__ __volatile__(APM_DO_ZERO_SEGS
6917 "pushl %%edi\n\t"
6918 "pushl %%ebp\n\t"
6919- "lcall *%%cs:apm_bios_entry\n\t"
6920+ "lcall *%%ss:apm_bios_entry\n\t"
6921 "setc %%bl\n\t"
6922 "popl %%ebp\n\t"
6923 "popl %%edi\n\t"
6924diff -urNp linux-2.6.32.43/arch/x86/include/asm/atomic_32.h linux-2.6.32.43/arch/x86/include/asm/atomic_32.h
6925--- linux-2.6.32.43/arch/x86/include/asm/atomic_32.h 2011-03-27 14:31:47.000000000 -0400
6926+++ linux-2.6.32.43/arch/x86/include/asm/atomic_32.h 2011-05-04 17:56:20.000000000 -0400
6927@@ -25,6 +25,17 @@ static inline int atomic_read(const atom
6928 }
6929
6930 /**
6931+ * atomic_read_unchecked - read atomic variable
6932+ * @v: pointer of type atomic_unchecked_t
6933+ *
6934+ * Atomically reads the value of @v.
6935+ */
6936+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
6937+{
6938+ return v->counter;
6939+}
6940+
6941+/**
6942 * atomic_set - set atomic variable
6943 * @v: pointer of type atomic_t
6944 * @i: required value
6945@@ -37,6 +48,18 @@ static inline void atomic_set(atomic_t *
6946 }
6947
6948 /**
6949+ * atomic_set_unchecked - set atomic variable
6950+ * @v: pointer of type atomic_unchecked_t
6951+ * @i: required value
6952+ *
6953+ * Atomically sets the value of @v to @i.
6954+ */
6955+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
6956+{
6957+ v->counter = i;
6958+}
6959+
6960+/**
6961 * atomic_add - add integer to atomic variable
6962 * @i: integer value to add
6963 * @v: pointer of type atomic_t
6964@@ -45,7 +68,29 @@ static inline void atomic_set(atomic_t *
6965 */
6966 static inline void atomic_add(int i, atomic_t *v)
6967 {
6968- asm volatile(LOCK_PREFIX "addl %1,%0"
6969+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6970+
6971+#ifdef CONFIG_PAX_REFCOUNT
6972+ "jno 0f\n"
6973+ LOCK_PREFIX "subl %1,%0\n"
6974+ "int $4\n0:\n"
6975+ _ASM_EXTABLE(0b, 0b)
6976+#endif
6977+
6978+ : "+m" (v->counter)
6979+ : "ir" (i));
6980+}
6981+
6982+/**
6983+ * atomic_add_unchecked - add integer to atomic variable
6984+ * @i: integer value to add
6985+ * @v: pointer of type atomic_unchecked_t
6986+ *
6987+ * Atomically adds @i to @v.
6988+ */
6989+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
6990+{
6991+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
6992 : "+m" (v->counter)
6993 : "ir" (i));
6994 }
6995@@ -59,7 +104,29 @@ static inline void atomic_add(int i, ato
6996 */
6997 static inline void atomic_sub(int i, atomic_t *v)
6998 {
6999- asm volatile(LOCK_PREFIX "subl %1,%0"
7000+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7001+
7002+#ifdef CONFIG_PAX_REFCOUNT
7003+ "jno 0f\n"
7004+ LOCK_PREFIX "addl %1,%0\n"
7005+ "int $4\n0:\n"
7006+ _ASM_EXTABLE(0b, 0b)
7007+#endif
7008+
7009+ : "+m" (v->counter)
7010+ : "ir" (i));
7011+}
7012+
7013+/**
7014+ * atomic_sub_unchecked - subtract integer from atomic variable
7015+ * @i: integer value to subtract
7016+ * @v: pointer of type atomic_unchecked_t
7017+ *
7018+ * Atomically subtracts @i from @v.
7019+ */
7020+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7021+{
7022+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7023 : "+m" (v->counter)
7024 : "ir" (i));
7025 }
7026@@ -77,7 +144,16 @@ static inline int atomic_sub_and_test(in
7027 {
7028 unsigned char c;
7029
7030- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7031+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7032+
7033+#ifdef CONFIG_PAX_REFCOUNT
7034+ "jno 0f\n"
7035+ LOCK_PREFIX "addl %2,%0\n"
7036+ "int $4\n0:\n"
7037+ _ASM_EXTABLE(0b, 0b)
7038+#endif
7039+
7040+ "sete %1\n"
7041 : "+m" (v->counter), "=qm" (c)
7042 : "ir" (i) : "memory");
7043 return c;
7044@@ -91,7 +167,27 @@ static inline int atomic_sub_and_test(in
7045 */
7046 static inline void atomic_inc(atomic_t *v)
7047 {
7048- asm volatile(LOCK_PREFIX "incl %0"
7049+ asm volatile(LOCK_PREFIX "incl %0\n"
7050+
7051+#ifdef CONFIG_PAX_REFCOUNT
7052+ "jno 0f\n"
7053+ LOCK_PREFIX "decl %0\n"
7054+ "int $4\n0:\n"
7055+ _ASM_EXTABLE(0b, 0b)
7056+#endif
7057+
7058+ : "+m" (v->counter));
7059+}
7060+
7061+/**
7062+ * atomic_inc_unchecked - increment atomic variable
7063+ * @v: pointer of type atomic_unchecked_t
7064+ *
7065+ * Atomically increments @v by 1.
7066+ */
7067+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7068+{
7069+ asm volatile(LOCK_PREFIX "incl %0\n"
7070 : "+m" (v->counter));
7071 }
7072
7073@@ -103,7 +199,27 @@ static inline void atomic_inc(atomic_t *
7074 */
7075 static inline void atomic_dec(atomic_t *v)
7076 {
7077- asm volatile(LOCK_PREFIX "decl %0"
7078+ asm volatile(LOCK_PREFIX "decl %0\n"
7079+
7080+#ifdef CONFIG_PAX_REFCOUNT
7081+ "jno 0f\n"
7082+ LOCK_PREFIX "incl %0\n"
7083+ "int $4\n0:\n"
7084+ _ASM_EXTABLE(0b, 0b)
7085+#endif
7086+
7087+ : "+m" (v->counter));
7088+}
7089+
7090+/**
7091+ * atomic_dec_unchecked - decrement atomic variable
7092+ * @v: pointer of type atomic_unchecked_t
7093+ *
7094+ * Atomically decrements @v by 1.
7095+ */
7096+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7097+{
7098+ asm volatile(LOCK_PREFIX "decl %0\n"
7099 : "+m" (v->counter));
7100 }
7101
7102@@ -119,7 +235,16 @@ static inline int atomic_dec_and_test(at
7103 {
7104 unsigned char c;
7105
7106- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7107+ asm volatile(LOCK_PREFIX "decl %0\n"
7108+
7109+#ifdef CONFIG_PAX_REFCOUNT
7110+ "jno 0f\n"
7111+ LOCK_PREFIX "incl %0\n"
7112+ "int $4\n0:\n"
7113+ _ASM_EXTABLE(0b, 0b)
7114+#endif
7115+
7116+ "sete %1\n"
7117 : "+m" (v->counter), "=qm" (c)
7118 : : "memory");
7119 return c != 0;
7120@@ -137,7 +262,35 @@ static inline int atomic_inc_and_test(at
7121 {
7122 unsigned char c;
7123
7124- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7125+ asm volatile(LOCK_PREFIX "incl %0\n"
7126+
7127+#ifdef CONFIG_PAX_REFCOUNT
7128+ "jno 0f\n"
7129+ LOCK_PREFIX "decl %0\n"
7130+ "into\n0:\n"
7131+ _ASM_EXTABLE(0b, 0b)
7132+#endif
7133+
7134+ "sete %1\n"
7135+ : "+m" (v->counter), "=qm" (c)
7136+ : : "memory");
7137+ return c != 0;
7138+}
7139+
7140+/**
7141+ * atomic_inc_and_test_unchecked - increment and test
7142+ * @v: pointer of type atomic_unchecked_t
7143+ *
7144+ * Atomically increments @v by 1
7145+ * and returns true if the result is zero, or false for all
7146+ * other cases.
7147+ */
7148+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7149+{
7150+ unsigned char c;
7151+
7152+ asm volatile(LOCK_PREFIX "incl %0\n"
7153+ "sete %1\n"
7154 : "+m" (v->counter), "=qm" (c)
7155 : : "memory");
7156 return c != 0;
7157@@ -156,7 +309,16 @@ static inline int atomic_add_negative(in
7158 {
7159 unsigned char c;
7160
7161- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7162+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7163+
7164+#ifdef CONFIG_PAX_REFCOUNT
7165+ "jno 0f\n"
7166+ LOCK_PREFIX "subl %2,%0\n"
7167+ "int $4\n0:\n"
7168+ _ASM_EXTABLE(0b, 0b)
7169+#endif
7170+
7171+ "sets %1\n"
7172 : "+m" (v->counter), "=qm" (c)
7173 : "ir" (i) : "memory");
7174 return c;
7175@@ -179,6 +341,46 @@ static inline int atomic_add_return(int
7176 #endif
7177 /* Modern 486+ processor */
7178 __i = i;
7179+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7180+
7181+#ifdef CONFIG_PAX_REFCOUNT
7182+ "jno 0f\n"
7183+ "movl %0, %1\n"
7184+ "int $4\n0:\n"
7185+ _ASM_EXTABLE(0b, 0b)
7186+#endif
7187+
7188+ : "+r" (i), "+m" (v->counter)
7189+ : : "memory");
7190+ return i + __i;
7191+
7192+#ifdef CONFIG_M386
7193+no_xadd: /* Legacy 386 processor */
7194+ local_irq_save(flags);
7195+ __i = atomic_read(v);
7196+ atomic_set(v, i + __i);
7197+ local_irq_restore(flags);
7198+ return i + __i;
7199+#endif
7200+}
7201+
7202+/**
7203+ * atomic_add_return_unchecked - add integer and return
7204+ * @v: pointer of type atomic_unchecked_t
7205+ * @i: integer value to add
7206+ *
7207+ * Atomically adds @i to @v and returns @i + @v
7208+ */
7209+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7210+{
7211+ int __i;
7212+#ifdef CONFIG_M386
7213+ unsigned long flags;
7214+ if (unlikely(boot_cpu_data.x86 <= 3))
7215+ goto no_xadd;
7216+#endif
7217+ /* Modern 486+ processor */
7218+ __i = i;
7219 asm volatile(LOCK_PREFIX "xaddl %0, %1"
7220 : "+r" (i), "+m" (v->counter)
7221 : : "memory");
7222@@ -211,11 +413,21 @@ static inline int atomic_cmpxchg(atomic_
7223 return cmpxchg(&v->counter, old, new);
7224 }
7225
7226+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7227+{
7228+ return cmpxchg(&v->counter, old, new);
7229+}
7230+
7231 static inline int atomic_xchg(atomic_t *v, int new)
7232 {
7233 return xchg(&v->counter, new);
7234 }
7235
7236+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7237+{
7238+ return xchg(&v->counter, new);
7239+}
7240+
7241 /**
7242 * atomic_add_unless - add unless the number is already a given value
7243 * @v: pointer of type atomic_t
7244@@ -227,22 +439,39 @@ static inline int atomic_xchg(atomic_t *
7245 */
7246 static inline int atomic_add_unless(atomic_t *v, int a, int u)
7247 {
7248- int c, old;
7249+ int c, old, new;
7250 c = atomic_read(v);
7251 for (;;) {
7252- if (unlikely(c == (u)))
7253+ if (unlikely(c == u))
7254 break;
7255- old = atomic_cmpxchg((v), c, c + (a));
7256+
7257+ asm volatile("addl %2,%0\n"
7258+
7259+#ifdef CONFIG_PAX_REFCOUNT
7260+ "jno 0f\n"
7261+ "subl %2,%0\n"
7262+ "int $4\n0:\n"
7263+ _ASM_EXTABLE(0b, 0b)
7264+#endif
7265+
7266+ : "=r" (new)
7267+ : "0" (c), "ir" (a));
7268+
7269+ old = atomic_cmpxchg(v, c, new);
7270 if (likely(old == c))
7271 break;
7272 c = old;
7273 }
7274- return c != (u);
7275+ return c != u;
7276 }
7277
7278 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
7279
7280 #define atomic_inc_return(v) (atomic_add_return(1, v))
7281+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7282+{
7283+ return atomic_add_return_unchecked(1, v);
7284+}
7285 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7286
7287 /* These are x86-specific, used by some header files */
7288@@ -266,9 +495,18 @@ typedef struct {
7289 u64 __aligned(8) counter;
7290 } atomic64_t;
7291
7292+#ifdef CONFIG_PAX_REFCOUNT
7293+typedef struct {
7294+ u64 __aligned(8) counter;
7295+} atomic64_unchecked_t;
7296+#else
7297+typedef atomic64_t atomic64_unchecked_t;
7298+#endif
7299+
7300 #define ATOMIC64_INIT(val) { (val) }
7301
7302 extern u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old_val, u64 new_val);
7303+extern u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val);
7304
7305 /**
7306 * atomic64_xchg - xchg atomic64 variable
7307@@ -279,6 +517,7 @@ extern u64 atomic64_cmpxchg(atomic64_t *
7308 * the old value.
7309 */
7310 extern u64 atomic64_xchg(atomic64_t *ptr, u64 new_val);
7311+extern u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7312
7313 /**
7314 * atomic64_set - set atomic64 variable
7315@@ -290,6 +529,15 @@ extern u64 atomic64_xchg(atomic64_t *ptr
7316 extern void atomic64_set(atomic64_t *ptr, u64 new_val);
7317
7318 /**
7319+ * atomic64_unchecked_set - set atomic64 variable
7320+ * @ptr: pointer to type atomic64_unchecked_t
7321+ * @new_val: value to assign
7322+ *
7323+ * Atomically sets the value of @ptr to @new_val.
7324+ */
7325+extern void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val);
7326+
7327+/**
7328 * atomic64_read - read atomic64 variable
7329 * @ptr: pointer to type atomic64_t
7330 *
7331@@ -317,7 +565,33 @@ static inline u64 atomic64_read(atomic64
7332 return res;
7333 }
7334
7335-extern u64 atomic64_read(atomic64_t *ptr);
7336+/**
7337+ * atomic64_read_unchecked - read atomic64 variable
7338+ * @ptr: pointer to type atomic64_unchecked_t
7339+ *
7340+ * Atomically reads the value of @ptr and returns it.
7341+ */
7342+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *ptr)
7343+{
7344+ u64 res;
7345+
7346+ /*
7347+ * Note, we inline this atomic64_unchecked_t primitive because
7348+ * it only clobbers EAX/EDX and leaves the others
7349+ * untouched. We also (somewhat subtly) rely on the
7350+ * fact that cmpxchg8b returns the current 64-bit value
7351+ * of the memory location we are touching:
7352+ */
7353+ asm volatile(
7354+ "mov %%ebx, %%eax\n\t"
7355+ "mov %%ecx, %%edx\n\t"
7356+ LOCK_PREFIX "cmpxchg8b %1\n"
7357+ : "=&A" (res)
7358+ : "m" (*ptr)
7359+ );
7360+
7361+ return res;
7362+}
7363
7364 /**
7365 * atomic64_add_return - add and return
7366@@ -332,8 +606,11 @@ extern u64 atomic64_add_return(u64 delta
7367 * Other variants with different arithmetic operators:
7368 */
7369 extern u64 atomic64_sub_return(u64 delta, atomic64_t *ptr);
7370+extern u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7371 extern u64 atomic64_inc_return(atomic64_t *ptr);
7372+extern u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr);
7373 extern u64 atomic64_dec_return(atomic64_t *ptr);
7374+extern u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr);
7375
7376 /**
7377 * atomic64_add - add integer to atomic64 variable
7378@@ -345,6 +622,15 @@ extern u64 atomic64_dec_return(atomic64_
7379 extern void atomic64_add(u64 delta, atomic64_t *ptr);
7380
7381 /**
7382+ * atomic64_add_unchecked - add integer to atomic64 variable
7383+ * @delta: integer value to add
7384+ * @ptr: pointer to type atomic64_unchecked_t
7385+ *
7386+ * Atomically adds @delta to @ptr.
7387+ */
7388+extern void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7389+
7390+/**
7391 * atomic64_sub - subtract the atomic64 variable
7392 * @delta: integer value to subtract
7393 * @ptr: pointer to type atomic64_t
7394@@ -354,6 +640,15 @@ extern void atomic64_add(u64 delta, atom
7395 extern void atomic64_sub(u64 delta, atomic64_t *ptr);
7396
7397 /**
7398+ * atomic64_sub_unchecked - subtract the atomic64 variable
7399+ * @delta: integer value to subtract
7400+ * @ptr: pointer to type atomic64_unchecked_t
7401+ *
7402+ * Atomically subtracts @delta from @ptr.
7403+ */
7404+extern void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr);
7405+
7406+/**
7407 * atomic64_sub_and_test - subtract value from variable and test result
7408 * @delta: integer value to subtract
7409 * @ptr: pointer to type atomic64_t
7410@@ -373,6 +668,14 @@ extern int atomic64_sub_and_test(u64 del
7411 extern void atomic64_inc(atomic64_t *ptr);
7412
7413 /**
7414+ * atomic64_inc_unchecked - increment atomic64 variable
7415+ * @ptr: pointer to type atomic64_unchecked_t
7416+ *
7417+ * Atomically increments @ptr by 1.
7418+ */
7419+extern void atomic64_inc_unchecked(atomic64_unchecked_t *ptr);
7420+
7421+/**
7422 * atomic64_dec - decrement atomic64 variable
7423 * @ptr: pointer to type atomic64_t
7424 *
7425@@ -381,6 +684,14 @@ extern void atomic64_inc(atomic64_t *ptr
7426 extern void atomic64_dec(atomic64_t *ptr);
7427
7428 /**
7429+ * atomic64_dec_unchecked - decrement atomic64 variable
7430+ * @ptr: pointer to type atomic64_unchecked_t
7431+ *
7432+ * Atomically decrements @ptr by 1.
7433+ */
7434+extern void atomic64_dec_unchecked(atomic64_unchecked_t *ptr);
7435+
7436+/**
7437 * atomic64_dec_and_test - decrement and test
7438 * @ptr: pointer to type atomic64_t
7439 *
7440diff -urNp linux-2.6.32.43/arch/x86/include/asm/atomic_64.h linux-2.6.32.43/arch/x86/include/asm/atomic_64.h
7441--- linux-2.6.32.43/arch/x86/include/asm/atomic_64.h 2011-03-27 14:31:47.000000000 -0400
7442+++ linux-2.6.32.43/arch/x86/include/asm/atomic_64.h 2011-05-04 18:35:31.000000000 -0400
7443@@ -24,6 +24,17 @@ static inline int atomic_read(const atom
7444 }
7445
7446 /**
7447+ * atomic_read_unchecked - read atomic variable
7448+ * @v: pointer of type atomic_unchecked_t
7449+ *
7450+ * Atomically reads the value of @v.
7451+ */
7452+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7453+{
7454+ return v->counter;
7455+}
7456+
7457+/**
7458 * atomic_set - set atomic variable
7459 * @v: pointer of type atomic_t
7460 * @i: required value
7461@@ -36,6 +47,18 @@ static inline void atomic_set(atomic_t *
7462 }
7463
7464 /**
7465+ * atomic_set_unchecked - set atomic variable
7466+ * @v: pointer of type atomic_unchecked_t
7467+ * @i: required value
7468+ *
7469+ * Atomically sets the value of @v to @i.
7470+ */
7471+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7472+{
7473+ v->counter = i;
7474+}
7475+
7476+/**
7477 * atomic_add - add integer to atomic variable
7478 * @i: integer value to add
7479 * @v: pointer of type atomic_t
7480@@ -44,7 +67,29 @@ static inline void atomic_set(atomic_t *
7481 */
7482 static inline void atomic_add(int i, atomic_t *v)
7483 {
7484- asm volatile(LOCK_PREFIX "addl %1,%0"
7485+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7486+
7487+#ifdef CONFIG_PAX_REFCOUNT
7488+ "jno 0f\n"
7489+ LOCK_PREFIX "subl %1,%0\n"
7490+ "int $4\n0:\n"
7491+ _ASM_EXTABLE(0b, 0b)
7492+#endif
7493+
7494+ : "=m" (v->counter)
7495+ : "ir" (i), "m" (v->counter));
7496+}
7497+
7498+/**
7499+ * atomic_add_unchecked - add integer to atomic variable
7500+ * @i: integer value to add
7501+ * @v: pointer of type atomic_unchecked_t
7502+ *
7503+ * Atomically adds @i to @v.
7504+ */
7505+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
7506+{
7507+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
7508 : "=m" (v->counter)
7509 : "ir" (i), "m" (v->counter));
7510 }
7511@@ -58,7 +103,29 @@ static inline void atomic_add(int i, ato
7512 */
7513 static inline void atomic_sub(int i, atomic_t *v)
7514 {
7515- asm volatile(LOCK_PREFIX "subl %1,%0"
7516+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7517+
7518+#ifdef CONFIG_PAX_REFCOUNT
7519+ "jno 0f\n"
7520+ LOCK_PREFIX "addl %1,%0\n"
7521+ "int $4\n0:\n"
7522+ _ASM_EXTABLE(0b, 0b)
7523+#endif
7524+
7525+ : "=m" (v->counter)
7526+ : "ir" (i), "m" (v->counter));
7527+}
7528+
7529+/**
7530+ * atomic_sub_unchecked - subtract the atomic variable
7531+ * @i: integer value to subtract
7532+ * @v: pointer of type atomic_unchecked_t
7533+ *
7534+ * Atomically subtracts @i from @v.
7535+ */
7536+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
7537+{
7538+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
7539 : "=m" (v->counter)
7540 : "ir" (i), "m" (v->counter));
7541 }
7542@@ -76,7 +143,16 @@ static inline int atomic_sub_and_test(in
7543 {
7544 unsigned char c;
7545
7546- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
7547+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
7548+
7549+#ifdef CONFIG_PAX_REFCOUNT
7550+ "jno 0f\n"
7551+ LOCK_PREFIX "addl %2,%0\n"
7552+ "int $4\n0:\n"
7553+ _ASM_EXTABLE(0b, 0b)
7554+#endif
7555+
7556+ "sete %1\n"
7557 : "=m" (v->counter), "=qm" (c)
7558 : "ir" (i), "m" (v->counter) : "memory");
7559 return c;
7560@@ -90,7 +166,28 @@ static inline int atomic_sub_and_test(in
7561 */
7562 static inline void atomic_inc(atomic_t *v)
7563 {
7564- asm volatile(LOCK_PREFIX "incl %0"
7565+ asm volatile(LOCK_PREFIX "incl %0\n"
7566+
7567+#ifdef CONFIG_PAX_REFCOUNT
7568+ "jno 0f\n"
7569+ LOCK_PREFIX "decl %0\n"
7570+ "int $4\n0:\n"
7571+ _ASM_EXTABLE(0b, 0b)
7572+#endif
7573+
7574+ : "=m" (v->counter)
7575+ : "m" (v->counter));
7576+}
7577+
7578+/**
7579+ * atomic_inc_unchecked - increment atomic variable
7580+ * @v: pointer of type atomic_unchecked_t
7581+ *
7582+ * Atomically increments @v by 1.
7583+ */
7584+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7585+{
7586+ asm volatile(LOCK_PREFIX "incl %0\n"
7587 : "=m" (v->counter)
7588 : "m" (v->counter));
7589 }
7590@@ -103,7 +200,28 @@ static inline void atomic_inc(atomic_t *
7591 */
7592 static inline void atomic_dec(atomic_t *v)
7593 {
7594- asm volatile(LOCK_PREFIX "decl %0"
7595+ asm volatile(LOCK_PREFIX "decl %0\n"
7596+
7597+#ifdef CONFIG_PAX_REFCOUNT
7598+ "jno 0f\n"
7599+ LOCK_PREFIX "incl %0\n"
7600+ "int $4\n0:\n"
7601+ _ASM_EXTABLE(0b, 0b)
7602+#endif
7603+
7604+ : "=m" (v->counter)
7605+ : "m" (v->counter));
7606+}
7607+
7608+/**
7609+ * atomic_dec_unchecked - decrement atomic variable
7610+ * @v: pointer of type atomic_unchecked_t
7611+ *
7612+ * Atomically decrements @v by 1.
7613+ */
7614+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7615+{
7616+ asm volatile(LOCK_PREFIX "decl %0\n"
7617 : "=m" (v->counter)
7618 : "m" (v->counter));
7619 }
7620@@ -120,7 +238,16 @@ static inline int atomic_dec_and_test(at
7621 {
7622 unsigned char c;
7623
7624- asm volatile(LOCK_PREFIX "decl %0; sete %1"
7625+ asm volatile(LOCK_PREFIX "decl %0\n"
7626+
7627+#ifdef CONFIG_PAX_REFCOUNT
7628+ "jno 0f\n"
7629+ LOCK_PREFIX "incl %0\n"
7630+ "int $4\n0:\n"
7631+ _ASM_EXTABLE(0b, 0b)
7632+#endif
7633+
7634+ "sete %1\n"
7635 : "=m" (v->counter), "=qm" (c)
7636 : "m" (v->counter) : "memory");
7637 return c != 0;
7638@@ -138,7 +265,35 @@ static inline int atomic_inc_and_test(at
7639 {
7640 unsigned char c;
7641
7642- asm volatile(LOCK_PREFIX "incl %0; sete %1"
7643+ asm volatile(LOCK_PREFIX "incl %0\n"
7644+
7645+#ifdef CONFIG_PAX_REFCOUNT
7646+ "jno 0f\n"
7647+ LOCK_PREFIX "decl %0\n"
7648+ "int $4\n0:\n"
7649+ _ASM_EXTABLE(0b, 0b)
7650+#endif
7651+
7652+ "sete %1\n"
7653+ : "=m" (v->counter), "=qm" (c)
7654+ : "m" (v->counter) : "memory");
7655+ return c != 0;
7656+}
7657+
7658+/**
7659+ * atomic_inc_and_test_unchecked - increment and test
7660+ * @v: pointer of type atomic_unchecked_t
7661+ *
7662+ * Atomically increments @v by 1
7663+ * and returns true if the result is zero, or false for all
7664+ * other cases.
7665+ */
7666+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7667+{
7668+ unsigned char c;
7669+
7670+ asm volatile(LOCK_PREFIX "incl %0\n"
7671+ "sete %1\n"
7672 : "=m" (v->counter), "=qm" (c)
7673 : "m" (v->counter) : "memory");
7674 return c != 0;
7675@@ -157,7 +312,16 @@ static inline int atomic_add_negative(in
7676 {
7677 unsigned char c;
7678
7679- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
7680+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
7681+
7682+#ifdef CONFIG_PAX_REFCOUNT
7683+ "jno 0f\n"
7684+ LOCK_PREFIX "subl %2,%0\n"
7685+ "int $4\n0:\n"
7686+ _ASM_EXTABLE(0b, 0b)
7687+#endif
7688+
7689+ "sets %1\n"
7690 : "=m" (v->counter), "=qm" (c)
7691 : "ir" (i), "m" (v->counter) : "memory");
7692 return c;
7693@@ -173,7 +337,31 @@ static inline int atomic_add_negative(in
7694 static inline int atomic_add_return(int i, atomic_t *v)
7695 {
7696 int __i = i;
7697- asm volatile(LOCK_PREFIX "xaddl %0, %1"
7698+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7699+
7700+#ifdef CONFIG_PAX_REFCOUNT
7701+ "jno 0f\n"
7702+ "movl %0, %1\n"
7703+ "int $4\n0:\n"
7704+ _ASM_EXTABLE(0b, 0b)
7705+#endif
7706+
7707+ : "+r" (i), "+m" (v->counter)
7708+ : : "memory");
7709+ return i + __i;
7710+}
7711+
7712+/**
7713+ * atomic_add_return_unchecked - add and return
7714+ * @i: integer value to add
7715+ * @v: pointer of type atomic_unchecked_t
7716+ *
7717+ * Atomically adds @i to @v and returns @i + @v
7718+ */
7719+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7720+{
7721+ int __i = i;
7722+ asm volatile(LOCK_PREFIX "xaddl %0, %1\n"
7723 : "+r" (i), "+m" (v->counter)
7724 : : "memory");
7725 return i + __i;
7726@@ -185,6 +373,10 @@ static inline int atomic_sub_return(int
7727 }
7728
7729 #define atomic_inc_return(v) (atomic_add_return(1, v))
7730+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7731+{
7732+ return atomic_add_return_unchecked(1, v);
7733+}
7734 #define atomic_dec_return(v) (atomic_sub_return(1, v))
7735
7736 /* The 64-bit atomic type */
7737@@ -204,6 +396,18 @@ static inline long atomic64_read(const a
7738 }
7739
7740 /**
7741+ * atomic64_read_unchecked - read atomic64 variable
7742+ * @v: pointer of type atomic64_unchecked_t
7743+ *
7744+ * Atomically reads the value of @v.
7745+ * Doesn't imply a read memory barrier.
7746+ */
7747+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7748+{
7749+ return v->counter;
7750+}
7751+
7752+/**
7753 * atomic64_set - set atomic64 variable
7754 * @v: pointer to type atomic64_t
7755 * @i: required value
7756@@ -216,6 +420,18 @@ static inline void atomic64_set(atomic64
7757 }
7758
7759 /**
7760+ * atomic64_set_unchecked - set atomic64 variable
7761+ * @v: pointer to type atomic64_unchecked_t
7762+ * @i: required value
7763+ *
7764+ * Atomically sets the value of @v to @i.
7765+ */
7766+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7767+{
7768+ v->counter = i;
7769+}
7770+
7771+/**
7772 * atomic64_add - add integer to atomic64 variable
7773 * @i: integer value to add
7774 * @v: pointer to type atomic64_t
7775@@ -224,6 +440,28 @@ static inline void atomic64_set(atomic64
7776 */
7777 static inline void atomic64_add(long i, atomic64_t *v)
7778 {
7779+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
7780+
7781+#ifdef CONFIG_PAX_REFCOUNT
7782+ "jno 0f\n"
7783+ LOCK_PREFIX "subq %1,%0\n"
7784+ "int $4\n0:\n"
7785+ _ASM_EXTABLE(0b, 0b)
7786+#endif
7787+
7788+ : "=m" (v->counter)
7789+ : "er" (i), "m" (v->counter));
7790+}
7791+
7792+/**
7793+ * atomic64_add_unchecked - add integer to atomic64 variable
7794+ * @i: integer value to add
7795+ * @v: pointer to type atomic64_unchecked_t
7796+ *
7797+ * Atomically adds @i to @v.
7798+ */
7799+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
7800+{
7801 asm volatile(LOCK_PREFIX "addq %1,%0"
7802 : "=m" (v->counter)
7803 : "er" (i), "m" (v->counter));
7804@@ -238,7 +476,15 @@ static inline void atomic64_add(long i,
7805 */
7806 static inline void atomic64_sub(long i, atomic64_t *v)
7807 {
7808- asm volatile(LOCK_PREFIX "subq %1,%0"
7809+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
7810+
7811+#ifdef CONFIG_PAX_REFCOUNT
7812+ "jno 0f\n"
7813+ LOCK_PREFIX "addq %1,%0\n"
7814+ "int $4\n0:\n"
7815+ _ASM_EXTABLE(0b, 0b)
7816+#endif
7817+
7818 : "=m" (v->counter)
7819 : "er" (i), "m" (v->counter));
7820 }
7821@@ -256,7 +502,16 @@ static inline int atomic64_sub_and_test(
7822 {
7823 unsigned char c;
7824
7825- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
7826+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
7827+
7828+#ifdef CONFIG_PAX_REFCOUNT
7829+ "jno 0f\n"
7830+ LOCK_PREFIX "addq %2,%0\n"
7831+ "int $4\n0:\n"
7832+ _ASM_EXTABLE(0b, 0b)
7833+#endif
7834+
7835+ "sete %1\n"
7836 : "=m" (v->counter), "=qm" (c)
7837 : "er" (i), "m" (v->counter) : "memory");
7838 return c;
7839@@ -270,6 +525,27 @@ static inline int atomic64_sub_and_test(
7840 */
7841 static inline void atomic64_inc(atomic64_t *v)
7842 {
7843+ asm volatile(LOCK_PREFIX "incq %0\n"
7844+
7845+#ifdef CONFIG_PAX_REFCOUNT
7846+ "jno 0f\n"
7847+ LOCK_PREFIX "decq %0\n"
7848+ "int $4\n0:\n"
7849+ _ASM_EXTABLE(0b, 0b)
7850+#endif
7851+
7852+ : "=m" (v->counter)
7853+ : "m" (v->counter));
7854+}
7855+
7856+/**
7857+ * atomic64_inc_unchecked - increment atomic64 variable
7858+ * @v: pointer to type atomic64_unchecked_t
7859+ *
7860+ * Atomically increments @v by 1.
7861+ */
7862+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7863+{
7864 asm volatile(LOCK_PREFIX "incq %0"
7865 : "=m" (v->counter)
7866 : "m" (v->counter));
7867@@ -283,7 +559,28 @@ static inline void atomic64_inc(atomic64
7868 */
7869 static inline void atomic64_dec(atomic64_t *v)
7870 {
7871- asm volatile(LOCK_PREFIX "decq %0"
7872+ asm volatile(LOCK_PREFIX "decq %0\n"
7873+
7874+#ifdef CONFIG_PAX_REFCOUNT
7875+ "jno 0f\n"
7876+ LOCK_PREFIX "incq %0\n"
7877+ "int $4\n0:\n"
7878+ _ASM_EXTABLE(0b, 0b)
7879+#endif
7880+
7881+ : "=m" (v->counter)
7882+ : "m" (v->counter));
7883+}
7884+
7885+/**
7886+ * atomic64_dec_unchecked - decrement atomic64 variable
7887+ * @v: pointer to type atomic64_unchecked_t
7888+ *
7889+ * Atomically decrements @v by 1.
7890+ */
7891+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7892+{
7893+ asm volatile(LOCK_PREFIX "decq %0\n"
7894 : "=m" (v->counter)
7895 : "m" (v->counter));
7896 }
7897@@ -300,7 +597,16 @@ static inline int atomic64_dec_and_test(
7898 {
7899 unsigned char c;
7900
7901- asm volatile(LOCK_PREFIX "decq %0; sete %1"
7902+ asm volatile(LOCK_PREFIX "decq %0\n"
7903+
7904+#ifdef CONFIG_PAX_REFCOUNT
7905+ "jno 0f\n"
7906+ LOCK_PREFIX "incq %0\n"
7907+ "int $4\n0:\n"
7908+ _ASM_EXTABLE(0b, 0b)
7909+#endif
7910+
7911+ "sete %1\n"
7912 : "=m" (v->counter), "=qm" (c)
7913 : "m" (v->counter) : "memory");
7914 return c != 0;
7915@@ -318,7 +624,16 @@ static inline int atomic64_inc_and_test(
7916 {
7917 unsigned char c;
7918
7919- asm volatile(LOCK_PREFIX "incq %0; sete %1"
7920+ asm volatile(LOCK_PREFIX "incq %0\n"
7921+
7922+#ifdef CONFIG_PAX_REFCOUNT
7923+ "jno 0f\n"
7924+ LOCK_PREFIX "decq %0\n"
7925+ "int $4\n0:\n"
7926+ _ASM_EXTABLE(0b, 0b)
7927+#endif
7928+
7929+ "sete %1\n"
7930 : "=m" (v->counter), "=qm" (c)
7931 : "m" (v->counter) : "memory");
7932 return c != 0;
7933@@ -337,7 +652,16 @@ static inline int atomic64_add_negative(
7934 {
7935 unsigned char c;
7936
7937- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
7938+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
7939+
7940+#ifdef CONFIG_PAX_REFCOUNT
7941+ "jno 0f\n"
7942+ LOCK_PREFIX "subq %2,%0\n"
7943+ "int $4\n0:\n"
7944+ _ASM_EXTABLE(0b, 0b)
7945+#endif
7946+
7947+ "sets %1\n"
7948 : "=m" (v->counter), "=qm" (c)
7949 : "er" (i), "m" (v->counter) : "memory");
7950 return c;
7951@@ -353,7 +677,31 @@ static inline int atomic64_add_negative(
7952 static inline long atomic64_add_return(long i, atomic64_t *v)
7953 {
7954 long __i = i;
7955- asm volatile(LOCK_PREFIX "xaddq %0, %1;"
7956+ asm volatile(LOCK_PREFIX "xaddq %0, %1\n"
7957+
7958+#ifdef CONFIG_PAX_REFCOUNT
7959+ "jno 0f\n"
7960+ "movq %0, %1\n"
7961+ "int $4\n0:\n"
7962+ _ASM_EXTABLE(0b, 0b)
7963+#endif
7964+
7965+ : "+r" (i), "+m" (v->counter)
7966+ : : "memory");
7967+ return i + __i;
7968+}
7969+
7970+/**
7971+ * atomic64_add_return_unchecked - add and return
7972+ * @i: integer value to add
7973+ * @v: pointer to type atomic64_unchecked_t
7974+ *
7975+ * Atomically adds @i to @v and returns @i + @v
7976+ */
7977+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7978+{
7979+ long __i = i;
7980+ asm volatile(LOCK_PREFIX "xaddq %0, %1"
7981 : "+r" (i), "+m" (v->counter)
7982 : : "memory");
7983 return i + __i;
7984@@ -365,6 +713,10 @@ static inline long atomic64_sub_return(l
7985 }
7986
7987 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
7988+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7989+{
7990+ return atomic64_add_return_unchecked(1, v);
7991+}
7992 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
7993
7994 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
7995@@ -372,21 +724,41 @@ static inline long atomic64_cmpxchg(atom
7996 return cmpxchg(&v->counter, old, new);
7997 }
7998
7999+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
8000+{
8001+ return cmpxchg(&v->counter, old, new);
8002+}
8003+
8004 static inline long atomic64_xchg(atomic64_t *v, long new)
8005 {
8006 return xchg(&v->counter, new);
8007 }
8008
8009+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
8010+{
8011+ return xchg(&v->counter, new);
8012+}
8013+
8014 static inline long atomic_cmpxchg(atomic_t *v, int old, int new)
8015 {
8016 return cmpxchg(&v->counter, old, new);
8017 }
8018
8019+static inline long atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
8020+{
8021+ return cmpxchg(&v->counter, old, new);
8022+}
8023+
8024 static inline long atomic_xchg(atomic_t *v, int new)
8025 {
8026 return xchg(&v->counter, new);
8027 }
8028
8029+static inline long atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
8030+{
8031+ return xchg(&v->counter, new);
8032+}
8033+
8034 /**
8035 * atomic_add_unless - add unless the number is a given value
8036 * @v: pointer of type atomic_t
8037@@ -398,17 +770,30 @@ static inline long atomic_xchg(atomic_t
8038 */
8039 static inline int atomic_add_unless(atomic_t *v, int a, int u)
8040 {
8041- int c, old;
8042+ int c, old, new;
8043 c = atomic_read(v);
8044 for (;;) {
8045- if (unlikely(c == (u)))
8046+ if (unlikely(c == u))
8047 break;
8048- old = atomic_cmpxchg((v), c, c + (a));
8049+
8050+ asm volatile("addl %2,%0\n"
8051+
8052+#ifdef CONFIG_PAX_REFCOUNT
8053+ "jno 0f\n"
8054+ "subl %2,%0\n"
8055+ "int $4\n0:\n"
8056+ _ASM_EXTABLE(0b, 0b)
8057+#endif
8058+
8059+ : "=r" (new)
8060+ : "0" (c), "ir" (a));
8061+
8062+ old = atomic_cmpxchg(v, c, new);
8063 if (likely(old == c))
8064 break;
8065 c = old;
8066 }
8067- return c != (u);
8068+ return c != u;
8069 }
8070
8071 #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)
8072@@ -424,17 +809,30 @@ static inline int atomic_add_unless(atom
8073 */
8074 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
8075 {
8076- long c, old;
8077+ long c, old, new;
8078 c = atomic64_read(v);
8079 for (;;) {
8080- if (unlikely(c == (u)))
8081+ if (unlikely(c == u))
8082 break;
8083- old = atomic64_cmpxchg((v), c, c + (a));
8084+
8085+ asm volatile("addq %2,%0\n"
8086+
8087+#ifdef CONFIG_PAX_REFCOUNT
8088+ "jno 0f\n"
8089+ "subq %2,%0\n"
8090+ "int $4\n0:\n"
8091+ _ASM_EXTABLE(0b, 0b)
8092+#endif
8093+
8094+ : "=r" (new)
8095+ : "0" (c), "er" (a));
8096+
8097+ old = atomic64_cmpxchg(v, c, new);
8098 if (likely(old == c))
8099 break;
8100 c = old;
8101 }
8102- return c != (u);
8103+ return c != u;
8104 }
8105
8106 /**
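
The atomic hunks above all apply the same REFCOUNT guard: do the locked operation, branch on the overflow flag, and on signed overflow undo the operation and raise #OF ("int $4") so the trap handler can report the wraparound, while the new *_unchecked variants keep the original unguarded behaviour for counters that are allowed to wrap. A minimal userspace sketch of that pattern follows; my_atomic_t and my_atomic_inc are hypothetical names for this illustration only, the literal "lock" prefix stands in for the kernel's LOCK_PREFIX, and the _ASM_EXTABLE bookkeeping is omitted.

typedef struct { volatile int counter; } my_atomic_t;

/* increment with signed-overflow detection, mirroring the PAX_REFCOUNT
 * branch added to atomic_inc()/atomic64_inc() in the hunks above */
static inline void my_atomic_inc(my_atomic_t *v)
{
        asm volatile("lock incl %0\n"
                     "jno 0f\n"          /* no overflow: skip the fixup */
                     "lock decl %0\n"    /* overflow: undo the increment */
                     "int $4\n"          /* and raise #OF for the handler */
                     "0:\n"
                     : "+m" (v->counter));
}

Undoing the operation keeps the counter at its pre-overflow value, so a reference-count overflow becomes a reported event rather than a silent wrap that could later be turned into a use-after-free.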
8107diff -urNp linux-2.6.32.43/arch/x86/include/asm/bitops.h linux-2.6.32.43/arch/x86/include/asm/bitops.h
8108--- linux-2.6.32.43/arch/x86/include/asm/bitops.h 2011-03-27 14:31:47.000000000 -0400
8109+++ linux-2.6.32.43/arch/x86/include/asm/bitops.h 2011-04-17 15:56:46.000000000 -0400
8110@@ -38,7 +38,7 @@
8111 * a mask operation on a byte.
8112 */
8113 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
8114-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
8115+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
8116 #define CONST_MASK(nr) (1 << ((nr) & 7))
8117
8118 /**
8119diff -urNp linux-2.6.32.43/arch/x86/include/asm/boot.h linux-2.6.32.43/arch/x86/include/asm/boot.h
8120--- linux-2.6.32.43/arch/x86/include/asm/boot.h 2011-03-27 14:31:47.000000000 -0400
8121+++ linux-2.6.32.43/arch/x86/include/asm/boot.h 2011-04-17 15:56:46.000000000 -0400
8122@@ -11,10 +11,15 @@
8123 #include <asm/pgtable_types.h>
8124
8125 /* Physical address where kernel should be loaded. */
8126-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8127+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
8128 + (CONFIG_PHYSICAL_ALIGN - 1)) \
8129 & ~(CONFIG_PHYSICAL_ALIGN - 1))
8130
8131+#ifndef __ASSEMBLY__
8132+extern unsigned char __LOAD_PHYSICAL_ADDR[];
8133+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
8134+#endif
8135+
8136 /* Minimum kernel alignment, as a power of two */
8137 #ifdef CONFIG_X86_64
8138 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
8139diff -urNp linux-2.6.32.43/arch/x86/include/asm/cacheflush.h linux-2.6.32.43/arch/x86/include/asm/cacheflush.h
8140--- linux-2.6.32.43/arch/x86/include/asm/cacheflush.h 2011-03-27 14:31:47.000000000 -0400
8141+++ linux-2.6.32.43/arch/x86/include/asm/cacheflush.h 2011-04-17 15:56:46.000000000 -0400
8142@@ -60,7 +60,7 @@ PAGEFLAG(WC, WC)
8143 static inline unsigned long get_page_memtype(struct page *pg)
8144 {
8145 if (!PageUncached(pg) && !PageWC(pg))
8146- return -1;
8147+ return ~0UL;
8148 else if (!PageUncached(pg) && PageWC(pg))
8149 return _PAGE_CACHE_WC;
8150 else if (PageUncached(pg) && !PageWC(pg))
8151@@ -85,7 +85,7 @@ static inline void set_page_memtype(stru
8152 SetPageWC(pg);
8153 break;
8154 default:
8155- case -1:
8156+ case ~0UL:
8157 ClearPageUncached(pg);
8158 ClearPageWC(pg);
8159 break;
8160diff -urNp linux-2.6.32.43/arch/x86/include/asm/cache.h linux-2.6.32.43/arch/x86/include/asm/cache.h
8161--- linux-2.6.32.43/arch/x86/include/asm/cache.h 2011-03-27 14:31:47.000000000 -0400
8162+++ linux-2.6.32.43/arch/x86/include/asm/cache.h 2011-07-06 19:53:33.000000000 -0400
8163@@ -5,9 +5,10 @@
8164
8165 /* L1 cache line size */
8166 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
8167-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
8168+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
8169
8170 #define __read_mostly __attribute__((__section__(".data.read_mostly")))
8171+#define __read_only __attribute__((__section__(".data.read_only")))
8172
8173 #ifdef CONFIG_X86_VSMP
8174 /* vSMP Internode cacheline shift */
8175diff -urNp linux-2.6.32.43/arch/x86/include/asm/checksum_32.h linux-2.6.32.43/arch/x86/include/asm/checksum_32.h
8176--- linux-2.6.32.43/arch/x86/include/asm/checksum_32.h 2011-03-27 14:31:47.000000000 -0400
8177+++ linux-2.6.32.43/arch/x86/include/asm/checksum_32.h 2011-04-17 15:56:46.000000000 -0400
8178@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene
8179 int len, __wsum sum,
8180 int *src_err_ptr, int *dst_err_ptr);
8181
8182+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
8183+ int len, __wsum sum,
8184+ int *src_err_ptr, int *dst_err_ptr);
8185+
8186+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
8187+ int len, __wsum sum,
8188+ int *src_err_ptr, int *dst_err_ptr);
8189+
8190 /*
8191 * Note: when you get a NULL pointer exception here this means someone
8192 * passed in an incorrect kernel address to one of these functions.
8193@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f
8194 int *err_ptr)
8195 {
8196 might_sleep();
8197- return csum_partial_copy_generic((__force void *)src, dst,
8198+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
8199 len, sum, err_ptr, NULL);
8200 }
8201
8202@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us
8203 {
8204 might_sleep();
8205 if (access_ok(VERIFY_WRITE, dst, len))
8206- return csum_partial_copy_generic(src, (__force void *)dst,
8207+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
8208 len, sum, NULL, err_ptr);
8209
8210 if (len)
8211diff -urNp linux-2.6.32.43/arch/x86/include/asm/desc_defs.h linux-2.6.32.43/arch/x86/include/asm/desc_defs.h
8212--- linux-2.6.32.43/arch/x86/include/asm/desc_defs.h 2011-03-27 14:31:47.000000000 -0400
8213+++ linux-2.6.32.43/arch/x86/include/asm/desc_defs.h 2011-04-17 15:56:46.000000000 -0400
8214@@ -31,6 +31,12 @@ struct desc_struct {
8215 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
8216 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
8217 };
8218+ struct {
8219+ u16 offset_low;
8220+ u16 seg;
8221+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
8222+ unsigned offset_high: 16;
8223+ } gate;
8224 };
8225 } __attribute__((packed));
8226
8227diff -urNp linux-2.6.32.43/arch/x86/include/asm/desc.h linux-2.6.32.43/arch/x86/include/asm/desc.h
8228--- linux-2.6.32.43/arch/x86/include/asm/desc.h 2011-03-27 14:31:47.000000000 -0400
8229+++ linux-2.6.32.43/arch/x86/include/asm/desc.h 2011-04-23 12:56:10.000000000 -0400
8230@@ -4,6 +4,7 @@
8231 #include <asm/desc_defs.h>
8232 #include <asm/ldt.h>
8233 #include <asm/mmu.h>
8234+#include <asm/pgtable.h>
8235 #include <linux/smp.h>
8236
8237 static inline void fill_ldt(struct desc_struct *desc,
8238@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_
8239 desc->base1 = (info->base_addr & 0x00ff0000) >> 16;
8240 desc->type = (info->read_exec_only ^ 1) << 1;
8241 desc->type |= info->contents << 2;
8242+ desc->type |= info->seg_not_present ^ 1;
8243 desc->s = 1;
8244 desc->dpl = 0x3;
8245 desc->p = info->seg_not_present ^ 1;
8246@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_
8247 }
8248
8249 extern struct desc_ptr idt_descr;
8250-extern gate_desc idt_table[];
8251-
8252-struct gdt_page {
8253- struct desc_struct gdt[GDT_ENTRIES];
8254-} __attribute__((aligned(PAGE_SIZE)));
8255-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
8256+extern gate_desc idt_table[256];
8257
8258+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
8259 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
8260 {
8261- return per_cpu(gdt_page, cpu).gdt;
8262+ return cpu_gdt_table[cpu];
8263 }
8264
8265 #ifdef CONFIG_X86_64
8266@@ -65,9 +63,14 @@ static inline void pack_gate(gate_desc *
8267 unsigned long base, unsigned dpl, unsigned flags,
8268 unsigned short seg)
8269 {
8270- gate->a = (seg << 16) | (base & 0xffff);
8271- gate->b = (base & 0xffff0000) |
8272- (((0x80 | type | (dpl << 5)) & 0xff) << 8);
8273+ gate->gate.offset_low = base;
8274+ gate->gate.seg = seg;
8275+ gate->gate.reserved = 0;
8276+ gate->gate.type = type;
8277+ gate->gate.s = 0;
8278+ gate->gate.dpl = dpl;
8279+ gate->gate.p = 1;
8280+ gate->gate.offset_high = base >> 16;
8281 }
8282
8283 #endif
8284@@ -115,13 +118,17 @@ static inline void paravirt_free_ldt(str
8285 static inline void native_write_idt_entry(gate_desc *idt, int entry,
8286 const gate_desc *gate)
8287 {
8288+ pax_open_kernel();
8289 memcpy(&idt[entry], gate, sizeof(*gate));
8290+ pax_close_kernel();
8291 }
8292
8293 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry,
8294 const void *desc)
8295 {
8296+ pax_open_kernel();
8297 memcpy(&ldt[entry], desc, 8);
8298+ pax_close_kernel();
8299 }
8300
8301 static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry,
8302@@ -139,7 +146,10 @@ static inline void native_write_gdt_entr
8303 size = sizeof(struct desc_struct);
8304 break;
8305 }
8306+
8307+ pax_open_kernel();
8308 memcpy(&gdt[entry], desc, size);
8309+ pax_close_kernel();
8310 }
8311
8312 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
8313@@ -211,7 +221,9 @@ static inline void native_set_ldt(const
8314
8315 static inline void native_load_tr_desc(void)
8316 {
8317+ pax_open_kernel();
8318 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
8319+ pax_close_kernel();
8320 }
8321
8322 static inline void native_load_gdt(const struct desc_ptr *dtr)
8323@@ -246,8 +258,10 @@ static inline void native_load_tls(struc
8324 unsigned int i;
8325 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
8326
8327+ pax_open_kernel();
8328 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
8329 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
8330+ pax_close_kernel();
8331 }
8332
8333 #define _LDT_empty(info) \
8334@@ -309,7 +323,7 @@ static inline void set_desc_limit(struct
8335 desc->limit = (limit >> 16) & 0xf;
8336 }
8337
8338-static inline void _set_gate(int gate, unsigned type, void *addr,
8339+static inline void _set_gate(int gate, unsigned type, const void *addr,
8340 unsigned dpl, unsigned ist, unsigned seg)
8341 {
8342 gate_desc s;
8343@@ -327,7 +341,7 @@ static inline void _set_gate(int gate, u
8344 * Pentium F0 0F bugfix can have resulted in the mapped
8345 * IDT being write-protected.
8346 */
8347-static inline void set_intr_gate(unsigned int n, void *addr)
8348+static inline void set_intr_gate(unsigned int n, const void *addr)
8349 {
8350 BUG_ON((unsigned)n > 0xFF);
8351 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
8352@@ -356,19 +370,19 @@ static inline void alloc_intr_gate(unsig
8353 /*
8354 * This routine sets up an interrupt gate at directory privilege level 3.
8355 */
8356-static inline void set_system_intr_gate(unsigned int n, void *addr)
8357+static inline void set_system_intr_gate(unsigned int n, const void *addr)
8358 {
8359 BUG_ON((unsigned)n > 0xFF);
8360 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
8361 }
8362
8363-static inline void set_system_trap_gate(unsigned int n, void *addr)
8364+static inline void set_system_trap_gate(unsigned int n, const void *addr)
8365 {
8366 BUG_ON((unsigned)n > 0xFF);
8367 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
8368 }
8369
8370-static inline void set_trap_gate(unsigned int n, void *addr)
8371+static inline void set_trap_gate(unsigned int n, const void *addr)
8372 {
8373 BUG_ON((unsigned)n > 0xFF);
8374 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
8375@@ -377,19 +391,31 @@ static inline void set_trap_gate(unsigne
8376 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
8377 {
8378 BUG_ON((unsigned)n > 0xFF);
8379- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
8380+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
8381 }
8382
8383-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
8384+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
8385 {
8386 BUG_ON((unsigned)n > 0xFF);
8387 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
8388 }
8389
8390-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
8391+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
8392 {
8393 BUG_ON((unsigned)n > 0xFF);
8394 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
8395 }
8396
8397+#ifdef CONFIG_X86_32
8398+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
8399+{
8400+ struct desc_struct d;
8401+
8402+ if (likely(limit))
8403+ limit = (limit - 1UL) >> PAGE_SHIFT;
8404+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
8405+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
8406+}
8407+#endif
8408+
8409 #endif /* _ASM_X86_DESC_H */
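
The desc_defs.h/desc.h changes replace the hand-built a/b words in the 32-bit pack_gate() with a named "gate" bitfield view and bracket every descriptor-table write with pax_open_kernel()/pax_close_kernel(). Below is a small userspace check, assuming gcc bitfield layout on little-endian x86, that the bitfield view encodes the same two 32-bit words as the removed arithmetic; the struct and values are local to this sketch.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct gate_bits {
        uint16_t offset_low;
        uint16_t seg;
        unsigned reserved : 8, type : 4, s : 1, dpl : 2, p : 1;
        unsigned offset_high : 16;
} __attribute__((packed));

int main(void)
{
        unsigned base = 0xc0101234, seg = 0x60, type = 0xE, dpl = 0;

        /* old encoding, from the removed pack_gate() body */
        uint32_t a = (seg << 16) | (base & 0xffff);
        uint32_t b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);

        /* new encoding, through the bitfield view */
        struct gate_bits g = {
                .offset_low = base, .seg = seg, .reserved = 0,
                .type = type, .s = 0, .dpl = dpl, .p = 1,
                .offset_high = base >> 16,
        };
        uint32_t words[2];
        memcpy(words, &g, sizeof(words));

        printf("old: %08x %08x\nnew: %08x %08x\n", a, b, words[0], words[1]);
        return !(a == words[0] && b == words[1]);
}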
8410diff -urNp linux-2.6.32.43/arch/x86/include/asm/device.h linux-2.6.32.43/arch/x86/include/asm/device.h
8411--- linux-2.6.32.43/arch/x86/include/asm/device.h 2011-03-27 14:31:47.000000000 -0400
8412+++ linux-2.6.32.43/arch/x86/include/asm/device.h 2011-04-17 15:56:46.000000000 -0400
8413@@ -6,7 +6,7 @@ struct dev_archdata {
8414 void *acpi_handle;
8415 #endif
8416 #ifdef CONFIG_X86_64
8417-struct dma_map_ops *dma_ops;
8418+ const struct dma_map_ops *dma_ops;
8419 #endif
8420 #ifdef CONFIG_DMAR
8421 void *iommu; /* hook for IOMMU specific extension */
8422diff -urNp linux-2.6.32.43/arch/x86/include/asm/dma-mapping.h linux-2.6.32.43/arch/x86/include/asm/dma-mapping.h
8423--- linux-2.6.32.43/arch/x86/include/asm/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
8424+++ linux-2.6.32.43/arch/x86/include/asm/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
8425@@ -25,9 +25,9 @@ extern int iommu_merge;
8426 extern struct device x86_dma_fallback_dev;
8427 extern int panic_on_overflow;
8428
8429-extern struct dma_map_ops *dma_ops;
8430+extern const struct dma_map_ops *dma_ops;
8431
8432-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
8433+static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
8434 {
8435 #ifdef CONFIG_X86_32
8436 return dma_ops;
8437@@ -44,7 +44,7 @@ static inline struct dma_map_ops *get_dm
8438 /* Make sure we keep the same behaviour */
8439 static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
8440 {
8441- struct dma_map_ops *ops = get_dma_ops(dev);
8442+ const struct dma_map_ops *ops = get_dma_ops(dev);
8443 if (ops->mapping_error)
8444 return ops->mapping_error(dev, dma_addr);
8445
8446@@ -122,7 +122,7 @@ static inline void *
8447 dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
8448 gfp_t gfp)
8449 {
8450- struct dma_map_ops *ops = get_dma_ops(dev);
8451+ const struct dma_map_ops *ops = get_dma_ops(dev);
8452 void *memory;
8453
8454 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
8455@@ -149,7 +149,7 @@ dma_alloc_coherent(struct device *dev, s
8456 static inline void dma_free_coherent(struct device *dev, size_t size,
8457 void *vaddr, dma_addr_t bus)
8458 {
8459- struct dma_map_ops *ops = get_dma_ops(dev);
8460+ const struct dma_map_ops *ops = get_dma_ops(dev);
8461
8462 WARN_ON(irqs_disabled()); /* for portability */
8463
8464diff -urNp linux-2.6.32.43/arch/x86/include/asm/e820.h linux-2.6.32.43/arch/x86/include/asm/e820.h
8465--- linux-2.6.32.43/arch/x86/include/asm/e820.h 2011-03-27 14:31:47.000000000 -0400
8466+++ linux-2.6.32.43/arch/x86/include/asm/e820.h 2011-04-17 15:56:46.000000000 -0400
8467@@ -133,7 +133,7 @@ extern char *default_machine_specific_me
8468 #define ISA_END_ADDRESS 0x100000
8469 #define is_ISA_range(s, e) ((s) >= ISA_START_ADDRESS && (e) < ISA_END_ADDRESS)
8470
8471-#define BIOS_BEGIN 0x000a0000
8472+#define BIOS_BEGIN 0x000c0000
8473 #define BIOS_END 0x00100000
8474
8475 #ifdef __KERNEL__
8476diff -urNp linux-2.6.32.43/arch/x86/include/asm/elf.h linux-2.6.32.43/arch/x86/include/asm/elf.h
8477--- linux-2.6.32.43/arch/x86/include/asm/elf.h 2011-03-27 14:31:47.000000000 -0400
8478+++ linux-2.6.32.43/arch/x86/include/asm/elf.h 2011-04-17 15:56:46.000000000 -0400
8479@@ -257,7 +257,25 @@ extern int force_personality32;
8480 the loader. We need to make sure that it is out of the way of the program
8481 that it will "exec", and that there is sufficient room for the brk. */
8482
8483+#ifdef CONFIG_PAX_SEGMEXEC
8484+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
8485+#else
8486 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
8487+#endif
8488+
8489+#ifdef CONFIG_PAX_ASLR
8490+#ifdef CONFIG_X86_32
8491+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
8492+
8493+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8494+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
8495+#else
8496+#define PAX_ELF_ET_DYN_BASE 0x400000UL
8497+
8498+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8499+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
8500+#endif
8501+#endif
8502
8503 /* This yields a mask that user programs can use to figure out what
8504 instruction set this CPU supports. This could be done in user space,
8505@@ -311,8 +329,7 @@ do { \
8506 #define ARCH_DLINFO \
8507 do { \
8508 if (vdso_enabled) \
8509- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
8510- (unsigned long)current->mm->context.vdso); \
8511+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\
8512 } while (0)
8513
8514 #define AT_SYSINFO 32
8515@@ -323,7 +340,7 @@ do { \
8516
8517 #endif /* !CONFIG_X86_32 */
8518
8519-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
8520+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
8521
8522 #define VDSO_ENTRY \
8523 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
8524@@ -337,7 +354,4 @@ extern int arch_setup_additional_pages(s
8525 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
8526 #define compat_arch_setup_additional_pages syscall32_setup_pages
8527
8528-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
8529-#define arch_randomize_brk arch_randomize_brk
8530-
8531 #endif /* _ASM_X86_ELF_H */
8532diff -urNp linux-2.6.32.43/arch/x86/include/asm/emergency-restart.h linux-2.6.32.43/arch/x86/include/asm/emergency-restart.h
8533--- linux-2.6.32.43/arch/x86/include/asm/emergency-restart.h 2011-03-27 14:31:47.000000000 -0400
8534+++ linux-2.6.32.43/arch/x86/include/asm/emergency-restart.h 2011-05-22 23:02:06.000000000 -0400
8535@@ -15,6 +15,6 @@ enum reboot_type {
8536
8537 extern enum reboot_type reboot_type;
8538
8539-extern void machine_emergency_restart(void);
8540+extern void machine_emergency_restart(void) __noreturn;
8541
8542 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
8543diff -urNp linux-2.6.32.43/arch/x86/include/asm/futex.h linux-2.6.32.43/arch/x86/include/asm/futex.h
8544--- linux-2.6.32.43/arch/x86/include/asm/futex.h 2011-03-27 14:31:47.000000000 -0400
8545+++ linux-2.6.32.43/arch/x86/include/asm/futex.h 2011-04-17 15:56:46.000000000 -0400
8546@@ -12,16 +12,18 @@
8547 #include <asm/system.h>
8548
8549 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
8550+ typecheck(u32 *, uaddr); \
8551 asm volatile("1:\t" insn "\n" \
8552 "2:\t.section .fixup,\"ax\"\n" \
8553 "3:\tmov\t%3, %1\n" \
8554 "\tjmp\t2b\n" \
8555 "\t.previous\n" \
8556 _ASM_EXTABLE(1b, 3b) \
8557- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
8558+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 *)____m(uaddr))\
8559 : "i" (-EFAULT), "0" (oparg), "1" (0))
8560
8561 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
8562+ typecheck(u32 *, uaddr); \
8563 asm volatile("1:\tmovl %2, %0\n" \
8564 "\tmovl\t%0, %3\n" \
8565 "\t" insn "\n" \
8566@@ -34,10 +36,10 @@
8567 _ASM_EXTABLE(1b, 4b) \
8568 _ASM_EXTABLE(2b, 4b) \
8569 : "=&a" (oldval), "=&r" (ret), \
8570- "+m" (*uaddr), "=&r" (tem) \
8571+ "+m" (*(u32 *)____m(uaddr)), "=&r" (tem) \
8572 : "r" (oparg), "i" (-EFAULT), "1" (0))
8573
8574-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
8575+static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
8576 {
8577 int op = (encoded_op >> 28) & 7;
8578 int cmp = (encoded_op >> 24) & 15;
8579@@ -61,10 +63,10 @@ static inline int futex_atomic_op_inuser
8580
8581 switch (op) {
8582 case FUTEX_OP_SET:
8583- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
8584+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
8585 break;
8586 case FUTEX_OP_ADD:
8587- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
8588+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
8589 uaddr, oparg);
8590 break;
8591 case FUTEX_OP_OR:
8592@@ -109,7 +111,7 @@ static inline int futex_atomic_op_inuser
8593 return ret;
8594 }
8595
8596-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval,
8597+static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval,
8598 int newval)
8599 {
8600
8601@@ -119,16 +121,16 @@ static inline int futex_atomic_cmpxchg_i
8602 return -ENOSYS;
8603 #endif
8604
8605- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
8606+ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
8607 return -EFAULT;
8608
8609- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n"
8610+ asm volatile("1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %1\n"
8611 "2:\t.section .fixup, \"ax\"\n"
8612 "3:\tmov %2, %0\n"
8613 "\tjmp 2b\n"
8614 "\t.previous\n"
8615 _ASM_EXTABLE(1b, 3b)
8616- : "=a" (oldval), "+m" (*uaddr)
8617+ : "=a" (oldval), "+m" (*(u32 *)____m(uaddr))
8618 : "i" (-EFAULT), "r" (newval), "0" (oldval)
8619 : "memory"
8620 );
8621diff -urNp linux-2.6.32.43/arch/x86/include/asm/hw_irq.h linux-2.6.32.43/arch/x86/include/asm/hw_irq.h
8622--- linux-2.6.32.43/arch/x86/include/asm/hw_irq.h 2011-03-27 14:31:47.000000000 -0400
8623+++ linux-2.6.32.43/arch/x86/include/asm/hw_irq.h 2011-05-04 17:56:28.000000000 -0400
8624@@ -92,8 +92,8 @@ extern void setup_ioapic_dest(void);
8625 extern void enable_IO_APIC(void);
8626
8627 /* Statistics */
8628-extern atomic_t irq_err_count;
8629-extern atomic_t irq_mis_count;
8630+extern atomic_unchecked_t irq_err_count;
8631+extern atomic_unchecked_t irq_mis_count;
8632
8633 /* EISA */
8634 extern void eisa_set_level_irq(unsigned int irq);
8635diff -urNp linux-2.6.32.43/arch/x86/include/asm/i387.h linux-2.6.32.43/arch/x86/include/asm/i387.h
8636--- linux-2.6.32.43/arch/x86/include/asm/i387.h 2011-03-27 14:31:47.000000000 -0400
8637+++ linux-2.6.32.43/arch/x86/include/asm/i387.h 2011-04-17 15:56:46.000000000 -0400
8638@@ -60,6 +60,11 @@ static inline int fxrstor_checking(struc
8639 {
8640 int err;
8641
8642+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8643+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8644+ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE);
8645+#endif
8646+
8647 asm volatile("1: rex64/fxrstor (%[fx])\n\t"
8648 "2:\n"
8649 ".section .fixup,\"ax\"\n"
8650@@ -105,6 +110,11 @@ static inline int fxsave_user(struct i38
8651 {
8652 int err;
8653
8654+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
8655+ if ((unsigned long)fx < PAX_USER_SHADOW_BASE)
8656+ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE);
8657+#endif
8658+
8659 asm volatile("1: rex64/fxsave (%[fx])\n\t"
8660 "2:\n"
8661 ".section .fixup,\"ax\"\n"
8662@@ -195,13 +205,8 @@ static inline int fxrstor_checking(struc
8663 }
8664
8665 /* We need a safe address that is cheap to find and that is already
8666- in L1 during context switch. The best choices are unfortunately
8667- different for UP and SMP */
8668-#ifdef CONFIG_SMP
8669-#define safe_address (__per_cpu_offset[0])
8670-#else
8671-#define safe_address (kstat_cpu(0).cpustat.user)
8672-#endif
8673+ in L1 during context switch. */
8674+#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0)
8675
8676 /*
8677 * These must be called with preempt disabled
8678@@ -291,7 +296,7 @@ static inline void kernel_fpu_begin(void
8679 struct thread_info *me = current_thread_info();
8680 preempt_disable();
8681 if (me->status & TS_USEDFPU)
8682- __save_init_fpu(me->task);
8683+ __save_init_fpu(current);
8684 else
8685 clts();
8686 }
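
The i387.h hunks add a UDEREF guard in front of the fxsave/fxrstor user accesses: if the pointer still looks like a raw userland address (below PAX_USER_SHADOW_BASE), it is shifted into the kernel's shadow mapping of user space before being dereferenced. A minimal sketch of that rebasing idiom, with a placeholder base value rather than the real PaX constant:

/* illustrative value only; the real offset is PAX_USER_SHADOW_BASE */
#define SHADOW_BASE 0x0000100000000000UL

/* rebase a user pointer into the shadow mapping, as in fxrstor_checking()
 * and fxsave_user() above */
static inline void *shadow_rebase(void *uptr)
{
        if ((unsigned long)uptr < SHADOW_BASE)
                uptr = (void *)((unsigned long)uptr + SHADOW_BASE);
        return uptr;
}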
8687diff -urNp linux-2.6.32.43/arch/x86/include/asm/io_32.h linux-2.6.32.43/arch/x86/include/asm/io_32.h
8688--- linux-2.6.32.43/arch/x86/include/asm/io_32.h 2011-03-27 14:31:47.000000000 -0400
8689+++ linux-2.6.32.43/arch/x86/include/asm/io_32.h 2011-04-17 15:56:46.000000000 -0400
8690@@ -3,6 +3,7 @@
8691
8692 #include <linux/string.h>
8693 #include <linux/compiler.h>
8694+#include <asm/processor.h>
8695
8696 /*
8697 * This file contains the definitions for the x86 IO instructions
8698@@ -42,6 +43,17 @@
8699
8700 #ifdef __KERNEL__
8701
8702+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8703+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8704+{
8705+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8706+}
8707+
8708+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8709+{
8710+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8711+}
8712+
8713 #include <asm-generic/iomap.h>
8714
8715 #include <linux/vmalloc.h>
8716diff -urNp linux-2.6.32.43/arch/x86/include/asm/io_64.h linux-2.6.32.43/arch/x86/include/asm/io_64.h
8717--- linux-2.6.32.43/arch/x86/include/asm/io_64.h 2011-03-27 14:31:47.000000000 -0400
8718+++ linux-2.6.32.43/arch/x86/include/asm/io_64.h 2011-04-17 15:56:46.000000000 -0400
8719@@ -140,6 +140,17 @@ __OUTS(l)
8720
8721 #include <linux/vmalloc.h>
8722
8723+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
8724+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
8725+{
8726+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8727+}
8728+
8729+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
8730+{
8731+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
8732+}
8733+
8734 #include <asm-generic/iomap.h>
8735
8736 void __memcpy_fromio(void *, unsigned long, unsigned);
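
Both io_32.h and io_64.h gain the same ARCH_HAS_VALID_PHYS_ADDR_RANGE helpers: round the end of the requested range up to a page boundary and refuse anything whose last page frame would lie beyond what the CPU can physically address. The same test, restated with local names (PAGE_SHIFT fixed at 12 here; the kernel reads the width from boot_cpu_data.x86_phys_bits):

#include <stddef.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* same comparison as valid_phys_addr_range() in the hunks above */
static int phys_range_ok(unsigned long addr, size_t count, unsigned int phys_bits)
{
        /* end of [addr, addr + count), rounded up to a page, as a frame number */
        unsigned long end_pfn = (addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT;

        /* it must stay below the number of addressable page frames */
        return end_pfn < (1ULL << (phys_bits - PAGE_SHIFT));
}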
8737diff -urNp linux-2.6.32.43/arch/x86/include/asm/iommu.h linux-2.6.32.43/arch/x86/include/asm/iommu.h
8738--- linux-2.6.32.43/arch/x86/include/asm/iommu.h 2011-03-27 14:31:47.000000000 -0400
8739+++ linux-2.6.32.43/arch/x86/include/asm/iommu.h 2011-04-17 15:56:46.000000000 -0400
8740@@ -3,7 +3,7 @@
8741
8742 extern void pci_iommu_shutdown(void);
8743 extern void no_iommu_init(void);
8744-extern struct dma_map_ops nommu_dma_ops;
8745+extern const struct dma_map_ops nommu_dma_ops;
8746 extern int force_iommu, no_iommu;
8747 extern int iommu_detected;
8748 extern int iommu_pass_through;
8749diff -urNp linux-2.6.32.43/arch/x86/include/asm/irqflags.h linux-2.6.32.43/arch/x86/include/asm/irqflags.h
8750--- linux-2.6.32.43/arch/x86/include/asm/irqflags.h 2011-03-27 14:31:47.000000000 -0400
8751+++ linux-2.6.32.43/arch/x86/include/asm/irqflags.h 2011-04-17 15:56:46.000000000 -0400
8752@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_
8753 sti; \
8754 sysexit
8755
8756+#define GET_CR0_INTO_RDI mov %cr0, %rdi
8757+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
8758+#define GET_CR3_INTO_RDI mov %cr3, %rdi
8759+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
8760+
8761 #else
8762 #define INTERRUPT_RETURN iret
8763 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
8764diff -urNp linux-2.6.32.43/arch/x86/include/asm/kprobes.h linux-2.6.32.43/arch/x86/include/asm/kprobes.h
8765--- linux-2.6.32.43/arch/x86/include/asm/kprobes.h 2011-03-27 14:31:47.000000000 -0400
8766+++ linux-2.6.32.43/arch/x86/include/asm/kprobes.h 2011-04-23 12:56:12.000000000 -0400
8767@@ -34,13 +34,8 @@ typedef u8 kprobe_opcode_t;
8768 #define BREAKPOINT_INSTRUCTION 0xcc
8769 #define RELATIVEJUMP_INSTRUCTION 0xe9
8770 #define MAX_INSN_SIZE 16
8771-#define MAX_STACK_SIZE 64
8772-#define MIN_STACK_SIZE(ADDR) \
8773- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
8774- THREAD_SIZE - (unsigned long)(ADDR))) \
8775- ? (MAX_STACK_SIZE) \
8776- : (((unsigned long)current_thread_info()) + \
8777- THREAD_SIZE - (unsigned long)(ADDR)))
8778+#define MAX_STACK_SIZE 64UL
8779+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
8780
8781 #define flush_insn_slot(p) do { } while (0)
8782
8783diff -urNp linux-2.6.32.43/arch/x86/include/asm/kvm_host.h linux-2.6.32.43/arch/x86/include/asm/kvm_host.h
8784--- linux-2.6.32.43/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:01.000000000 -0400
8785+++ linux-2.6.32.43/arch/x86/include/asm/kvm_host.h 2011-05-10 22:12:26.000000000 -0400
8786@@ -536,7 +536,7 @@ struct kvm_x86_ops {
8787 const struct trace_print_flags *exit_reasons_str;
8788 };
8789
8790-extern struct kvm_x86_ops *kvm_x86_ops;
8791+extern const struct kvm_x86_ops *kvm_x86_ops;
8792
8793 int kvm_mmu_module_init(void);
8794 void kvm_mmu_module_exit(void);
8795diff -urNp linux-2.6.32.43/arch/x86/include/asm/local.h linux-2.6.32.43/arch/x86/include/asm/local.h
8796--- linux-2.6.32.43/arch/x86/include/asm/local.h 2011-03-27 14:31:47.000000000 -0400
8797+++ linux-2.6.32.43/arch/x86/include/asm/local.h 2011-04-17 15:56:46.000000000 -0400
8798@@ -18,26 +18,58 @@ typedef struct {
8799
8800 static inline void local_inc(local_t *l)
8801 {
8802- asm volatile(_ASM_INC "%0"
8803+ asm volatile(_ASM_INC "%0\n"
8804+
8805+#ifdef CONFIG_PAX_REFCOUNT
8806+ "jno 0f\n"
8807+ _ASM_DEC "%0\n"
8808+ "int $4\n0:\n"
8809+ _ASM_EXTABLE(0b, 0b)
8810+#endif
8811+
8812 : "+m" (l->a.counter));
8813 }
8814
8815 static inline void local_dec(local_t *l)
8816 {
8817- asm volatile(_ASM_DEC "%0"
8818+ asm volatile(_ASM_DEC "%0\n"
8819+
8820+#ifdef CONFIG_PAX_REFCOUNT
8821+ "jno 0f\n"
8822+ _ASM_INC "%0\n"
8823+ "int $4\n0:\n"
8824+ _ASM_EXTABLE(0b, 0b)
8825+#endif
8826+
8827 : "+m" (l->a.counter));
8828 }
8829
8830 static inline void local_add(long i, local_t *l)
8831 {
8832- asm volatile(_ASM_ADD "%1,%0"
8833+ asm volatile(_ASM_ADD "%1,%0\n"
8834+
8835+#ifdef CONFIG_PAX_REFCOUNT
8836+ "jno 0f\n"
8837+ _ASM_SUB "%1,%0\n"
8838+ "int $4\n0:\n"
8839+ _ASM_EXTABLE(0b, 0b)
8840+#endif
8841+
8842 : "+m" (l->a.counter)
8843 : "ir" (i));
8844 }
8845
8846 static inline void local_sub(long i, local_t *l)
8847 {
8848- asm volatile(_ASM_SUB "%1,%0"
8849+ asm volatile(_ASM_SUB "%1,%0\n"
8850+
8851+#ifdef CONFIG_PAX_REFCOUNT
8852+ "jno 0f\n"
8853+ _ASM_ADD "%1,%0\n"
8854+ "int $4\n0:\n"
8855+ _ASM_EXTABLE(0b, 0b)
8856+#endif
8857+
8858 : "+m" (l->a.counter)
8859 : "ir" (i));
8860 }
8861@@ -55,7 +87,16 @@ static inline int local_sub_and_test(lon
8862 {
8863 unsigned char c;
8864
8865- asm volatile(_ASM_SUB "%2,%0; sete %1"
8866+ asm volatile(_ASM_SUB "%2,%0\n"
8867+
8868+#ifdef CONFIG_PAX_REFCOUNT
8869+ "jno 0f\n"
8870+ _ASM_ADD "%2,%0\n"
8871+ "int $4\n0:\n"
8872+ _ASM_EXTABLE(0b, 0b)
8873+#endif
8874+
8875+ "sete %1\n"
8876 : "+m" (l->a.counter), "=qm" (c)
8877 : "ir" (i) : "memory");
8878 return c;
8879@@ -73,7 +114,16 @@ static inline int local_dec_and_test(loc
8880 {
8881 unsigned char c;
8882
8883- asm volatile(_ASM_DEC "%0; sete %1"
8884+ asm volatile(_ASM_DEC "%0\n"
8885+
8886+#ifdef CONFIG_PAX_REFCOUNT
8887+ "jno 0f\n"
8888+ _ASM_INC "%0\n"
8889+ "int $4\n0:\n"
8890+ _ASM_EXTABLE(0b, 0b)
8891+#endif
8892+
8893+ "sete %1\n"
8894 : "+m" (l->a.counter), "=qm" (c)
8895 : : "memory");
8896 return c != 0;
8897@@ -91,7 +141,16 @@ static inline int local_inc_and_test(loc
8898 {
8899 unsigned char c;
8900
8901- asm volatile(_ASM_INC "%0; sete %1"
8902+ asm volatile(_ASM_INC "%0\n"
8903+
8904+#ifdef CONFIG_PAX_REFCOUNT
8905+ "jno 0f\n"
8906+ _ASM_DEC "%0\n"
8907+ "int $4\n0:\n"
8908+ _ASM_EXTABLE(0b, 0b)
8909+#endif
8910+
8911+ "sete %1\n"
8912 : "+m" (l->a.counter), "=qm" (c)
8913 : : "memory");
8914 return c != 0;
8915@@ -110,7 +169,16 @@ static inline int local_add_negative(lon
8916 {
8917 unsigned char c;
8918
8919- asm volatile(_ASM_ADD "%2,%0; sets %1"
8920+ asm volatile(_ASM_ADD "%2,%0\n"
8921+
8922+#ifdef CONFIG_PAX_REFCOUNT
8923+ "jno 0f\n"
8924+ _ASM_SUB "%2,%0\n"
8925+ "int $4\n0:\n"
8926+ _ASM_EXTABLE(0b, 0b)
8927+#endif
8928+
8929+ "sets %1\n"
8930 : "+m" (l->a.counter), "=qm" (c)
8931 : "ir" (i) : "memory");
8932 return c;
8933@@ -133,7 +201,15 @@ static inline long local_add_return(long
8934 #endif
8935 /* Modern 486+ processor */
8936 __i = i;
8937- asm volatile(_ASM_XADD "%0, %1;"
8938+ asm volatile(_ASM_XADD "%0, %1\n"
8939+
8940+#ifdef CONFIG_PAX_REFCOUNT
8941+ "jno 0f\n"
8942+ _ASM_MOV "%0,%1\n"
8943+ "int $4\n0:\n"
8944+ _ASM_EXTABLE(0b, 0b)
8945+#endif
8946+
8947 : "+r" (i), "+m" (l->a.counter)
8948 : : "memory");
8949 return i + __i;
8950diff -urNp linux-2.6.32.43/arch/x86/include/asm/microcode.h linux-2.6.32.43/arch/x86/include/asm/microcode.h
8951--- linux-2.6.32.43/arch/x86/include/asm/microcode.h 2011-03-27 14:31:47.000000000 -0400
8952+++ linux-2.6.32.43/arch/x86/include/asm/microcode.h 2011-04-17 15:56:46.000000000 -0400
8953@@ -12,13 +12,13 @@ struct device;
8954 enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND };
8955
8956 struct microcode_ops {
8957- enum ucode_state (*request_microcode_user) (int cpu,
8958+ enum ucode_state (* const request_microcode_user) (int cpu,
8959 const void __user *buf, size_t size);
8960
8961- enum ucode_state (*request_microcode_fw) (int cpu,
8962+ enum ucode_state (* const request_microcode_fw) (int cpu,
8963 struct device *device);
8964
8965- void (*microcode_fini_cpu) (int cpu);
8966+ void (* const microcode_fini_cpu) (int cpu);
8967
8968 /*
8969 * The generic 'microcode_core' part guarantees that
8970@@ -38,18 +38,18 @@ struct ucode_cpu_info {
8971 extern struct ucode_cpu_info ucode_cpu_info[];
8972
8973 #ifdef CONFIG_MICROCODE_INTEL
8974-extern struct microcode_ops * __init init_intel_microcode(void);
8975+extern const struct microcode_ops * __init init_intel_microcode(void);
8976 #else
8977-static inline struct microcode_ops * __init init_intel_microcode(void)
8978+static inline const struct microcode_ops * __init init_intel_microcode(void)
8979 {
8980 return NULL;
8981 }
8982 #endif /* CONFIG_MICROCODE_INTEL */
8983
8984 #ifdef CONFIG_MICROCODE_AMD
8985-extern struct microcode_ops * __init init_amd_microcode(void);
8986+extern const struct microcode_ops * __init init_amd_microcode(void);
8987 #else
8988-static inline struct microcode_ops * __init init_amd_microcode(void)
8989+static inline const struct microcode_ops * __init init_amd_microcode(void)
8990 {
8991 return NULL;
8992 }
8993diff -urNp linux-2.6.32.43/arch/x86/include/asm/mman.h linux-2.6.32.43/arch/x86/include/asm/mman.h
8994--- linux-2.6.32.43/arch/x86/include/asm/mman.h 2011-03-27 14:31:47.000000000 -0400
8995+++ linux-2.6.32.43/arch/x86/include/asm/mman.h 2011-04-17 15:56:46.000000000 -0400
8996@@ -5,4 +5,14 @@
8997
8998 #include <asm-generic/mman.h>
8999
9000+#ifdef __KERNEL__
9001+#ifndef __ASSEMBLY__
9002+#ifdef CONFIG_X86_32
9003+#define arch_mmap_check i386_mmap_check
9004+int i386_mmap_check(unsigned long addr, unsigned long len,
9005+ unsigned long flags);
9006+#endif
9007+#endif
9008+#endif
9009+
9010 #endif /* _ASM_X86_MMAN_H */
9011diff -urNp linux-2.6.32.43/arch/x86/include/asm/mmu_context.h linux-2.6.32.43/arch/x86/include/asm/mmu_context.h
9012--- linux-2.6.32.43/arch/x86/include/asm/mmu_context.h 2011-03-27 14:31:47.000000000 -0400
9013+++ linux-2.6.32.43/arch/x86/include/asm/mmu_context.h 2011-04-17 15:56:46.000000000 -0400
9014@@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m
9015
9016 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
9017 {
9018+
9019+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9020+ unsigned int i;
9021+ pgd_t *pgd;
9022+
9023+ pax_open_kernel();
9024+ pgd = get_cpu_pgd(smp_processor_id());
9025+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
9026+ if (paravirt_enabled())
9027+ set_pgd(pgd+i, native_make_pgd(0));
9028+ else
9029+ pgd[i] = native_make_pgd(0);
9030+ pax_close_kernel();
9031+#endif
9032+
9033 #ifdef CONFIG_SMP
9034 if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
9035 percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
9036@@ -34,16 +49,30 @@ static inline void switch_mm(struct mm_s
9037 struct task_struct *tsk)
9038 {
9039 unsigned cpu = smp_processor_id();
9040+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP)
9041+ int tlbstate = TLBSTATE_OK;
9042+#endif
9043
9044 if (likely(prev != next)) {
9045 #ifdef CONFIG_SMP
9046+#ifdef CONFIG_X86_32
9047+ tlbstate = percpu_read(cpu_tlbstate.state);
9048+#endif
9049 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9050 percpu_write(cpu_tlbstate.active_mm, next);
9051 #endif
9052 cpumask_set_cpu(cpu, mm_cpumask(next));
9053
9054 /* Re-load page tables */
9055+#ifdef CONFIG_PAX_PER_CPU_PGD
9056+ pax_open_kernel();
9057+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9058+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9059+ pax_close_kernel();
9060+ load_cr3(get_cpu_pgd(cpu));
9061+#else
9062 load_cr3(next->pgd);
9063+#endif
9064
9065 /* stop flush ipis for the previous mm */
9066 cpumask_clear_cpu(cpu, mm_cpumask(prev));
9067@@ -53,9 +82,38 @@ static inline void switch_mm(struct mm_s
9068 */
9069 if (unlikely(prev->context.ldt != next->context.ldt))
9070 load_LDT_nolock(&next->context);
9071- }
9072+
9073+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9074+ if (!nx_enabled) {
9075+ smp_mb__before_clear_bit();
9076+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
9077+ smp_mb__after_clear_bit();
9078+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9079+ }
9080+#endif
9081+
9082+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9083+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
9084+ prev->context.user_cs_limit != next->context.user_cs_limit))
9085+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9086 #ifdef CONFIG_SMP
9087+ else if (unlikely(tlbstate != TLBSTATE_OK))
9088+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9089+#endif
9090+#endif
9091+
9092+ }
9093 else {
9094+
9095+#ifdef CONFIG_PAX_PER_CPU_PGD
9096+ pax_open_kernel();
9097+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS);
9098+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS);
9099+ pax_close_kernel();
9100+ load_cr3(get_cpu_pgd(cpu));
9101+#endif
9102+
9103+#ifdef CONFIG_SMP
9104 percpu_write(cpu_tlbstate.state, TLBSTATE_OK);
9105 BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next);
9106
9107@@ -64,11 +122,28 @@ static inline void switch_mm(struct mm_s
9108 * tlb flush IPI delivery. We must reload CR3
9109 * to make sure to use no freed page tables.
9110 */
9111+
9112+#ifndef CONFIG_PAX_PER_CPU_PGD
9113 load_cr3(next->pgd);
9114+#endif
9115+
9116 load_LDT_nolock(&next->context);
9117+
9118+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
9119+ if (!nx_enabled)
9120+ cpu_set(cpu, next->context.cpu_user_cs_mask);
9121+#endif
9122+
9123+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
9124+#ifdef CONFIG_PAX_PAGEEXEC
9125+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && nx_enabled))
9126+#endif
9127+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
9128+#endif
9129+
9130 }
9131- }
9132 #endif
9133+ }
9134 }
9135
9136 #define activate_mm(prev, next) \
9137diff -urNp linux-2.6.32.43/arch/x86/include/asm/mmu.h linux-2.6.32.43/arch/x86/include/asm/mmu.h
9138--- linux-2.6.32.43/arch/x86/include/asm/mmu.h 2011-03-27 14:31:47.000000000 -0400
9139+++ linux-2.6.32.43/arch/x86/include/asm/mmu.h 2011-04-17 15:56:46.000000000 -0400
9140@@ -9,10 +9,23 @@
9141 * we put the segment information here.
9142 */
9143 typedef struct {
9144- void *ldt;
9145+ struct desc_struct *ldt;
9146 int size;
9147 struct mutex lock;
9148- void *vdso;
9149+ unsigned long vdso;
9150+
9151+#ifdef CONFIG_X86_32
9152+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
9153+ unsigned long user_cs_base;
9154+ unsigned long user_cs_limit;
9155+
9156+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
9157+ cpumask_t cpu_user_cs_mask;
9158+#endif
9159+
9160+#endif
9161+#endif
9162+
9163 } mm_context_t;
9164
9165 #ifdef CONFIG_SMP
9166diff -urNp linux-2.6.32.43/arch/x86/include/asm/module.h linux-2.6.32.43/arch/x86/include/asm/module.h
9167--- linux-2.6.32.43/arch/x86/include/asm/module.h 2011-03-27 14:31:47.000000000 -0400
9168+++ linux-2.6.32.43/arch/x86/include/asm/module.h 2011-04-23 13:18:57.000000000 -0400
9169@@ -5,6 +5,7 @@
9170
9171 #ifdef CONFIG_X86_64
9172 /* X86_64 does not define MODULE_PROC_FAMILY */
9173+#define MODULE_PROC_FAMILY ""
9174 #elif defined CONFIG_M386
9175 #define MODULE_PROC_FAMILY "386 "
9176 #elif defined CONFIG_M486
9177@@ -59,13 +60,36 @@
9178 #error unknown processor family
9179 #endif
9180
9181-#ifdef CONFIG_X86_32
9182-# ifdef CONFIG_4KSTACKS
9183-# define MODULE_STACKSIZE "4KSTACKS "
9184-# else
9185-# define MODULE_STACKSIZE ""
9186-# endif
9187-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE
9188+#ifdef CONFIG_PAX_MEMORY_UDEREF
9189+#define MODULE_PAX_UDEREF "UDEREF "
9190+#else
9191+#define MODULE_PAX_UDEREF ""
9192+#endif
9193+
9194+#ifdef CONFIG_PAX_KERNEXEC
9195+#define MODULE_PAX_KERNEXEC "KERNEXEC "
9196+#else
9197+#define MODULE_PAX_KERNEXEC ""
9198+#endif
9199+
9200+#ifdef CONFIG_PAX_REFCOUNT
9201+#define MODULE_PAX_REFCOUNT "REFCOUNT "
9202+#else
9203+#define MODULE_PAX_REFCOUNT ""
9204 #endif
9205
9206+#if defined(CONFIG_X86_32) && defined(CONFIG_4KSTACKS)
9207+#define MODULE_STACKSIZE "4KSTACKS "
9208+#else
9209+#define MODULE_STACKSIZE ""
9210+#endif
9211+
9212+#ifdef CONFIG_GRKERNSEC
9213+#define MODULE_GRSEC "GRSECURITY "
9214+#else
9215+#define MODULE_GRSEC ""
9216+#endif
9217+
9218+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT
9219+
9220 #endif /* _ASM_X86_MODULE_H */
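
The module.h rewrite folds the grsecurity/PaX feature set into MODULE_ARCH_VERMAGIC, so a module built without matching options refuses to load against a kernel built with them. The pieces are plain string literals glued together by adjacent-literal concatenation; a tiny sketch with one assumed configuration (64-bit build, KERNEXEC + UDEREF + REFCOUNT on, GRKERNSEC and 4KSTACKS off):

#include <stdio.h>

#define MODULE_PROC_FAMILY  ""
#define MODULE_STACKSIZE    ""
#define MODULE_GRSEC        ""
#define MODULE_PAX_KERNEXEC "KERNEXEC "
#define MODULE_PAX_UDEREF   "UDEREF "
#define MODULE_PAX_REFCOUNT "REFCOUNT "

#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC \
        MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF MODULE_PAX_REFCOUNT

int main(void)
{
        /* prints "KERNEXEC UDEREF REFCOUNT " for this assumed configuration */
        printf("%s\n", MODULE_ARCH_VERMAGIC);
        return 0;
}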
9221diff -urNp linux-2.6.32.43/arch/x86/include/asm/page_64_types.h linux-2.6.32.43/arch/x86/include/asm/page_64_types.h
9222--- linux-2.6.32.43/arch/x86/include/asm/page_64_types.h 2011-03-27 14:31:47.000000000 -0400
9223+++ linux-2.6.32.43/arch/x86/include/asm/page_64_types.h 2011-04-17 15:56:46.000000000 -0400
9224@@ -56,7 +56,7 @@ void copy_page(void *to, void *from);
9225
9226 /* duplicated to the one in bootmem.h */
9227 extern unsigned long max_pfn;
9228-extern unsigned long phys_base;
9229+extern const unsigned long phys_base;
9230
9231 extern unsigned long __phys_addr(unsigned long);
9232 #define __phys_reloc_hide(x) (x)
9233diff -urNp linux-2.6.32.43/arch/x86/include/asm/paravirt.h linux-2.6.32.43/arch/x86/include/asm/paravirt.h
9234--- linux-2.6.32.43/arch/x86/include/asm/paravirt.h 2011-03-27 14:31:47.000000000 -0400
9235+++ linux-2.6.32.43/arch/x86/include/asm/paravirt.h 2011-04-17 15:56:46.000000000 -0400
9236@@ -729,6 +729,21 @@ static inline void __set_fixmap(unsigned
9237 pv_mmu_ops.set_fixmap(idx, phys, flags);
9238 }
9239
9240+#ifdef CONFIG_PAX_KERNEXEC
9241+static inline unsigned long pax_open_kernel(void)
9242+{
9243+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
9244+}
9245+
9246+static inline unsigned long pax_close_kernel(void)
9247+{
9248+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
9249+}
9250+#else
9251+static inline unsigned long pax_open_kernel(void) { return 0; }
9252+static inline unsigned long pax_close_kernel(void) { return 0; }
9253+#endif
9254+
9255 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
9256
9257 static inline int __raw_spin_is_locked(struct raw_spinlock *lock)
9258@@ -945,7 +960,7 @@ extern void default_banner(void);
9259
9260 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
9261 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
9262-#define PARA_INDIRECT(addr) *%cs:addr
9263+#define PARA_INDIRECT(addr) *%ss:addr
9264 #endif
9265
9266 #define INTERRUPT_RETURN \
9267@@ -1022,6 +1037,21 @@ extern void default_banner(void);
9268 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
9269 CLBR_NONE, \
9270 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
9271+
9272+#define GET_CR0_INTO_RDI \
9273+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
9274+ mov %rax,%rdi
9275+
9276+#define SET_RDI_INTO_CR0 \
9277+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
9278+
9279+#define GET_CR3_INTO_RDI \
9280+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
9281+ mov %rax,%rdi
9282+
9283+#define SET_RDI_INTO_CR3 \
9284+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
9285+
9286 #endif /* CONFIG_X86_32 */
9287
9288 #endif /* __ASSEMBLY__ */
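
paravirt.h now routes pax_open_kernel()/pax_close_kernel() through pv_mmu_ops when KERNEXEC is enabled and collapses them to no-op inlines otherwise; paravirt_types.h below adds the matching hook slots and marks the ops structures __no_const so they can stay read-only. Callers always bracket writes to protected kernel data with the pair, as in the desc.h and pgtable hunks. A self-contained sketch of that calling convention, using the no-op stubs from the !CONFIG_PAX_KERNEXEC branch; write_protected_word is a stand-in for any such write site:

/* no-op fallbacks, as in the !CONFIG_PAX_KERNEXEC branch above */
static inline unsigned long pax_open_kernel(void)  { return 0; }
static inline unsigned long pax_close_kernel(void) { return 0; }

/* a write into normally read-only kernel data gets bracketed like this */
static inline void write_protected_word(unsigned long *slot, unsigned long val)
{
        pax_open_kernel();      /* allow the write (e.g. lift write protection) */
        *slot = val;
        pax_close_kernel();     /* restore write protection */
}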
9289diff -urNp linux-2.6.32.43/arch/x86/include/asm/paravirt_types.h linux-2.6.32.43/arch/x86/include/asm/paravirt_types.h
9290--- linux-2.6.32.43/arch/x86/include/asm/paravirt_types.h 2011-03-27 14:31:47.000000000 -0400
9291+++ linux-2.6.32.43/arch/x86/include/asm/paravirt_types.h 2011-08-05 20:33:55.000000000 -0400
9292@@ -78,19 +78,19 @@ struct pv_init_ops {
9293 */
9294 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
9295 unsigned long addr, unsigned len);
9296-};
9297+} __no_const;
9298
9299
9300 struct pv_lazy_ops {
9301 /* Set deferred update mode, used for batching operations. */
9302 void (*enter)(void);
9303 void (*leave)(void);
9304-};
9305+} __no_const;
9306
9307 struct pv_time_ops {
9308 unsigned long long (*sched_clock)(void);
9309 unsigned long (*get_tsc_khz)(void);
9310-};
9311+} __no_const;
9312
9313 struct pv_cpu_ops {
9314 /* hooks for various privileged instructions */
9315@@ -186,7 +186,7 @@ struct pv_cpu_ops {
9316
9317 void (*start_context_switch)(struct task_struct *prev);
9318 void (*end_context_switch)(struct task_struct *next);
9319-};
9320+} __no_const;
9321
9322 struct pv_irq_ops {
9323 /*
9324@@ -217,7 +217,7 @@ struct pv_apic_ops {
9325 unsigned long start_eip,
9326 unsigned long start_esp);
9327 #endif
9328-};
9329+} __no_const;
9330
9331 struct pv_mmu_ops {
9332 unsigned long (*read_cr2)(void);
9333@@ -316,6 +316,12 @@ struct pv_mmu_ops {
9334 an mfn. We can tell which is which from the index. */
9335 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
9336 phys_addr_t phys, pgprot_t flags);
9337+
9338+#ifdef CONFIG_PAX_KERNEXEC
9339+ unsigned long (*pax_open_kernel)(void);
9340+ unsigned long (*pax_close_kernel)(void);
9341+#endif
9342+
9343 };
9344
9345 struct raw_spinlock;
9346@@ -326,7 +332,7 @@ struct pv_lock_ops {
9347 void (*spin_lock_flags)(struct raw_spinlock *lock, unsigned long flags);
9348 int (*spin_trylock)(struct raw_spinlock *lock);
9349 void (*spin_unlock)(struct raw_spinlock *lock);
9350-};
9351+} __no_const;
9352
9353 /* This contains all the paravirt structures: we get a convenient
9354 * number for each function using the offset which we use to indicate
9355diff -urNp linux-2.6.32.43/arch/x86/include/asm/pci_x86.h linux-2.6.32.43/arch/x86/include/asm/pci_x86.h
9356--- linux-2.6.32.43/arch/x86/include/asm/pci_x86.h 2011-03-27 14:31:47.000000000 -0400
9357+++ linux-2.6.32.43/arch/x86/include/asm/pci_x86.h 2011-04-17 15:56:46.000000000 -0400
9358@@ -89,16 +89,16 @@ extern int (*pcibios_enable_irq)(struct
9359 extern void (*pcibios_disable_irq)(struct pci_dev *dev);
9360
9361 struct pci_raw_ops {
9362- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9363+ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn,
9364 int reg, int len, u32 *val);
9365- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9366+ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn,
9367 int reg, int len, u32 val);
9368 };
9369
9370-extern struct pci_raw_ops *raw_pci_ops;
9371-extern struct pci_raw_ops *raw_pci_ext_ops;
9372+extern const struct pci_raw_ops *raw_pci_ops;
9373+extern const struct pci_raw_ops *raw_pci_ext_ops;
9374
9375-extern struct pci_raw_ops pci_direct_conf1;
9376+extern const struct pci_raw_ops pci_direct_conf1;
9377 extern bool port_cf9_safe;
9378
9379 /* arch_initcall level */
9380diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgalloc.h linux-2.6.32.43/arch/x86/include/asm/pgalloc.h
9381--- linux-2.6.32.43/arch/x86/include/asm/pgalloc.h 2011-03-27 14:31:47.000000000 -0400
9382+++ linux-2.6.32.43/arch/x86/include/asm/pgalloc.h 2011-04-17 15:56:46.000000000 -0400
9383@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s
9384 pmd_t *pmd, pte_t *pte)
9385 {
9386 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9387+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
9388+}
9389+
9390+static inline void pmd_populate_user(struct mm_struct *mm,
9391+ pmd_t *pmd, pte_t *pte)
9392+{
9393+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
9394 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
9395 }
9396
9397diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgtable-2level.h linux-2.6.32.43/arch/x86/include/asm/pgtable-2level.h
9398--- linux-2.6.32.43/arch/x86/include/asm/pgtable-2level.h 2011-03-27 14:31:47.000000000 -0400
9399+++ linux-2.6.32.43/arch/x86/include/asm/pgtable-2level.h 2011-04-17 15:56:46.000000000 -0400
9400@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t
9401
9402 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9403 {
9404+ pax_open_kernel();
9405 *pmdp = pmd;
9406+ pax_close_kernel();
9407 }
9408
9409 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
9410diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgtable_32.h linux-2.6.32.43/arch/x86/include/asm/pgtable_32.h
9411--- linux-2.6.32.43/arch/x86/include/asm/pgtable_32.h 2011-03-27 14:31:47.000000000 -0400
9412+++ linux-2.6.32.43/arch/x86/include/asm/pgtable_32.h 2011-04-17 15:56:46.000000000 -0400
9413@@ -26,9 +26,6 @@
9414 struct mm_struct;
9415 struct vm_area_struct;
9416
9417-extern pgd_t swapper_pg_dir[1024];
9418-extern pgd_t trampoline_pg_dir[1024];
9419-
9420 static inline void pgtable_cache_init(void) { }
9421 static inline void check_pgt_cache(void) { }
9422 void paging_init(void);
9423@@ -49,6 +46,12 @@ extern void set_pmd_pfn(unsigned long, u
9424 # include <asm/pgtable-2level.h>
9425 #endif
9426
9427+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
9428+extern pgd_t trampoline_pg_dir[PTRS_PER_PGD];
9429+#ifdef CONFIG_X86_PAE
9430+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
9431+#endif
9432+
9433 #if defined(CONFIG_HIGHPTE)
9434 #define __KM_PTE \
9435 (in_nmi() ? KM_NMI_PTE : \
9436@@ -73,7 +76,9 @@ extern void set_pmd_pfn(unsigned long, u
9437 /* Clear a kernel PTE and flush it from the TLB */
9438 #define kpte_clear_flush(ptep, vaddr) \
9439 do { \
9440+ pax_open_kernel(); \
9441 pte_clear(&init_mm, (vaddr), (ptep)); \
9442+ pax_close_kernel(); \
9443 __flush_tlb_one((vaddr)); \
9444 } while (0)
9445
9446@@ -85,6 +90,9 @@ do { \
9447
9448 #endif /* !__ASSEMBLY__ */
9449
9450+#define HAVE_ARCH_UNMAPPED_AREA
9451+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
9452+
9453 /*
9454 * kern_addr_valid() is (1) for FLATMEM and (0) for
9455 * SPARSEMEM and DISCONTIGMEM
9456diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgtable_32_types.h linux-2.6.32.43/arch/x86/include/asm/pgtable_32_types.h
9457--- linux-2.6.32.43/arch/x86/include/asm/pgtable_32_types.h 2011-03-27 14:31:47.000000000 -0400
9458+++ linux-2.6.32.43/arch/x86/include/asm/pgtable_32_types.h 2011-04-17 15:56:46.000000000 -0400
9459@@ -8,7 +8,7 @@
9460 */
9461 #ifdef CONFIG_X86_PAE
9462 # include <asm/pgtable-3level_types.h>
9463-# define PMD_SIZE (1UL << PMD_SHIFT)
9464+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
9465 # define PMD_MASK (~(PMD_SIZE - 1))
9466 #else
9467 # include <asm/pgtable-2level_types.h>
9468@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set
9469 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
9470 #endif
9471
9472+#ifdef CONFIG_PAX_KERNEXEC
9473+#ifndef __ASSEMBLY__
9474+extern unsigned char MODULES_EXEC_VADDR[];
9475+extern unsigned char MODULES_EXEC_END[];
9476+#endif
9477+#include <asm/boot.h>
9478+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
9479+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
9480+#else
9481+#define ktla_ktva(addr) (addr)
9482+#define ktva_ktla(addr) (addr)
9483+#endif
9484+
9485 #define MODULES_VADDR VMALLOC_START
9486 #define MODULES_END VMALLOC_END
9487 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
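
The ktla_ktva()/ktva_ktla() pair added above is a plain constant-offset translation between the two mappings of kernel text under KERNEXEC. A minimal userspace sketch (not part of the patch; the LOAD_PHYSICAL_ADDR and PAGE_OFFSET values are assumptions chosen for the demo, not a real kernel configuration) showing that the two macros are exact inverses:

#include <stdio.h>

#define LOAD_PHYSICAL_ADDR 0x1000000UL   /* assumed 16 MiB load address */
#define PAGE_OFFSET        0xc0000000UL  /* assumed 3 GiB kernel split  */

#define ktla_ktva(addr) ((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define ktva_ktla(addr) ((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

int main(void)
{
        unsigned long text_linear = 0x00400000UL;  /* illustrative kernel-text address */
        unsigned long va = ktla_ktva(text_linear);

        printf("linear %#lx -> virtual alias %#lx\n", text_linear, va);
        printf("round trip ok: %d\n", ktva_ktla(va) == text_linear);
        return 0;
}
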
9488diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgtable-3level.h linux-2.6.32.43/arch/x86/include/asm/pgtable-3level.h
9489--- linux-2.6.32.43/arch/x86/include/asm/pgtable-3level.h 2011-03-27 14:31:47.000000000 -0400
9490+++ linux-2.6.32.43/arch/x86/include/asm/pgtable-3level.h 2011-04-17 15:56:46.000000000 -0400
9491@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic
9492
9493 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9494 {
9495+ pax_open_kernel();
9496 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
9497+ pax_close_kernel();
9498 }
9499
9500 static inline void native_set_pud(pud_t *pudp, pud_t pud)
9501 {
9502+ pax_open_kernel();
9503 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
9504+ pax_close_kernel();
9505 }
9506
9507 /*
9508diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgtable_64.h linux-2.6.32.43/arch/x86/include/asm/pgtable_64.h
9509--- linux-2.6.32.43/arch/x86/include/asm/pgtable_64.h 2011-03-27 14:31:47.000000000 -0400
9510+++ linux-2.6.32.43/arch/x86/include/asm/pgtable_64.h 2011-04-17 15:56:46.000000000 -0400
9511@@ -16,10 +16,13 @@
9512
9513 extern pud_t level3_kernel_pgt[512];
9514 extern pud_t level3_ident_pgt[512];
9515+extern pud_t level3_vmalloc_pgt[512];
9516+extern pud_t level3_vmemmap_pgt[512];
9517+extern pud_t level2_vmemmap_pgt[512];
9518 extern pmd_t level2_kernel_pgt[512];
9519 extern pmd_t level2_fixmap_pgt[512];
9520-extern pmd_t level2_ident_pgt[512];
9521-extern pgd_t init_level4_pgt[];
9522+extern pmd_t level2_ident_pgt[512*2];
9523+extern pgd_t init_level4_pgt[512];
9524
9525 #define swapper_pg_dir init_level4_pgt
9526
9527@@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_
9528
9529 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
9530 {
9531+ pax_open_kernel();
9532 *pmdp = pmd;
9533+ pax_close_kernel();
9534 }
9535
9536 static inline void native_pmd_clear(pmd_t *pmd)
9537@@ -94,7 +99,9 @@ static inline void native_pud_clear(pud_
9538
9539 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
9540 {
9541+ pax_open_kernel();
9542 *pgdp = pgd;
9543+ pax_close_kernel();
9544 }
9545
9546 static inline void native_pgd_clear(pgd_t *pgd)
9547diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgtable_64_types.h linux-2.6.32.43/arch/x86/include/asm/pgtable_64_types.h
9548--- linux-2.6.32.43/arch/x86/include/asm/pgtable_64_types.h 2011-03-27 14:31:47.000000000 -0400
9549+++ linux-2.6.32.43/arch/x86/include/asm/pgtable_64_types.h 2011-04-17 15:56:46.000000000 -0400
9550@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t;
9551 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
9552 #define MODULES_END _AC(0xffffffffff000000, UL)
9553 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
9554+#define MODULES_EXEC_VADDR MODULES_VADDR
9555+#define MODULES_EXEC_END MODULES_END
9556+
9557+#define ktla_ktva(addr) (addr)
9558+#define ktva_ktla(addr) (addr)
9559
9560 #endif /* _ASM_X86_PGTABLE_64_DEFS_H */
9561diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgtable.h linux-2.6.32.43/arch/x86/include/asm/pgtable.h
9562--- linux-2.6.32.43/arch/x86/include/asm/pgtable.h 2011-03-27 14:31:47.000000000 -0400
9563+++ linux-2.6.32.43/arch/x86/include/asm/pgtable.h 2011-04-17 15:56:46.000000000 -0400
9564@@ -74,12 +74,51 @@ extern struct list_head pgd_list;
9565
9566 #define arch_end_context_switch(prev) do {} while(0)
9567
9568+#define pax_open_kernel() native_pax_open_kernel()
9569+#define pax_close_kernel() native_pax_close_kernel()
9570 #endif /* CONFIG_PARAVIRT */
9571
9572+#define __HAVE_ARCH_PAX_OPEN_KERNEL
9573+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
9574+
9575+#ifdef CONFIG_PAX_KERNEXEC
9576+static inline unsigned long native_pax_open_kernel(void)
9577+{
9578+ unsigned long cr0;
9579+
9580+ preempt_disable();
9581+ barrier();
9582+ cr0 = read_cr0() ^ X86_CR0_WP;
9583+ BUG_ON(unlikely(cr0 & X86_CR0_WP));
9584+ write_cr0(cr0);
9585+ return cr0 ^ X86_CR0_WP;
9586+}
9587+
9588+static inline unsigned long native_pax_close_kernel(void)
9589+{
9590+ unsigned long cr0;
9591+
9592+ cr0 = read_cr0() ^ X86_CR0_WP;
9593+ BUG_ON(unlikely(!(cr0 & X86_CR0_WP)));
9594+ write_cr0(cr0);
9595+ barrier();
9596+ preempt_enable_no_resched();
9597+ return cr0 ^ X86_CR0_WP;
9598+}
9599+#else
9600+static inline unsigned long native_pax_open_kernel(void) { return 0; }
9601+static inline unsigned long native_pax_close_kernel(void) { return 0; }
9602+#endif
9603+
9604 /*
9605 * The following only work if pte_present() is true.
9606 * Undefined behaviour if not..
9607 */
9608+static inline int pte_user(pte_t pte)
9609+{
9610+ return pte_val(pte) & _PAGE_USER;
9611+}
9612+
9613 static inline int pte_dirty(pte_t pte)
9614 {
9615 return pte_flags(pte) & _PAGE_DIRTY;
9616@@ -167,9 +206,29 @@ static inline pte_t pte_wrprotect(pte_t
9617 return pte_clear_flags(pte, _PAGE_RW);
9618 }
9619
9620+static inline pte_t pte_mkread(pte_t pte)
9621+{
9622+ return __pte(pte_val(pte) | _PAGE_USER);
9623+}
9624+
9625 static inline pte_t pte_mkexec(pte_t pte)
9626 {
9627- return pte_clear_flags(pte, _PAGE_NX);
9628+#ifdef CONFIG_X86_PAE
9629+ if (__supported_pte_mask & _PAGE_NX)
9630+ return pte_clear_flags(pte, _PAGE_NX);
9631+ else
9632+#endif
9633+ return pte_set_flags(pte, _PAGE_USER);
9634+}
9635+
9636+static inline pte_t pte_exprotect(pte_t pte)
9637+{
9638+#ifdef CONFIG_X86_PAE
9639+ if (__supported_pte_mask & _PAGE_NX)
9640+ return pte_set_flags(pte, _PAGE_NX);
9641+ else
9642+#endif
9643+ return pte_clear_flags(pte, _PAGE_USER);
9644 }
9645
9646 static inline pte_t pte_mkdirty(pte_t pte)
9647@@ -302,6 +361,15 @@ pte_t *populate_extra_pte(unsigned long
9648 #endif
9649
9650 #ifndef __ASSEMBLY__
9651+
9652+#ifdef CONFIG_PAX_PER_CPU_PGD
9653+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
9654+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
9655+{
9656+ return cpu_pgd[cpu];
9657+}
9658+#endif
9659+
9660 #include <linux/mm_types.h>
9661
9662 static inline int pte_none(pte_t pte)
9663@@ -472,7 +540,7 @@ static inline pud_t *pud_offset(pgd_t *p
9664
9665 static inline int pgd_bad(pgd_t pgd)
9666 {
9667- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
9668+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
9669 }
9670
9671 static inline int pgd_none(pgd_t pgd)
9672@@ -495,7 +563,12 @@ static inline int pgd_none(pgd_t pgd)
9673 * pgd_offset() returns a (pgd_t *)
9674 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
9675 */
9676-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
9677+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
9678+
9679+#ifdef CONFIG_PAX_PER_CPU_PGD
9680+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
9681+#endif
9682+
9683 /*
9684 * a shortcut which implies the use of the kernel's pgd, instead
9685 * of a process's
9686@@ -506,6 +579,20 @@ static inline int pgd_none(pgd_t pgd)
9687 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
9688 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
9689
9690+#ifdef CONFIG_X86_32
9691+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
9692+#else
9693+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
9694+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
9695+
9696+#ifdef CONFIG_PAX_MEMORY_UDEREF
9697+#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT)
9698+#else
9699+#define PAX_USER_SHADOW_BASE (_AC(0,UL))
9700+#endif
9701+
9702+#endif
9703+
9704 #ifndef __ASSEMBLY__
9705
9706 extern int direct_gbpages;
9707@@ -611,11 +698,23 @@ static inline void ptep_set_wrprotect(st
9708 * dst and src can be on the same page, but the range must not overlap,
9709 * and must not cross a page boundary.
9710 */
9711-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
9712+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
9713 {
9714- memcpy(dst, src, count * sizeof(pgd_t));
9715+ pax_open_kernel();
9716+ while (count--)
9717+ *dst++ = *src++;
9718+ pax_close_kernel();
9719 }
9720
9721+#ifdef CONFIG_PAX_PER_CPU_PGD
9722+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9723+#endif
9724+
9725+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
9726+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count);
9727+#else
9728+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {}
9729+#endif
9730
9731 #include <asm-generic/pgtable.h>
9732 #endif /* __ASSEMBLY__ */
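
The native_pax_open_kernel()/native_pax_close_kernel() pair above brackets writes to normally read-only kernel memory by toggling CR0.WP with preemption disabled. As a rough userspace analogue only (a sketch under the assumption that mprotect() on a read-only page is a fair stand-in for the CR0.WP window; none of this is the patch's kernel code), the same open/write/close pattern looks like:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

static void *ro_page;
static size_t page_size;

static void open_ro_window(void)    /* analogue of pax_open_kernel()  */
{
        mprotect(ro_page, page_size, PROT_READ | PROT_WRITE);
}

static void close_ro_window(void)   /* analogue of pax_close_kernel() */
{
        mprotect(ro_page, page_size, PROT_READ);
}

int main(void)
{
        page_size = (size_t)sysconf(_SC_PAGESIZE);
        ro_page = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (ro_page == MAP_FAILED)
                return 1;
        mprotect(ro_page, page_size, PROT_READ);   /* page starts out read-only */

        open_ro_window();
        strcpy(ro_page, "written inside the open/close window");
        close_ro_window();

        printf("%s\n", (char *)ro_page);
        return 0;
}

The kernel-side version additionally disables preemption so the write-protect window cannot leak to another task; the analogue skips that because a userspace process has no equivalent concern.
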
9733diff -urNp linux-2.6.32.43/arch/x86/include/asm/pgtable_types.h linux-2.6.32.43/arch/x86/include/asm/pgtable_types.h
9734--- linux-2.6.32.43/arch/x86/include/asm/pgtable_types.h 2011-03-27 14:31:47.000000000 -0400
9735+++ linux-2.6.32.43/arch/x86/include/asm/pgtable_types.h 2011-04-17 15:56:46.000000000 -0400
9736@@ -16,12 +16,11 @@
9737 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
9738 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
9739 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
9740-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
9741+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
9742 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
9743 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
9744 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
9745-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
9746-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
9747+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
9748 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
9749
9750 /* If _PAGE_BIT_PRESENT is clear, we use these: */
9751@@ -39,7 +38,6 @@
9752 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
9753 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
9754 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
9755-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
9756 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
9757 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
9758 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
9759@@ -55,8 +53,10 @@
9760
9761 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
9762 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
9763-#else
9764+#elif defined(CONFIG_KMEMCHECK)
9765 #define _PAGE_NX (_AT(pteval_t, 0))
9766+#else
9767+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
9768 #endif
9769
9770 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
9771@@ -93,6 +93,9 @@
9772 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
9773 _PAGE_ACCESSED)
9774
9775+#define PAGE_READONLY_NOEXEC PAGE_READONLY
9776+#define PAGE_SHARED_NOEXEC PAGE_SHARED
9777+
9778 #define __PAGE_KERNEL_EXEC \
9779 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
9780 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
9781@@ -103,8 +106,8 @@
9782 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
9783 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
9784 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
9785-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
9786-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT)
9787+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
9788+#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER)
9789 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
9790 #define __PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE)
9791 #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE)
9792@@ -163,8 +166,8 @@
9793 * bits are combined, this will alow user to access the high address mapped
9794 * VDSO in the presence of CONFIG_COMPAT_VDSO
9795 */
9796-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
9797-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
9798+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9799+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
9800 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
9801 #endif
9802
9803@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p
9804 {
9805 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
9806 }
9807+#endif
9808
9809+#if PAGETABLE_LEVELS == 3
9810+#include <asm-generic/pgtable-nopud.h>
9811+#endif
9812+
9813+#if PAGETABLE_LEVELS == 2
9814+#include <asm-generic/pgtable-nopmd.h>
9815+#endif
9816+
9817+#ifndef __ASSEMBLY__
9818 #if PAGETABLE_LEVELS > 3
9819 typedef struct { pudval_t pud; } pud_t;
9820
9821@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu
9822 return pud.pud;
9823 }
9824 #else
9825-#include <asm-generic/pgtable-nopud.h>
9826-
9827 static inline pudval_t native_pud_val(pud_t pud)
9828 {
9829 return native_pgd_val(pud.pgd);
9830@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm
9831 return pmd.pmd;
9832 }
9833 #else
9834-#include <asm-generic/pgtable-nopmd.h>
9835-
9836 static inline pmdval_t native_pmd_val(pmd_t pmd)
9837 {
9838 return native_pgd_val(pmd.pud.pgd);
9839@@ -278,7 +287,16 @@ typedef struct page *pgtable_t;
9840
9841 extern pteval_t __supported_pte_mask;
9842 extern void set_nx(void);
9843+
9844+#ifdef CONFIG_X86_32
9845+#ifdef CONFIG_X86_PAE
9846 extern int nx_enabled;
9847+#else
9848+#define nx_enabled (0)
9849+#endif
9850+#else
9851+#define nx_enabled (1)
9852+#endif
9853
9854 #define pgprot_writecombine pgprot_writecombine
9855 extern pgprot_t pgprot_writecombine(pgprot_t prot);
9856diff -urNp linux-2.6.32.43/arch/x86/include/asm/processor.h linux-2.6.32.43/arch/x86/include/asm/processor.h
9857--- linux-2.6.32.43/arch/x86/include/asm/processor.h 2011-04-22 19:16:29.000000000 -0400
9858+++ linux-2.6.32.43/arch/x86/include/asm/processor.h 2011-05-11 18:25:15.000000000 -0400
9859@@ -272,7 +272,7 @@ struct tss_struct {
9860
9861 } ____cacheline_aligned;
9862
9863-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
9864+extern struct tss_struct init_tss[NR_CPUS];
9865
9866 /*
9867 * Save the original ist values for checking stack pointers during debugging
9868@@ -888,11 +888,18 @@ static inline void spin_lock_prefetch(co
9869 */
9870 #define TASK_SIZE PAGE_OFFSET
9871 #define TASK_SIZE_MAX TASK_SIZE
9872+
9873+#ifdef CONFIG_PAX_SEGMEXEC
9874+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
9875+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
9876+#else
9877 #define STACK_TOP TASK_SIZE
9878-#define STACK_TOP_MAX STACK_TOP
9879+#endif
9880+
9881+#define STACK_TOP_MAX TASK_SIZE
9882
9883 #define INIT_THREAD { \
9884- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9885+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9886 .vm86_info = NULL, \
9887 .sysenter_cs = __KERNEL_CS, \
9888 .io_bitmap_ptr = NULL, \
9889@@ -906,7 +913,7 @@ static inline void spin_lock_prefetch(co
9890 */
9891 #define INIT_TSS { \
9892 .x86_tss = { \
9893- .sp0 = sizeof(init_stack) + (long)&init_stack, \
9894+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
9895 .ss0 = __KERNEL_DS, \
9896 .ss1 = __KERNEL_CS, \
9897 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
9898@@ -917,11 +924,7 @@ static inline void spin_lock_prefetch(co
9899 extern unsigned long thread_saved_pc(struct task_struct *tsk);
9900
9901 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
9902-#define KSTK_TOP(info) \
9903-({ \
9904- unsigned long *__ptr = (unsigned long *)(info); \
9905- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
9906-})
9907+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
9908
9909 /*
9910 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
9911@@ -936,7 +939,7 @@ extern unsigned long thread_saved_pc(str
9912 #define task_pt_regs(task) \
9913 ({ \
9914 struct pt_regs *__regs__; \
9915- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
9916+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
9917 __regs__ - 1; \
9918 })
9919
9920@@ -946,13 +949,13 @@ extern unsigned long thread_saved_pc(str
9921 /*
9922 * User space process size. 47bits minus one guard page.
9923 */
9924-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
9925+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
9926
9927 /* This decides where the kernel will search for a free chunk of vm
9928 * space during mmap's.
9929 */
9930 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
9931- 0xc0000000 : 0xFFFFe000)
9932+ 0xc0000000 : 0xFFFFf000)
9933
9934 #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \
9935 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
9936@@ -963,11 +966,11 @@ extern unsigned long thread_saved_pc(str
9937 #define STACK_TOP_MAX TASK_SIZE_MAX
9938
9939 #define INIT_THREAD { \
9940- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9941+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9942 }
9943
9944 #define INIT_TSS { \
9945- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
9946+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
9947 }
9948
9949 /*
9950@@ -989,6 +992,10 @@ extern void start_thread(struct pt_regs
9951 */
9952 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
9953
9954+#ifdef CONFIG_PAX_SEGMEXEC
9955+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
9956+#endif
9957+
9958 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
9959
9960 /* Get/set a process' ability to use the timestamp counter instruction */
9961diff -urNp linux-2.6.32.43/arch/x86/include/asm/ptrace.h linux-2.6.32.43/arch/x86/include/asm/ptrace.h
9962--- linux-2.6.32.43/arch/x86/include/asm/ptrace.h 2011-03-27 14:31:47.000000000 -0400
9963+++ linux-2.6.32.43/arch/x86/include/asm/ptrace.h 2011-04-17 15:56:46.000000000 -0400
9964@@ -151,28 +151,29 @@ static inline unsigned long regs_return_
9965 }
9966
9967 /*
9968- * user_mode_vm(regs) determines whether a register set came from user mode.
9969+ * user_mode(regs) determines whether a register set came from user mode.
9970 * This is true if V8086 mode was enabled OR if the register set was from
9971 * protected mode with RPL-3 CS value. This tricky test checks that with
9972 * one comparison. Many places in the kernel can bypass this full check
9973- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
9974+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
9975+ * be used.
9976 */
9977-static inline int user_mode(struct pt_regs *regs)
9978+static inline int user_mode_novm(struct pt_regs *regs)
9979 {
9980 #ifdef CONFIG_X86_32
9981 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
9982 #else
9983- return !!(regs->cs & 3);
9984+ return !!(regs->cs & SEGMENT_RPL_MASK);
9985 #endif
9986 }
9987
9988-static inline int user_mode_vm(struct pt_regs *regs)
9989+static inline int user_mode(struct pt_regs *regs)
9990 {
9991 #ifdef CONFIG_X86_32
9992 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
9993 USER_RPL;
9994 #else
9995- return user_mode(regs);
9996+ return user_mode_novm(regs);
9997 #endif
9998 }
9999
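
The comment in the hunk above notes that one comparison covers both the "RPL 3 code segment" and the "virtual-8086 mode" cases. A small self-contained demo of that trick (my illustration, not from the patch; the sample CS and EFLAGS values are made up, the mask constants match the usual x86 definitions):

#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3UL         /* low two bits of CS = RPL      */
#define USER_RPL         0x3UL         /* ring 3                        */
#define X86_VM_MASK      0x00020000UL  /* EFLAGS.VM (virtual-8086 mode) */

static int came_from_user_mode(unsigned long cs, unsigned long flags)
{
        /* >= USER_RPL is satisfied either by RPL == 3 or by any non-zero
         * VM bit, so a single comparison covers both cases. */
        return ((cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
        printf("kernel CS, VM clear : %d\n", came_from_user_mode(0x10, 0x202));
        printf("user CS,   VM clear : %d\n", came_from_user_mode(0x73, 0x202));
        printf("kernel CS, VM set   : %d\n", came_from_user_mode(0x10, 0x20202));
        return 0;
}
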
10000diff -urNp linux-2.6.32.43/arch/x86/include/asm/reboot.h linux-2.6.32.43/arch/x86/include/asm/reboot.h
10001--- linux-2.6.32.43/arch/x86/include/asm/reboot.h 2011-03-27 14:31:47.000000000 -0400
10002+++ linux-2.6.32.43/arch/x86/include/asm/reboot.h 2011-08-05 20:33:55.000000000 -0400
10003@@ -6,19 +6,19 @@
10004 struct pt_regs;
10005
10006 struct machine_ops {
10007- void (*restart)(char *cmd);
10008- void (*halt)(void);
10009- void (*power_off)(void);
10010+ void (* __noreturn restart)(char *cmd);
10011+ void (* __noreturn halt)(void);
10012+ void (* __noreturn power_off)(void);
10013 void (*shutdown)(void);
10014 void (*crash_shutdown)(struct pt_regs *);
10015- void (*emergency_restart)(void);
10016-};
10017+ void (* __noreturn emergency_restart)(void);
10018+} __no_const;
10019
10020 extern struct machine_ops machine_ops;
10021
10022 void native_machine_crash_shutdown(struct pt_regs *regs);
10023 void native_machine_shutdown(void);
10024-void machine_real_restart(const unsigned char *code, int length);
10025+void machine_real_restart(const unsigned char *code, unsigned int length) __noreturn;
10026
10027 typedef void (*nmi_shootdown_cb)(int, struct die_args*);
10028 void nmi_shootdown_cpus(nmi_shootdown_cb callback);
10029diff -urNp linux-2.6.32.43/arch/x86/include/asm/rwsem.h linux-2.6.32.43/arch/x86/include/asm/rwsem.h
10030--- linux-2.6.32.43/arch/x86/include/asm/rwsem.h 2011-03-27 14:31:47.000000000 -0400
10031+++ linux-2.6.32.43/arch/x86/include/asm/rwsem.h 2011-04-17 15:56:46.000000000 -0400
10032@@ -118,6 +118,14 @@ static inline void __down_read(struct rw
10033 {
10034 asm volatile("# beginning down_read\n\t"
10035 LOCK_PREFIX _ASM_INC "(%1)\n\t"
10036+
10037+#ifdef CONFIG_PAX_REFCOUNT
10038+ "jno 0f\n"
10039+ LOCK_PREFIX _ASM_DEC "(%1)\n\t"
10040+ "int $4\n0:\n"
10041+ _ASM_EXTABLE(0b, 0b)
10042+#endif
10043+
10044 /* adds 0x00000001, returns the old value */
10045 " jns 1f\n"
10046 " call call_rwsem_down_read_failed\n"
10047@@ -139,6 +147,14 @@ static inline int __down_read_trylock(st
10048 "1:\n\t"
10049 " mov %1,%2\n\t"
10050 " add %3,%2\n\t"
10051+
10052+#ifdef CONFIG_PAX_REFCOUNT
10053+ "jno 0f\n"
10054+ "sub %3,%2\n"
10055+ "int $4\n0:\n"
10056+ _ASM_EXTABLE(0b, 0b)
10057+#endif
10058+
10059 " jle 2f\n\t"
10060 LOCK_PREFIX " cmpxchg %2,%0\n\t"
10061 " jnz 1b\n\t"
10062@@ -160,6 +176,14 @@ static inline void __down_write_nested(s
10063 tmp = RWSEM_ACTIVE_WRITE_BIAS;
10064 asm volatile("# beginning down_write\n\t"
10065 LOCK_PREFIX " xadd %1,(%2)\n\t"
10066+
10067+#ifdef CONFIG_PAX_REFCOUNT
10068+ "jno 0f\n"
10069+ "mov %1,(%2)\n"
10070+ "int $4\n0:\n"
10071+ _ASM_EXTABLE(0b, 0b)
10072+#endif
10073+
10074 /* subtract 0x0000ffff, returns the old value */
10075 " test %1,%1\n\t"
10076 /* was the count 0 before? */
10077@@ -198,6 +222,14 @@ static inline void __up_read(struct rw_s
10078 rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS;
10079 asm volatile("# beginning __up_read\n\t"
10080 LOCK_PREFIX " xadd %1,(%2)\n\t"
10081+
10082+#ifdef CONFIG_PAX_REFCOUNT
10083+ "jno 0f\n"
10084+ "mov %1,(%2)\n"
10085+ "int $4\n0:\n"
10086+ _ASM_EXTABLE(0b, 0b)
10087+#endif
10088+
10089 /* subtracts 1, returns the old value */
10090 " jns 1f\n\t"
10091 " call call_rwsem_wake\n"
10092@@ -216,6 +248,14 @@ static inline void __up_write(struct rw_
10093 rwsem_count_t tmp;
10094 asm volatile("# beginning __up_write\n\t"
10095 LOCK_PREFIX " xadd %1,(%2)\n\t"
10096+
10097+#ifdef CONFIG_PAX_REFCOUNT
10098+ "jno 0f\n"
10099+ "mov %1,(%2)\n"
10100+ "int $4\n0:\n"
10101+ _ASM_EXTABLE(0b, 0b)
10102+#endif
10103+
10104 /* tries to transition
10105 0xffff0001 -> 0x00000000 */
10106 " jz 1f\n"
10107@@ -234,6 +274,14 @@ static inline void __downgrade_write(str
10108 {
10109 asm volatile("# beginning __downgrade_write\n\t"
10110 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
10111+
10112+#ifdef CONFIG_PAX_REFCOUNT
10113+ "jno 0f\n"
10114+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
10115+ "int $4\n0:\n"
10116+ _ASM_EXTABLE(0b, 0b)
10117+#endif
10118+
10119 /*
10120 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
10121 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
10122@@ -253,7 +301,15 @@ static inline void __downgrade_write(str
10123 static inline void rwsem_atomic_add(rwsem_count_t delta,
10124 struct rw_semaphore *sem)
10125 {
10126- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
10127+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
10128+
10129+#ifdef CONFIG_PAX_REFCOUNT
10130+ "jno 0f\n"
10131+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
10132+ "int $4\n0:\n"
10133+ _ASM_EXTABLE(0b, 0b)
10134+#endif
10135+
10136 : "+m" (sem->count)
10137 : "er" (delta));
10138 }
10139@@ -266,7 +322,15 @@ static inline rwsem_count_t rwsem_atomic
10140 {
10141 rwsem_count_t tmp = delta;
10142
10143- asm volatile(LOCK_PREFIX "xadd %0,%1"
10144+ asm volatile(LOCK_PREFIX "xadd %0,%1\n"
10145+
10146+#ifdef CONFIG_PAX_REFCOUNT
10147+ "jno 0f\n"
10148+ "mov %0,%1\n"
10149+ "int $4\n0:\n"
10150+ _ASM_EXTABLE(0b, 0b)
10151+#endif
10152+
10153 : "+r" (tmp), "+m" (sem->count)
10154 : : "memory");
10155
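
The PAX_REFCOUNT additions above follow each atomic update with a "jno": if the signed result overflowed, the update is undone and "int $4" raises the overflow exception. A hedged plain-C sketch of the same idea (my analogue, not the patch's inline asm; it checks before committing rather than undoing afterwards, which gives the same observable result):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int checked_add(int *counter, int delta)
{
        int old = *counter;
        int new_val;

        if (__builtin_add_overflow(old, delta, &new_val)) {
                /* analogue of the "jno ... int $4" path: leave the old
                 * value in place and trap instead of wrapping around */
                fprintf(stderr, "refcount overflow detected, aborting\n");
                abort();
        }
        *counter = new_val;
        return new_val;
}

int main(void)
{
        int sem_count = 0;

        printf("after add: %d\n", checked_add(&sem_count, 1));

        sem_count = INT_MAX;
        checked_add(&sem_count, 1);   /* trips the overflow path */
        return 0;
}
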
10156diff -urNp linux-2.6.32.43/arch/x86/include/asm/segment.h linux-2.6.32.43/arch/x86/include/asm/segment.h
10157--- linux-2.6.32.43/arch/x86/include/asm/segment.h 2011-03-27 14:31:47.000000000 -0400
10158+++ linux-2.6.32.43/arch/x86/include/asm/segment.h 2011-04-17 15:56:46.000000000 -0400
10159@@ -62,8 +62,8 @@
10160 * 26 - ESPFIX small SS
10161 * 27 - per-cpu [ offset to per-cpu data area ]
10162 * 28 - stack_canary-20 [ for stack protector ]
10163- * 29 - unused
10164- * 30 - unused
10165+ * 29 - PCI BIOS CS
10166+ * 30 - PCI BIOS DS
10167 * 31 - TSS for double fault handler
10168 */
10169 #define GDT_ENTRY_TLS_MIN 6
10170@@ -77,6 +77,8 @@
10171
10172 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0)
10173
10174+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
10175+
10176 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1)
10177
10178 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4)
10179@@ -88,7 +90,7 @@
10180 #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14)
10181 #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)
10182
10183-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10184+#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15)
10185 #ifdef CONFIG_SMP
10186 #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8)
10187 #else
10188@@ -102,6 +104,12 @@
10189 #define __KERNEL_STACK_CANARY 0
10190 #endif
10191
10192+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17)
10193+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
10194+
10195+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18)
10196+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
10197+
10198 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
10199
10200 /*
10201@@ -139,7 +147,7 @@
10202 */
10203
10204 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
10205-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
10206+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
10207
10208
10209 #else
10210@@ -163,6 +171,8 @@
10211 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3)
10212 #define __USER32_DS __USER_DS
10213
10214+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
10215+
10216 #define GDT_ENTRY_TSS 8 /* needs two entries */
10217 #define GDT_ENTRY_LDT 10 /* needs two entries */
10218 #define GDT_ENTRY_TLS_MIN 12
10219@@ -183,6 +193,7 @@
10220 #endif
10221
10222 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8)
10223+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8)
10224 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8)
10225 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3)
10226 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3)
10227diff -urNp linux-2.6.32.43/arch/x86/include/asm/smp.h linux-2.6.32.43/arch/x86/include/asm/smp.h
10228--- linux-2.6.32.43/arch/x86/include/asm/smp.h 2011-03-27 14:31:47.000000000 -0400
10229+++ linux-2.6.32.43/arch/x86/include/asm/smp.h 2011-08-05 20:33:55.000000000 -0400
10230@@ -24,7 +24,7 @@ extern unsigned int num_processors;
10231 DECLARE_PER_CPU(cpumask_var_t, cpu_sibling_map);
10232 DECLARE_PER_CPU(cpumask_var_t, cpu_core_map);
10233 DECLARE_PER_CPU(u16, cpu_llc_id);
10234-DECLARE_PER_CPU(int, cpu_number);
10235+DECLARE_PER_CPU(unsigned int, cpu_number);
10236
10237 static inline struct cpumask *cpu_sibling_mask(int cpu)
10238 {
10239@@ -40,10 +40,7 @@ DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_ap
10240 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
10241
10242 /* Static state in head.S used to set up a CPU */
10243-extern struct {
10244- void *sp;
10245- unsigned short ss;
10246-} stack_start;
10247+extern unsigned long stack_start; /* Initial stack pointer address */
10248
10249 struct smp_ops {
10250 void (*smp_prepare_boot_cpu)(void);
10251@@ -60,7 +57,7 @@ struct smp_ops {
10252
10253 void (*send_call_func_ipi)(const struct cpumask *mask);
10254 void (*send_call_func_single_ipi)(int cpu);
10255-};
10256+} __no_const;
10257
10258 /* Globals due to paravirt */
10259 extern void set_cpu_sibling_map(int cpu);
10260@@ -175,14 +172,8 @@ extern unsigned disabled_cpus __cpuinitd
10261 extern int safe_smp_processor_id(void);
10262
10263 #elif defined(CONFIG_X86_64_SMP)
10264-#define raw_smp_processor_id() (percpu_read(cpu_number))
10265-
10266-#define stack_smp_processor_id() \
10267-({ \
10268- struct thread_info *ti; \
10269- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
10270- ti->cpu; \
10271-})
10272+#define raw_smp_processor_id() (percpu_read(cpu_number))
10273+#define stack_smp_processor_id() raw_smp_processor_id()
10274 #define safe_smp_processor_id() smp_processor_id()
10275
10276 #endif
10277diff -urNp linux-2.6.32.43/arch/x86/include/asm/spinlock.h linux-2.6.32.43/arch/x86/include/asm/spinlock.h
10278--- linux-2.6.32.43/arch/x86/include/asm/spinlock.h 2011-03-27 14:31:47.000000000 -0400
10279+++ linux-2.6.32.43/arch/x86/include/asm/spinlock.h 2011-04-17 15:56:46.000000000 -0400
10280@@ -249,6 +249,14 @@ static inline int __raw_write_can_lock(r
10281 static inline void __raw_read_lock(raw_rwlock_t *rw)
10282 {
10283 asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
10284+
10285+#ifdef CONFIG_PAX_REFCOUNT
10286+ "jno 0f\n"
10287+ LOCK_PREFIX " addl $1,(%0)\n"
10288+ "int $4\n0:\n"
10289+ _ASM_EXTABLE(0b, 0b)
10290+#endif
10291+
10292 "jns 1f\n"
10293 "call __read_lock_failed\n\t"
10294 "1:\n"
10295@@ -258,6 +266,14 @@ static inline void __raw_read_lock(raw_r
10296 static inline void __raw_write_lock(raw_rwlock_t *rw)
10297 {
10298 asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
10299+
10300+#ifdef CONFIG_PAX_REFCOUNT
10301+ "jno 0f\n"
10302+ LOCK_PREFIX " addl %1,(%0)\n"
10303+ "int $4\n0:\n"
10304+ _ASM_EXTABLE(0b, 0b)
10305+#endif
10306+
10307 "jz 1f\n"
10308 "call __write_lock_failed\n\t"
10309 "1:\n"
10310@@ -286,12 +302,29 @@ static inline int __raw_write_trylock(ra
10311
10312 static inline void __raw_read_unlock(raw_rwlock_t *rw)
10313 {
10314- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
10315+ asm volatile(LOCK_PREFIX "incl %0\n"
10316+
10317+#ifdef CONFIG_PAX_REFCOUNT
10318+ "jno 0f\n"
10319+ LOCK_PREFIX "decl %0\n"
10320+ "int $4\n0:\n"
10321+ _ASM_EXTABLE(0b, 0b)
10322+#endif
10323+
10324+ :"+m" (rw->lock) : : "memory");
10325 }
10326
10327 static inline void __raw_write_unlock(raw_rwlock_t *rw)
10328 {
10329- asm volatile(LOCK_PREFIX "addl %1, %0"
10330+ asm volatile(LOCK_PREFIX "addl %1, %0\n"
10331+
10332+#ifdef CONFIG_PAX_REFCOUNT
10333+ "jno 0f\n"
10334+ LOCK_PREFIX "subl %1, %0\n"
10335+ "int $4\n0:\n"
10336+ _ASM_EXTABLE(0b, 0b)
10337+#endif
10338+
10339 : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
10340 }
10341
10342diff -urNp linux-2.6.32.43/arch/x86/include/asm/stackprotector.h linux-2.6.32.43/arch/x86/include/asm/stackprotector.h
10343--- linux-2.6.32.43/arch/x86/include/asm/stackprotector.h 2011-03-27 14:31:47.000000000 -0400
10344+++ linux-2.6.32.43/arch/x86/include/asm/stackprotector.h 2011-07-06 19:53:33.000000000 -0400
10345@@ -48,7 +48,7 @@
10346 * head_32 for boot CPU and setup_per_cpu_areas() for others.
10347 */
10348 #define GDT_STACK_CANARY_INIT \
10349- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
10350+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
10351
10352 /*
10353 * Initialize the stackprotector canary value.
10354@@ -113,7 +113,7 @@ static inline void setup_stack_canary_se
10355
10356 static inline void load_stack_canary_segment(void)
10357 {
10358-#ifdef CONFIG_X86_32
10359+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
10360 asm volatile ("mov %0, %%gs" : : "r" (0));
10361 #endif
10362 }
10363diff -urNp linux-2.6.32.43/arch/x86/include/asm/system.h linux-2.6.32.43/arch/x86/include/asm/system.h
10364--- linux-2.6.32.43/arch/x86/include/asm/system.h 2011-03-27 14:31:47.000000000 -0400
10365+++ linux-2.6.32.43/arch/x86/include/asm/system.h 2011-05-22 23:02:03.000000000 -0400
10366@@ -132,7 +132,7 @@ do { \
10367 "thread_return:\n\t" \
10368 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
10369 __switch_canary \
10370- "movq %P[thread_info](%%rsi),%%r8\n\t" \
10371+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
10372 "movq %%rax,%%rdi\n\t" \
10373 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
10374 "jnz ret_from_fork\n\t" \
10375@@ -143,7 +143,7 @@ do { \
10376 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
10377 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
10378 [_tif_fork] "i" (_TIF_FORK), \
10379- [thread_info] "i" (offsetof(struct task_struct, stack)), \
10380+ [thread_info] "m" (per_cpu_var(current_tinfo)), \
10381 [current_task] "m" (per_cpu_var(current_task)) \
10382 __switch_canary_iparam \
10383 : "memory", "cc" __EXTRA_CLOBBER)
10384@@ -200,7 +200,7 @@ static inline unsigned long get_limit(un
10385 {
10386 unsigned long __limit;
10387 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
10388- return __limit + 1;
10389+ return __limit;
10390 }
10391
10392 static inline void native_clts(void)
10393@@ -340,12 +340,12 @@ void enable_hlt(void);
10394
10395 void cpu_idle_wait(void);
10396
10397-extern unsigned long arch_align_stack(unsigned long sp);
10398+#define arch_align_stack(x) ((x) & ~0xfUL)
10399 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
10400
10401 void default_idle(void);
10402
10403-void stop_this_cpu(void *dummy);
10404+void stop_this_cpu(void *dummy) __noreturn;
10405
10406 /*
10407 * Force strict CPU ordering.
10408diff -urNp linux-2.6.32.43/arch/x86/include/asm/thread_info.h linux-2.6.32.43/arch/x86/include/asm/thread_info.h
10409--- linux-2.6.32.43/arch/x86/include/asm/thread_info.h 2011-03-27 14:31:47.000000000 -0400
10410+++ linux-2.6.32.43/arch/x86/include/asm/thread_info.h 2011-05-17 19:26:34.000000000 -0400
10411@@ -10,6 +10,7 @@
10412 #include <linux/compiler.h>
10413 #include <asm/page.h>
10414 #include <asm/types.h>
10415+#include <asm/percpu.h>
10416
10417 /*
10418 * low level task data that entry.S needs immediate access to
10419@@ -24,7 +25,6 @@ struct exec_domain;
10420 #include <asm/atomic.h>
10421
10422 struct thread_info {
10423- struct task_struct *task; /* main task structure */
10424 struct exec_domain *exec_domain; /* execution domain */
10425 __u32 flags; /* low level flags */
10426 __u32 status; /* thread synchronous flags */
10427@@ -34,18 +34,12 @@ struct thread_info {
10428 mm_segment_t addr_limit;
10429 struct restart_block restart_block;
10430 void __user *sysenter_return;
10431-#ifdef CONFIG_X86_32
10432- unsigned long previous_esp; /* ESP of the previous stack in
10433- case of nested (IRQ) stacks
10434- */
10435- __u8 supervisor_stack[0];
10436-#endif
10437+ unsigned long lowest_stack;
10438 int uaccess_err;
10439 };
10440
10441-#define INIT_THREAD_INFO(tsk) \
10442+#define INIT_THREAD_INFO \
10443 { \
10444- .task = &tsk, \
10445 .exec_domain = &default_exec_domain, \
10446 .flags = 0, \
10447 .cpu = 0, \
10448@@ -56,7 +50,7 @@ struct thread_info {
10449 }, \
10450 }
10451
10452-#define init_thread_info (init_thread_union.thread_info)
10453+#define init_thread_info (init_thread_union.stack)
10454 #define init_stack (init_thread_union.stack)
10455
10456 #else /* !__ASSEMBLY__ */
10457@@ -163,6 +157,23 @@ struct thread_info {
10458 #define alloc_thread_info(tsk) \
10459 ((struct thread_info *)__get_free_pages(THREAD_FLAGS, THREAD_ORDER))
10460
10461+#ifdef __ASSEMBLY__
10462+/* how to get the thread information struct from ASM */
10463+#define GET_THREAD_INFO(reg) \
10464+ mov PER_CPU_VAR(current_tinfo), reg
10465+
10466+/* use this one if reg already contains %esp */
10467+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
10468+#else
10469+/* how to get the thread information struct from C */
10470+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
10471+
10472+static __always_inline struct thread_info *current_thread_info(void)
10473+{
10474+ return percpu_read_stable(current_tinfo);
10475+}
10476+#endif
10477+
10478 #ifdef CONFIG_X86_32
10479
10480 #define STACK_WARN (THREAD_SIZE/8)
10481@@ -173,35 +184,13 @@ struct thread_info {
10482 */
10483 #ifndef __ASSEMBLY__
10484
10485-
10486 /* how to get the current stack pointer from C */
10487 register unsigned long current_stack_pointer asm("esp") __used;
10488
10489-/* how to get the thread information struct from C */
10490-static inline struct thread_info *current_thread_info(void)
10491-{
10492- return (struct thread_info *)
10493- (current_stack_pointer & ~(THREAD_SIZE - 1));
10494-}
10495-
10496-#else /* !__ASSEMBLY__ */
10497-
10498-/* how to get the thread information struct from ASM */
10499-#define GET_THREAD_INFO(reg) \
10500- movl $-THREAD_SIZE, reg; \
10501- andl %esp, reg
10502-
10503-/* use this one if reg already contains %esp */
10504-#define GET_THREAD_INFO_WITH_ESP(reg) \
10505- andl $-THREAD_SIZE, reg
10506-
10507 #endif
10508
10509 #else /* X86_32 */
10510
10511-#include <asm/percpu.h>
10512-#define KERNEL_STACK_OFFSET (5*8)
10513-
10514 /*
10515 * macros/functions for gaining access to the thread information structure
10516 * preempt_count needs to be 1 initially, until the scheduler is functional.
10517@@ -209,21 +198,8 @@ static inline struct thread_info *curren
10518 #ifndef __ASSEMBLY__
10519 DECLARE_PER_CPU(unsigned long, kernel_stack);
10520
10521-static inline struct thread_info *current_thread_info(void)
10522-{
10523- struct thread_info *ti;
10524- ti = (void *)(percpu_read_stable(kernel_stack) +
10525- KERNEL_STACK_OFFSET - THREAD_SIZE);
10526- return ti;
10527-}
10528-
10529-#else /* !__ASSEMBLY__ */
10530-
10531-/* how to get the thread information struct from ASM */
10532-#define GET_THREAD_INFO(reg) \
10533- movq PER_CPU_VAR(kernel_stack),reg ; \
10534- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
10535-
10536+/* how to get the current stack pointer from C */
10537+register unsigned long current_stack_pointer asm("rsp") __used;
10538 #endif
10539
10540 #endif /* !X86_32 */
10541@@ -260,5 +236,16 @@ extern void arch_task_cache_init(void);
10542 extern void free_thread_info(struct thread_info *ti);
10543 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
10544 #define arch_task_cache_init arch_task_cache_init
10545+
10546+#define __HAVE_THREAD_FUNCTIONS
10547+#define task_thread_info(task) (&(task)->tinfo)
10548+#define task_stack_page(task) ((task)->stack)
10549+#define setup_thread_stack(p, org) do {} while (0)
10550+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
10551+
10552+#define __HAVE_ARCH_TASK_STRUCT_ALLOCATOR
10553+extern struct task_struct *alloc_task_struct(void);
10554+extern void free_task_struct(struct task_struct *);
10555+
10556 #endif
10557 #endif /* _ASM_X86_THREAD_INFO_H */
10558diff -urNp linux-2.6.32.43/arch/x86/include/asm/uaccess_32.h linux-2.6.32.43/arch/x86/include/asm/uaccess_32.h
10559--- linux-2.6.32.43/arch/x86/include/asm/uaccess_32.h 2011-03-27 14:31:47.000000000 -0400
10560+++ linux-2.6.32.43/arch/x86/include/asm/uaccess_32.h 2011-05-16 21:46:57.000000000 -0400
10561@@ -44,6 +44,11 @@ unsigned long __must_check __copy_from_u
10562 static __always_inline unsigned long __must_check
10563 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
10564 {
10565+ pax_track_stack();
10566+
10567+ if ((long)n < 0)
10568+ return n;
10569+
10570 if (__builtin_constant_p(n)) {
10571 unsigned long ret;
10572
10573@@ -62,6 +67,8 @@ __copy_to_user_inatomic(void __user *to,
10574 return ret;
10575 }
10576 }
10577+ if (!__builtin_constant_p(n))
10578+ check_object_size(from, n, true);
10579 return __copy_to_user_ll(to, from, n);
10580 }
10581
10582@@ -83,12 +90,16 @@ static __always_inline unsigned long __m
10583 __copy_to_user(void __user *to, const void *from, unsigned long n)
10584 {
10585 might_fault();
10586+
10587 return __copy_to_user_inatomic(to, from, n);
10588 }
10589
10590 static __always_inline unsigned long
10591 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
10592 {
10593+ if ((long)n < 0)
10594+ return n;
10595+
10596 /* Avoid zeroing the tail if the copy fails..
10597 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
10598 * but as the zeroing behaviour is only significant when n is not
10599@@ -138,6 +149,12 @@ static __always_inline unsigned long
10600 __copy_from_user(void *to, const void __user *from, unsigned long n)
10601 {
10602 might_fault();
10603+
10604+ pax_track_stack();
10605+
10606+ if ((long)n < 0)
10607+ return n;
10608+
10609 if (__builtin_constant_p(n)) {
10610 unsigned long ret;
10611
10612@@ -153,6 +170,8 @@ __copy_from_user(void *to, const void __
10613 return ret;
10614 }
10615 }
10616+ if (!__builtin_constant_p(n))
10617+ check_object_size(to, n, false);
10618 return __copy_from_user_ll(to, from, n);
10619 }
10620
10621@@ -160,6 +179,10 @@ static __always_inline unsigned long __c
10622 const void __user *from, unsigned long n)
10623 {
10624 might_fault();
10625+
10626+ if ((long)n < 0)
10627+ return n;
10628+
10629 if (__builtin_constant_p(n)) {
10630 unsigned long ret;
10631
10632@@ -182,14 +205,62 @@ static __always_inline unsigned long
10633 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
10634 unsigned long n)
10635 {
10636- return __copy_from_user_ll_nocache_nozero(to, from, n);
10637+ if ((long)n < 0)
10638+ return n;
10639+
10640+ return __copy_from_user_ll_nocache_nozero(to, from, n);
10641+}
10642+
10643+/**
10644+ * copy_to_user: - Copy a block of data into user space.
10645+ * @to: Destination address, in user space.
10646+ * @from: Source address, in kernel space.
10647+ * @n: Number of bytes to copy.
10648+ *
10649+ * Context: User context only. This function may sleep.
10650+ *
10651+ * Copy data from kernel space to user space.
10652+ *
10653+ * Returns number of bytes that could not be copied.
10654+ * On success, this will be zero.
10655+ */
10656+static __always_inline unsigned long __must_check
10657+copy_to_user(void __user *to, const void *from, unsigned long n)
10658+{
10659+ if (access_ok(VERIFY_WRITE, to, n))
10660+ n = __copy_to_user(to, from, n);
10661+ return n;
10662+}
10663+
10664+/**
10665+ * copy_from_user: - Copy a block of data from user space.
10666+ * @to: Destination address, in kernel space.
10667+ * @from: Source address, in user space.
10668+ * @n: Number of bytes to copy.
10669+ *
10670+ * Context: User context only. This function may sleep.
10671+ *
10672+ * Copy data from user space to kernel space.
10673+ *
10674+ * Returns number of bytes that could not be copied.
10675+ * On success, this will be zero.
10676+ *
10677+ * If some data could not be copied, this function will pad the copied
10678+ * data to the requested size using zero bytes.
10679+ */
10680+static __always_inline unsigned long __must_check
10681+copy_from_user(void *to, const void __user *from, unsigned long n)
10682+{
10683+ if (access_ok(VERIFY_READ, from, n))
10684+ n = __copy_from_user(to, from, n);
10685+ else if ((long)n > 0) {
10686+ if (!__builtin_constant_p(n))
10687+ check_object_size(to, n, false);
10688+ memset(to, 0, n);
10689+ }
10690+ return n;
10691 }
10692
10693-unsigned long __must_check copy_to_user(void __user *to,
10694- const void *from, unsigned long n);
10695-unsigned long __must_check copy_from_user(void *to,
10696- const void __user *from,
10697- unsigned long n);
10698 long __must_check strncpy_from_user(char *dst, const char __user *src,
10699 long count);
10700 long __must_check __strncpy_from_user(char *dst,
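
The inlined copy_from_user() introduced above rejects sign-flipped lengths, copies only when the source range passes access_ok(), and zero-fills the kernel destination otherwise so no stale data leaks. A simplified userspace stand-in for that flow (assumptions throughout: a fixed 64-byte window plays the role of user space, and range_ok() plays the role of access_ok(); this is not kernel code):

#include <stdio.h>
#include <string.h>

/* stand-in for access_ok(): only a fixed window counts as "user space" */
static int range_ok(const char *base, const char *ptr, size_t n, size_t limit)
{
        return ptr >= base && n <= limit && (size_t)(ptr - base) <= limit - n;
}

static unsigned long copy_from_window(void *to, const char *window,
                                      const char *from, unsigned long n)
{
        if ((long)n < 0)                  /* reject sign-flipped lengths */
                return n;
        if (range_ok(window, from, n, 64)) {
                memcpy(to, from, n);      /* the "copy succeeded" path   */
                return 0;
        }
        memset(to, 0, n);                 /* failed: don't leak old data */
        return n;
}

int main(void)
{
        char window[64] = "hello from the user window";
        char buf[32];

        printf("ok copy  -> %lu left\n", copy_from_window(buf, window, window, 6));
        printf("bad copy -> %lu left\n", copy_from_window(buf, window, window + 60, 32));
        return 0;
}
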
10701diff -urNp linux-2.6.32.43/arch/x86/include/asm/uaccess_64.h linux-2.6.32.43/arch/x86/include/asm/uaccess_64.h
10702--- linux-2.6.32.43/arch/x86/include/asm/uaccess_64.h 2011-03-27 14:31:47.000000000 -0400
10703+++ linux-2.6.32.43/arch/x86/include/asm/uaccess_64.h 2011-05-16 21:46:57.000000000 -0400
10704@@ -9,6 +9,9 @@
10705 #include <linux/prefetch.h>
10706 #include <linux/lockdep.h>
10707 #include <asm/page.h>
10708+#include <asm/pgtable.h>
10709+
10710+#define set_fs(x) (current_thread_info()->addr_limit = (x))
10711
10712 /*
10713 * Copy To/From Userspace
10714@@ -19,113 +22,203 @@ __must_check unsigned long
10715 copy_user_generic(void *to, const void *from, unsigned len);
10716
10717 __must_check unsigned long
10718-copy_to_user(void __user *to, const void *from, unsigned len);
10719-__must_check unsigned long
10720-copy_from_user(void *to, const void __user *from, unsigned len);
10721-__must_check unsigned long
10722 copy_in_user(void __user *to, const void __user *from, unsigned len);
10723
10724 static __always_inline __must_check
10725-int __copy_from_user(void *dst, const void __user *src, unsigned size)
10726+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size)
10727 {
10728- int ret = 0;
10729+ unsigned ret = 0;
10730
10731 might_fault();
10732- if (!__builtin_constant_p(size))
10733- return copy_user_generic(dst, (__force void *)src, size);
10734+
10735+ if ((int)size < 0)
10736+ return size;
10737+
10738+#ifdef CONFIG_PAX_MEMORY_UDEREF
10739+ if (!__access_ok(VERIFY_READ, src, size))
10740+ return size;
10741+#endif
10742+
10743+ if (!__builtin_constant_p(size)) {
10744+ check_object_size(dst, size, false);
10745+
10746+#ifdef CONFIG_PAX_MEMORY_UDEREF
10747+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10748+ src += PAX_USER_SHADOW_BASE;
10749+#endif
10750+
10751+ return copy_user_generic(dst, (__force const void *)src, size);
10752+ }
10753 switch (size) {
10754- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
10755+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
10756 ret, "b", "b", "=q", 1);
10757 return ret;
10758- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
10759+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
10760 ret, "w", "w", "=r", 2);
10761 return ret;
10762- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
10763+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
10764 ret, "l", "k", "=r", 4);
10765 return ret;
10766- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
10767+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10768 ret, "q", "", "=r", 8);
10769 return ret;
10770 case 10:
10771- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10772+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10773 ret, "q", "", "=r", 10);
10774 if (unlikely(ret))
10775 return ret;
10776 __get_user_asm(*(u16 *)(8 + (char *)dst),
10777- (u16 __user *)(8 + (char __user *)src),
10778+ (const u16 __user *)(8 + (const char __user *)src),
10779 ret, "w", "w", "=r", 2);
10780 return ret;
10781 case 16:
10782- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
10783+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
10784 ret, "q", "", "=r", 16);
10785 if (unlikely(ret))
10786 return ret;
10787 __get_user_asm(*(u64 *)(8 + (char *)dst),
10788- (u64 __user *)(8 + (char __user *)src),
10789+ (const u64 __user *)(8 + (const char __user *)src),
10790 ret, "q", "", "=r", 8);
10791 return ret;
10792 default:
10793- return copy_user_generic(dst, (__force void *)src, size);
10794+
10795+#ifdef CONFIG_PAX_MEMORY_UDEREF
10796+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10797+ src += PAX_USER_SHADOW_BASE;
10798+#endif
10799+
10800+ return copy_user_generic(dst, (__force const void *)src, size);
10801 }
10802 }
10803
10804 static __always_inline __must_check
10805-int __copy_to_user(void __user *dst, const void *src, unsigned size)
10806+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size)
10807 {
10808- int ret = 0;
10809+ unsigned ret = 0;
10810
10811 might_fault();
10812- if (!__builtin_constant_p(size))
10813+
10814+ pax_track_stack();
10815+
10816+ if ((int)size < 0)
10817+ return size;
10818+
10819+#ifdef CONFIG_PAX_MEMORY_UDEREF
10820+ if (!__access_ok(VERIFY_WRITE, dst, size))
10821+ return size;
10822+#endif
10823+
10824+ if (!__builtin_constant_p(size)) {
10825+ check_object_size(src, size, true);
10826+
10827+#ifdef CONFIG_PAX_MEMORY_UDEREF
10828+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10829+ dst += PAX_USER_SHADOW_BASE;
10830+#endif
10831+
10832 return copy_user_generic((__force void *)dst, src, size);
10833+ }
10834 switch (size) {
10835- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
10836+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
10837 ret, "b", "b", "iq", 1);
10838 return ret;
10839- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
10840+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
10841 ret, "w", "w", "ir", 2);
10842 return ret;
10843- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
10844+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
10845 ret, "l", "k", "ir", 4);
10846 return ret;
10847- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
10848+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10849 ret, "q", "", "er", 8);
10850 return ret;
10851 case 10:
10852- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10853+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10854 ret, "q", "", "er", 10);
10855 if (unlikely(ret))
10856 return ret;
10857 asm("":::"memory");
10858- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
10859+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
10860 ret, "w", "w", "ir", 2);
10861 return ret;
10862 case 16:
10863- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
10864+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
10865 ret, "q", "", "er", 16);
10866 if (unlikely(ret))
10867 return ret;
10868 asm("":::"memory");
10869- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
10870+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
10871 ret, "q", "", "er", 8);
10872 return ret;
10873 default:
10874+
10875+#ifdef CONFIG_PAX_MEMORY_UDEREF
10876+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10877+ dst += PAX_USER_SHADOW_BASE;
10878+#endif
10879+
10880 return copy_user_generic((__force void *)dst, src, size);
10881 }
10882 }
10883
10884 static __always_inline __must_check
10885-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10886+unsigned long copy_to_user(void __user *to, const void *from, unsigned len)
10887+{
10888+ if (access_ok(VERIFY_WRITE, to, len))
10889+ len = __copy_to_user(to, from, len);
10890+ return len;
10891+}
10892+
10893+static __always_inline __must_check
10894+unsigned long copy_from_user(void *to, const void __user *from, unsigned len)
10895+{
10896+ if ((int)len < 0)
10897+ return len;
10898+
10899+ if (access_ok(VERIFY_READ, from, len))
10900+ len = __copy_from_user(to, from, len);
10901+ else if ((int)len > 0) {
10902+ if (!__builtin_constant_p(len))
10903+ check_object_size(to, len, false);
10904+ memset(to, 0, len);
10905+ }
10906+ return len;
10907+}
10908+
10909+static __always_inline __must_check
10910+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size)
10911 {
10912- int ret = 0;
10913+ unsigned ret = 0;
10914
10915 might_fault();
10916- if (!__builtin_constant_p(size))
10917+
10918+ pax_track_stack();
10919+
10920+ if ((int)size < 0)
10921+ return size;
10922+
10923+#ifdef CONFIG_PAX_MEMORY_UDEREF
10924+ if (!__access_ok(VERIFY_READ, src, size))
10925+ return size;
10926+ if (!__access_ok(VERIFY_WRITE, dst, size))
10927+ return size;
10928+#endif
10929+
10930+ if (!__builtin_constant_p(size)) {
10931+
10932+#ifdef CONFIG_PAX_MEMORY_UDEREF
10933+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10934+ src += PAX_USER_SHADOW_BASE;
10935+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10936+ dst += PAX_USER_SHADOW_BASE;
10937+#endif
10938+
10939 return copy_user_generic((__force void *)dst,
10940- (__force void *)src, size);
10941+ (__force const void *)src, size);
10942+ }
10943 switch (size) {
10944 case 1: {
10945 u8 tmp;
10946- __get_user_asm(tmp, (u8 __user *)src,
10947+ __get_user_asm(tmp, (const u8 __user *)src,
10948 ret, "b", "b", "=q", 1);
10949 if (likely(!ret))
10950 __put_user_asm(tmp, (u8 __user *)dst,
10951@@ -134,7 +227,7 @@ int __copy_in_user(void __user *dst, con
10952 }
10953 case 2: {
10954 u16 tmp;
10955- __get_user_asm(tmp, (u16 __user *)src,
10956+ __get_user_asm(tmp, (const u16 __user *)src,
10957 ret, "w", "w", "=r", 2);
10958 if (likely(!ret))
10959 __put_user_asm(tmp, (u16 __user *)dst,
10960@@ -144,7 +237,7 @@ int __copy_in_user(void __user *dst, con
10961
10962 case 4: {
10963 u32 tmp;
10964- __get_user_asm(tmp, (u32 __user *)src,
10965+ __get_user_asm(tmp, (const u32 __user *)src,
10966 ret, "l", "k", "=r", 4);
10967 if (likely(!ret))
10968 __put_user_asm(tmp, (u32 __user *)dst,
10969@@ -153,7 +246,7 @@ int __copy_in_user(void __user *dst, con
10970 }
10971 case 8: {
10972 u64 tmp;
10973- __get_user_asm(tmp, (u64 __user *)src,
10974+ __get_user_asm(tmp, (const u64 __user *)src,
10975 ret, "q", "", "=r", 8);
10976 if (likely(!ret))
10977 __put_user_asm(tmp, (u64 __user *)dst,
10978@@ -161,8 +254,16 @@ int __copy_in_user(void __user *dst, con
10979 return ret;
10980 }
10981 default:
10982+
10983+#ifdef CONFIG_PAX_MEMORY_UDEREF
10984+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
10985+ src += PAX_USER_SHADOW_BASE;
10986+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
10987+ dst += PAX_USER_SHADOW_BASE;
10988+#endif
10989+
10990 return copy_user_generic((__force void *)dst,
10991- (__force void *)src, size);
10992+ (__force const void *)src, size);
10993 }
10994 }
10995
10996@@ -176,33 +277,75 @@ __must_check long strlen_user(const char
10997 __must_check unsigned long clear_user(void __user *mem, unsigned long len);
10998 __must_check unsigned long __clear_user(void __user *mem, unsigned long len);
10999
11000-__must_check long __copy_from_user_inatomic(void *dst, const void __user *src,
11001- unsigned size);
11002+static __must_check __always_inline unsigned long
11003+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
11004+{
11005+ pax_track_stack();
11006+
11007+ if ((int)size < 0)
11008+ return size;
11009
11010-static __must_check __always_inline int
11011+#ifdef CONFIG_PAX_MEMORY_UDEREF
11012+ if (!__access_ok(VERIFY_READ, src, size))
11013+ return size;
11014+
11015+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
11016+ src += PAX_USER_SHADOW_BASE;
11017+#endif
11018+
11019+ return copy_user_generic(dst, (__force const void *)src, size);
11020+}
11021+
11022+static __must_check __always_inline unsigned long
11023 __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
11024 {
11025+ if ((int)size < 0)
11026+ return size;
11027+
11028+#ifdef CONFIG_PAX_MEMORY_UDEREF
11029+ if (!__access_ok(VERIFY_WRITE, dst, size))
11030+ return size;
11031+
11032+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
11033+ dst += PAX_USER_SHADOW_BASE;
11034+#endif
11035+
11036 return copy_user_generic((__force void *)dst, src, size);
11037 }
11038
11039-extern long __copy_user_nocache(void *dst, const void __user *src,
11040+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
11041 unsigned size, int zerorest);
11042
11043-static inline int
11044-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11045+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
11046 {
11047 might_sleep();
11048+
11049+ if ((int)size < 0)
11050+ return size;
11051+
11052+#ifdef CONFIG_PAX_MEMORY_UDEREF
11053+ if (!__access_ok(VERIFY_READ, src, size))
11054+ return size;
11055+#endif
11056+
11057 return __copy_user_nocache(dst, src, size, 1);
11058 }
11059
11060-static inline int
11061-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11062+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
11063 unsigned size)
11064 {
11065+ if ((int)size < 0)
11066+ return size;
11067+
11068+#ifdef CONFIG_PAX_MEMORY_UDEREF
11069+ if (!__access_ok(VERIFY_READ, src, size))
11070+ return size;
11071+#endif
11072+
11073 return __copy_user_nocache(dst, src, size, 0);
11074 }
11075
11076-unsigned long
11077+extern unsigned long
11078 copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
11079
11080 #endif /* _ASM_X86_UACCESS_64_H */
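The uaccess_64.h changes above keep repeating one guard: treat a length whose signed value is negative as an error (a huge unsigned size produced by an underflow), and, when PAX_MEMORY_UDEREF is enabled, re-run the access check and shift any pointer below PAX_USER_SHADOW_BASE up into the shadowed user mapping before calling copy_user_generic(). A minimal userspace sketch of that guard follows; the shadow-base constant and function name are placeholders rather than kernel code, and the return convention mirrors the helpers above (non-zero means that many bytes were left uncopied).

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Placeholder for PAX_USER_SHADOW_BASE; the real value comes from the
 * PaX/UDEREF configuration, this one exists only for the sketch. */
#define FAKE_USER_SHADOW_BASE 0x100000UL

static unsigned long guarded_copy(void *dst, const void *src, unsigned size)
{
    /* A length that went negative through signed arithmetic shows up here
     * as a huge unsigned value; report it as "size bytes not copied",
     * just like the patched __copy_in_user() and friends. */
    if ((int)size < 0)
        return size;

    /* UDEREF rebase, modeled only: a source address below the shadow base
     * would be shifted up so that a stale kernel-range pointer faults
     * instead of silently reading kernel memory. The sketch computes the
     * shifted address but still copies from the original one, since it
     * runs in ordinary userspace. */
    uintptr_t shifted = (uintptr_t)src;
    if (shifted < FAKE_USER_SHADOW_BASE)
        shifted += FAKE_USER_SHADOW_BASE;
    (void)shifted;

    memcpy(dst, src, size);
    return 0; /* nothing left uncopied */
}

int main(void)
{
    char a[8] = "abcdefg", b[8];

    printf("normal copy, left uncopied: %lu\n", guarded_copy(b, a, sizeof(a)));
    printf("underflowed size, left uncopied: %lu\n",
           guarded_copy(b, a, (unsigned)-4));
    return 0;
}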
11081diff -urNp linux-2.6.32.43/arch/x86/include/asm/uaccess.h linux-2.6.32.43/arch/x86/include/asm/uaccess.h
11082--- linux-2.6.32.43/arch/x86/include/asm/uaccess.h 2011-06-25 12:55:34.000000000 -0400
11083+++ linux-2.6.32.43/arch/x86/include/asm/uaccess.h 2011-06-25 12:56:37.000000000 -0400
11084@@ -8,12 +8,15 @@
11085 #include <linux/thread_info.h>
11086 #include <linux/prefetch.h>
11087 #include <linux/string.h>
11088+#include <linux/sched.h>
11089 #include <asm/asm.h>
11090 #include <asm/page.h>
11091
11092 #define VERIFY_READ 0
11093 #define VERIFY_WRITE 1
11094
11095+extern void check_object_size(const void *ptr, unsigned long n, bool to);
11096+
11097 /*
11098 * The fs value determines whether argument validity checking should be
11099 * performed or not. If get_fs() == USER_DS, checking is performed, with
11100@@ -29,7 +32,12 @@
11101
11102 #define get_ds() (KERNEL_DS)
11103 #define get_fs() (current_thread_info()->addr_limit)
11104+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11105+void __set_fs(mm_segment_t x);
11106+void set_fs(mm_segment_t x);
11107+#else
11108 #define set_fs(x) (current_thread_info()->addr_limit = (x))
11109+#endif
11110
11111 #define segment_eq(a, b) ((a).seg == (b).seg)
11112
11113@@ -77,7 +85,33 @@
11114 * checks that the pointer is in the user space range - after calling
11115 * this function, memory access functions may still return -EFAULT.
11116 */
11117-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11118+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0))
11119+#define access_ok(type, addr, size) \
11120+({ \
11121+ long __size = size; \
11122+ unsigned long __addr = (unsigned long)addr; \
11123+ unsigned long __addr_ao = __addr & PAGE_MASK; \
11124+ unsigned long __end_ao = __addr + __size - 1; \
11125+ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \
11126+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
11127+ while(__addr_ao <= __end_ao) { \
11128+ char __c_ao; \
11129+ __addr_ao += PAGE_SIZE; \
11130+ if (__size > PAGE_SIZE) \
11131+ cond_resched(); \
11132+ if (__get_user(__c_ao, (char __user *)__addr)) \
11133+ break; \
11134+ if (type != VERIFY_WRITE) { \
11135+ __addr = __addr_ao; \
11136+ continue; \
11137+ } \
11138+ if (__put_user(__c_ao, (char __user *)__addr)) \
11139+ break; \
11140+ __addr = __addr_ao; \
11141+ } \
11142+ } \
11143+ __ret_ao; \
11144+})
11145
11146 /*
11147 * The exception table consists of pairs of addresses: the first is the
11148@@ -183,12 +217,20 @@ extern int __get_user_bad(void);
11149 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
11150 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
11151
11152-
11153+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
11154+#define __copyuser_seg "gs;"
11155+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
11156+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
11157+#else
11158+#define __copyuser_seg
11159+#define __COPYUSER_SET_ES
11160+#define __COPYUSER_RESTORE_ES
11161+#endif
11162
11163 #ifdef CONFIG_X86_32
11164 #define __put_user_asm_u64(x, addr, err, errret) \
11165- asm volatile("1: movl %%eax,0(%2)\n" \
11166- "2: movl %%edx,4(%2)\n" \
11167+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%2)\n" \
11168+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
11169 "3:\n" \
11170 ".section .fixup,\"ax\"\n" \
11171 "4: movl %3,%0\n" \
11172@@ -200,8 +242,8 @@ extern int __get_user_bad(void);
11173 : "A" (x), "r" (addr), "i" (errret), "0" (err))
11174
11175 #define __put_user_asm_ex_u64(x, addr) \
11176- asm volatile("1: movl %%eax,0(%1)\n" \
11177- "2: movl %%edx,4(%1)\n" \
11178+ asm volatile("1: "__copyuser_seg"movl %%eax,0(%1)\n" \
11179+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
11180 "3:\n" \
11181 _ASM_EXTABLE(1b, 2b - 1b) \
11182 _ASM_EXTABLE(2b, 3b - 2b) \
11183@@ -374,7 +416,7 @@ do { \
11184 } while (0)
11185
11186 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11187- asm volatile("1: mov"itype" %2,%"rtype"1\n" \
11188+ asm volatile("1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
11189 "2:\n" \
11190 ".section .fixup,\"ax\"\n" \
11191 "3: mov %3,%0\n" \
11192@@ -382,7 +424,7 @@ do { \
11193 " jmp 2b\n" \
11194 ".previous\n" \
11195 _ASM_EXTABLE(1b, 3b) \
11196- : "=r" (err), ltype(x) \
11197+ : "=r" (err), ltype (x) \
11198 : "m" (__m(addr)), "i" (errret), "0" (err))
11199
11200 #define __get_user_size_ex(x, ptr, size) \
11201@@ -407,7 +449,7 @@ do { \
11202 } while (0)
11203
11204 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
11205- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
11206+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
11207 "2:\n" \
11208 _ASM_EXTABLE(1b, 2b - 1b) \
11209 : ltype(x) : "m" (__m(addr)))
11210@@ -424,13 +466,24 @@ do { \
11211 int __gu_err; \
11212 unsigned long __gu_val; \
11213 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
11214- (x) = (__force __typeof__(*(ptr)))__gu_val; \
11215+ (x) = (__typeof__(*(ptr)))__gu_val; \
11216 __gu_err; \
11217 })
11218
11219 /* FIXME: this hack is definitely wrong -AK */
11220 struct __large_struct { unsigned long buf[100]; };
11221-#define __m(x) (*(struct __large_struct __user *)(x))
11222+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11223+#define ____m(x) \
11224+({ \
11225+ unsigned long ____x = (unsigned long)(x); \
11226+ if (____x < PAX_USER_SHADOW_BASE) \
11227+ ____x += PAX_USER_SHADOW_BASE; \
11228+ (void __user *)____x; \
11229+})
11230+#else
11231+#define ____m(x) (x)
11232+#endif
11233+#define __m(x) (*(struct __large_struct __user *)____m(x))
11234
11235 /*
11236 * Tell gcc we read from memory instead of writing: this is because
11237@@ -438,7 +491,7 @@ struct __large_struct { unsigned long bu
11238 * aliasing issues.
11239 */
11240 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
11241- asm volatile("1: mov"itype" %"rtype"1,%2\n" \
11242+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
11243 "2:\n" \
11244 ".section .fixup,\"ax\"\n" \
11245 "3: mov %3,%0\n" \
11246@@ -446,10 +499,10 @@ struct __large_struct { unsigned long bu
11247 ".previous\n" \
11248 _ASM_EXTABLE(1b, 3b) \
11249 : "=r"(err) \
11250- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
11251+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
11252
11253 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
11254- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
11255+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
11256 "2:\n" \
11257 _ASM_EXTABLE(1b, 2b - 1b) \
11258 : : ltype(x), "m" (__m(addr)))
11259@@ -488,8 +541,12 @@ struct __large_struct { unsigned long bu
11260 * On error, the variable @x is set to zero.
11261 */
11262
11263+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11264+#define __get_user(x, ptr) get_user((x), (ptr))
11265+#else
11266 #define __get_user(x, ptr) \
11267 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
11268+#endif
11269
11270 /**
11271 * __put_user: - Write a simple value into user space, with less checking.
11272@@ -511,8 +568,12 @@ struct __large_struct { unsigned long bu
11273 * Returns zero on success, or -EFAULT on error.
11274 */
11275
11276+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11277+#define __put_user(x, ptr) put_user((x), (ptr))
11278+#else
11279 #define __put_user(x, ptr) \
11280 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
11281+#endif
11282
11283 #define __get_user_unaligned __get_user
11284 #define __put_user_unaligned __put_user
11285@@ -530,7 +591,7 @@ struct __large_struct { unsigned long bu
11286 #define get_user_ex(x, ptr) do { \
11287 unsigned long __gue_val; \
11288 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
11289- (x) = (__force __typeof__(*(ptr)))__gue_val; \
11290+ (x) = (__typeof__(*(ptr)))__gue_val; \
11291 } while (0)
11292
11293 #ifdef CONFIG_X86_WP_WORKS_OK
11294@@ -567,6 +628,7 @@ extern struct movsl_mask {
11295
11296 #define ARCH_HAS_NOCACHE_UACCESS 1
11297
11298+#define ARCH_HAS_SORT_EXTABLE
11299 #ifdef CONFIG_X86_32
11300 # include "uaccess_32.h"
11301 #else
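The reworked access_ok() above keeps the old arithmetic range test (renamed __access_ok()) and adds a prefault pass: if the region spans a page boundary, it reads one byte per page with __get_user() and, for VERIFY_WRITE, writes that byte straight back with __put_user(), so every page is faulted in and proven accessible before the caller starts copying; cond_resched() keeps long walks preemptible. A rough userspace model of that walk, with illustrative names only:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SZ 4096UL   /* assumption: 4 KiB pages, as on x86 */

static void prefault_range(char *addr, size_t len, int writable)
{
    uintptr_t p    = (uintptr_t)addr;                  /* byte actually touched   */
    uintptr_t page = (uintptr_t)addr & ~(PAGE_SZ - 1); /* page holding that byte  */
    uintptr_t last = (uintptr_t)addr + len - 1;        /* last byte of the range  */

    while (page <= last) {
        page += PAGE_SZ;
        char c = *(volatile char *)p;        /* ~ __get_user() of one byte        */
        if (writable)
            *(volatile char *)p = c;         /* ~ __put_user() the same byte back */
        p = page;                            /* next touch lands on a page start  */
    }
}

int main(void)
{
    size_t len = 3 * PAGE_SZ;
    char *buf = malloc(len);

    memset(buf, 0, len);
    prefault_range(buf, len, 1);
    puts("range touched page by page");
    free(buf);
    return 0;
}

The model touches the caller-supplied address first and then each following page start, which is the same order the macro above uses.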
11302diff -urNp linux-2.6.32.43/arch/x86/include/asm/vgtod.h linux-2.6.32.43/arch/x86/include/asm/vgtod.h
11303--- linux-2.6.32.43/arch/x86/include/asm/vgtod.h 2011-03-27 14:31:47.000000000 -0400
11304+++ linux-2.6.32.43/arch/x86/include/asm/vgtod.h 2011-04-17 15:56:46.000000000 -0400
11305@@ -14,6 +14,7 @@ struct vsyscall_gtod_data {
11306 int sysctl_enabled;
11307 struct timezone sys_tz;
11308 struct { /* extract of a clocksource struct */
11309+ char name[8];
11310 cycle_t (*vread)(void);
11311 cycle_t cycle_last;
11312 cycle_t mask;
11313diff -urNp linux-2.6.32.43/arch/x86/include/asm/vmi.h linux-2.6.32.43/arch/x86/include/asm/vmi.h
11314--- linux-2.6.32.43/arch/x86/include/asm/vmi.h 2011-03-27 14:31:47.000000000 -0400
11315+++ linux-2.6.32.43/arch/x86/include/asm/vmi.h 2011-04-17 15:56:46.000000000 -0400
11316@@ -191,6 +191,7 @@ struct vrom_header {
11317 u8 reserved[96]; /* Reserved for headers */
11318 char vmi_init[8]; /* VMI_Init jump point */
11319 char get_reloc[8]; /* VMI_GetRelocationInfo jump point */
11320+ char rom_data[8048]; /* rest of the option ROM */
11321 } __attribute__((packed));
11322
11323 struct pnp_header {
11324diff -urNp linux-2.6.32.43/arch/x86/include/asm/vmi_time.h linux-2.6.32.43/arch/x86/include/asm/vmi_time.h
11325--- linux-2.6.32.43/arch/x86/include/asm/vmi_time.h 2011-03-27 14:31:47.000000000 -0400
11326+++ linux-2.6.32.43/arch/x86/include/asm/vmi_time.h 2011-08-05 20:33:55.000000000 -0400
11327@@ -43,7 +43,7 @@ extern struct vmi_timer_ops {
11328 int (*wallclock_updated)(void);
11329 void (*set_alarm)(u32 flags, u64 expiry, u64 period);
11330 void (*cancel_alarm)(u32 flags);
11331-} vmi_timer_ops;
11332+} __no_const vmi_timer_ops;
11333
11334 /* Prototypes */
11335 extern void __init vmi_time_init(void);
11336diff -urNp linux-2.6.32.43/arch/x86/include/asm/vsyscall.h linux-2.6.32.43/arch/x86/include/asm/vsyscall.h
11337--- linux-2.6.32.43/arch/x86/include/asm/vsyscall.h 2011-03-27 14:31:47.000000000 -0400
11338+++ linux-2.6.32.43/arch/x86/include/asm/vsyscall.h 2011-04-17 15:56:46.000000000 -0400
11339@@ -15,9 +15,10 @@ enum vsyscall_num {
11340
11341 #ifdef __KERNEL__
11342 #include <linux/seqlock.h>
11343+#include <linux/getcpu.h>
11344+#include <linux/time.h>
11345
11346 #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16)))
11347-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16)))
11348
11349 /* Definitions for CONFIG_GENERIC_TIME definitions */
11350 #define __section_vsyscall_gtod_data __attribute__ \
11351@@ -31,7 +32,6 @@ enum vsyscall_num {
11352 #define VGETCPU_LSL 2
11353
11354 extern int __vgetcpu_mode;
11355-extern volatile unsigned long __jiffies;
11356
11357 /* kernel space (writeable) */
11358 extern int vgetcpu_mode;
11359@@ -39,6 +39,9 @@ extern struct timezone sys_tz;
11360
11361 extern void map_vsyscall(void);
11362
11363+extern int vgettimeofday(struct timeval * tv, struct timezone * tz);
11364+extern time_t vtime(time_t *t);
11365+extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache);
11366 #endif /* __KERNEL__ */
11367
11368 #endif /* _ASM_X86_VSYSCALL_H */
11369diff -urNp linux-2.6.32.43/arch/x86/include/asm/x86_init.h linux-2.6.32.43/arch/x86/include/asm/x86_init.h
11370--- linux-2.6.32.43/arch/x86/include/asm/x86_init.h 2011-03-27 14:31:47.000000000 -0400
11371+++ linux-2.6.32.43/arch/x86/include/asm/x86_init.h 2011-08-05 20:33:55.000000000 -0400
11372@@ -28,7 +28,7 @@ struct x86_init_mpparse {
11373 void (*mpc_oem_bus_info)(struct mpc_bus *m, char *name);
11374 void (*find_smp_config)(unsigned int reserve);
11375 void (*get_smp_config)(unsigned int early);
11376-};
11377+} __no_const;
11378
11379 /**
11380 * struct x86_init_resources - platform specific resource related ops
11381@@ -42,7 +42,7 @@ struct x86_init_resources {
11382 void (*probe_roms)(void);
11383 void (*reserve_resources)(void);
11384 char *(*memory_setup)(void);
11385-};
11386+} __no_const;
11387
11388 /**
11389 * struct x86_init_irqs - platform specific interrupt setup
11390@@ -55,7 +55,7 @@ struct x86_init_irqs {
11391 void (*pre_vector_init)(void);
11392 void (*intr_init)(void);
11393 void (*trap_init)(void);
11394-};
11395+} __no_const;
11396
11397 /**
11398 * struct x86_init_oem - oem platform specific customizing functions
11399@@ -65,7 +65,7 @@ struct x86_init_irqs {
11400 struct x86_init_oem {
11401 void (*arch_setup)(void);
11402 void (*banner)(void);
11403-};
11404+} __no_const;
11405
11406 /**
11407 * struct x86_init_paging - platform specific paging functions
11408@@ -75,7 +75,7 @@ struct x86_init_oem {
11409 struct x86_init_paging {
11410 void (*pagetable_setup_start)(pgd_t *base);
11411 void (*pagetable_setup_done)(pgd_t *base);
11412-};
11413+} __no_const;
11414
11415 /**
11416 * struct x86_init_timers - platform specific timer setup
11417@@ -88,7 +88,7 @@ struct x86_init_timers {
11418 void (*setup_percpu_clockev)(void);
11419 void (*tsc_pre_init)(void);
11420 void (*timer_init)(void);
11421-};
11422+} __no_const;
11423
11424 /**
11425 * struct x86_init_ops - functions for platform specific setup
11426@@ -101,7 +101,7 @@ struct x86_init_ops {
11427 struct x86_init_oem oem;
11428 struct x86_init_paging paging;
11429 struct x86_init_timers timers;
11430-};
11431+} __no_const;
11432
11433 /**
11434 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
11435@@ -109,7 +109,7 @@ struct x86_init_ops {
11436 */
11437 struct x86_cpuinit_ops {
11438 void (*setup_percpu_clockev)(void);
11439-};
11440+} __no_const;
11441
11442 /**
11443 * struct x86_platform_ops - platform specific runtime functions
11444@@ -121,7 +121,7 @@ struct x86_platform_ops {
11445 unsigned long (*calibrate_tsc)(void);
11446 unsigned long (*get_wallclock)(void);
11447 int (*set_wallclock)(unsigned long nowtime);
11448-};
11449+} __no_const;
11450
11451 extern struct x86_init_ops x86_init;
11452 extern struct x86_cpuinit_ops x86_cpuinit;
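The __no_const markers above work together with the constify plugin the patch hooks into the build (see the realmode Makefile change below): ops tables that are never written after initialization are turned into const, read-only objects (as done for amd_iommu_dma_ops, sysfs_ops and the mtrr_ops further down), while tables such as x86_init, which boot code legitimately rewrites, are tagged __no_const so the plugin leaves them writable. A plain-C illustration of the distinction, with made-up names:

#include <stdio.h>

struct ops {
    int  (*probe)(void);
    void (*banner)(void);
};

static int  default_probe(void)  { return 0; }
static void default_banner(void) { puts("generic platform"); }

/* Lives in a read-only section; overwriting ro_ops.banner at runtime would
 * fault instead of redirecting control flow. */
static const struct ops ro_ops = {
    .probe  = default_probe,
    .banner = default_banner,
};

/* The writable equivalent of a __no_const table: legitimate code can
 * re-point the hooks (as mce-inject does with mce_chrdev_ops.write below),
 * but so could an attacker with a kernel write primitive. */
static struct ops rw_ops = {
    .probe  = default_probe,
    .banner = default_banner,
};

int main(void)
{
    ro_ops.banner();
    rw_ops.banner = default_banner; /* allowed only on the writable table */
    rw_ops.banner();
    return 0;
}

Making the pointer tables read-only removes a classic exploitation target: a single arbitrary write into an ops structure is enough to hijack kernel control flow.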
11453diff -urNp linux-2.6.32.43/arch/x86/include/asm/xsave.h linux-2.6.32.43/arch/x86/include/asm/xsave.h
11454--- linux-2.6.32.43/arch/x86/include/asm/xsave.h 2011-03-27 14:31:47.000000000 -0400
11455+++ linux-2.6.32.43/arch/x86/include/asm/xsave.h 2011-04-17 15:56:46.000000000 -0400
11456@@ -56,6 +56,12 @@ static inline int xrstor_checking(struct
11457 static inline int xsave_user(struct xsave_struct __user *buf)
11458 {
11459 int err;
11460+
11461+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11462+ if ((unsigned long)buf < PAX_USER_SHADOW_BASE)
11463+ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE);
11464+#endif
11465+
11466 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
11467 "2:\n"
11468 ".section .fixup,\"ax\"\n"
11469@@ -82,6 +88,11 @@ static inline int xrestore_user(struct x
11470 u32 lmask = mask;
11471 u32 hmask = mask >> 32;
11472
11473+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
11474+ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE)
11475+ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE);
11476+#endif
11477+
11478 __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
11479 "2:\n"
11480 ".section .fixup,\"ax\"\n"
11481diff -urNp linux-2.6.32.43/arch/x86/Kconfig linux-2.6.32.43/arch/x86/Kconfig
11482--- linux-2.6.32.43/arch/x86/Kconfig 2011-03-27 14:31:47.000000000 -0400
11483+++ linux-2.6.32.43/arch/x86/Kconfig 2011-04-17 15:56:46.000000000 -0400
11484@@ -223,7 +223,7 @@ config X86_TRAMPOLINE
11485
11486 config X86_32_LAZY_GS
11487 def_bool y
11488- depends on X86_32 && !CC_STACKPROTECTOR
11489+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
11490
11491 config KTIME_SCALAR
11492 def_bool X86_32
11493@@ -1008,7 +1008,7 @@ choice
11494
11495 config NOHIGHMEM
11496 bool "off"
11497- depends on !X86_NUMAQ
11498+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11499 ---help---
11500 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
11501 However, the address space of 32-bit x86 processors is only 4
11502@@ -1045,7 +1045,7 @@ config NOHIGHMEM
11503
11504 config HIGHMEM4G
11505 bool "4GB"
11506- depends on !X86_NUMAQ
11507+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
11508 ---help---
11509 Select this if you have a 32-bit processor and between 1 and 4
11510 gigabytes of physical RAM.
11511@@ -1099,7 +1099,7 @@ config PAGE_OFFSET
11512 hex
11513 default 0xB0000000 if VMSPLIT_3G_OPT
11514 default 0x80000000 if VMSPLIT_2G
11515- default 0x78000000 if VMSPLIT_2G_OPT
11516+ default 0x70000000 if VMSPLIT_2G_OPT
11517 default 0x40000000 if VMSPLIT_1G
11518 default 0xC0000000
11519 depends on X86_32
11520@@ -1430,7 +1430,7 @@ config ARCH_USES_PG_UNCACHED
11521
11522 config EFI
11523 bool "EFI runtime service support"
11524- depends on ACPI
11525+ depends on ACPI && !PAX_KERNEXEC
11526 ---help---
11527 This enables the kernel to use EFI runtime services that are
11528 available (such as the EFI variable services).
11529@@ -1460,6 +1460,7 @@ config SECCOMP
11530
11531 config CC_STACKPROTECTOR
11532 bool "Enable -fstack-protector buffer overflow detection (EXPERIMENTAL)"
11533+ depends on X86_64 || !PAX_MEMORY_UDEREF
11534 ---help---
11535 This option turns on the -fstack-protector GCC feature. This
11536 feature puts, at the beginning of functions, a canary value on
11537@@ -1517,6 +1518,7 @@ config KEXEC_JUMP
11538 config PHYSICAL_START
11539 hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP)
11540 default "0x1000000"
11541+ range 0x400000 0x40000000
11542 ---help---
11543 This gives the physical address where the kernel is loaded.
11544
11545@@ -1581,6 +1583,7 @@ config PHYSICAL_ALIGN
11546 hex
11547 prompt "Alignment value to which kernel should be aligned" if X86_32
11548 default "0x1000000"
11549+ range 0x400000 0x1000000 if PAX_KERNEXEC
11550 range 0x2000 0x1000000
11551 ---help---
11552 This value puts the alignment restrictions on physical address
11553@@ -1612,9 +1615,10 @@ config HOTPLUG_CPU
11554 Say N if you want to disable CPU hotplug.
11555
11556 config COMPAT_VDSO
11557- def_bool y
11558+ def_bool n
11559 prompt "Compat VDSO support"
11560 depends on X86_32 || IA32_EMULATION
11561+ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
11562 ---help---
11563 Map the 32-bit VDSO to the predictable old-style address too.
11564 ---help---
11565diff -urNp linux-2.6.32.43/arch/x86/Kconfig.cpu linux-2.6.32.43/arch/x86/Kconfig.cpu
11566--- linux-2.6.32.43/arch/x86/Kconfig.cpu 2011-03-27 14:31:47.000000000 -0400
11567+++ linux-2.6.32.43/arch/x86/Kconfig.cpu 2011-04-17 15:56:46.000000000 -0400
11568@@ -340,7 +340,7 @@ config X86_PPRO_FENCE
11569
11570 config X86_F00F_BUG
11571 def_bool y
11572- depends on M586MMX || M586TSC || M586 || M486 || M386
11573+ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC
11574
11575 config X86_WP_WORKS_OK
11576 def_bool y
11577@@ -360,7 +360,7 @@ config X86_POPAD_OK
11578
11579 config X86_ALIGNMENT_16
11580 def_bool y
11581- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11582+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
11583
11584 config X86_INTEL_USERCOPY
11585 def_bool y
11586@@ -406,7 +406,7 @@ config X86_CMPXCHG64
11587 # generates cmov.
11588 config X86_CMOV
11589 def_bool y
11590- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11591+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM)
11592
11593 config X86_MINIMUM_CPU_FAMILY
11594 int
11595diff -urNp linux-2.6.32.43/arch/x86/Kconfig.debug linux-2.6.32.43/arch/x86/Kconfig.debug
11596--- linux-2.6.32.43/arch/x86/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
11597+++ linux-2.6.32.43/arch/x86/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
11598@@ -99,7 +99,7 @@ config X86_PTDUMP
11599 config DEBUG_RODATA
11600 bool "Write protect kernel read-only data structures"
11601 default y
11602- depends on DEBUG_KERNEL
11603+ depends on DEBUG_KERNEL && BROKEN
11604 ---help---
11605 Mark the kernel read-only data as write-protected in the pagetables,
11606 in order to catch accidental (and incorrect) writes to such const
11607diff -urNp linux-2.6.32.43/arch/x86/kernel/acpi/realmode/Makefile linux-2.6.32.43/arch/x86/kernel/acpi/realmode/Makefile
11608--- linux-2.6.32.43/arch/x86/kernel/acpi/realmode/Makefile 2011-03-27 14:31:47.000000000 -0400
11609+++ linux-2.6.32.43/arch/x86/kernel/acpi/realmode/Makefile 2011-08-07 14:38:58.000000000 -0400
11610@@ -41,6 +41,9 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os
11611 $(call cc-option, -fno-stack-protector) \
11612 $(call cc-option, -mpreferred-stack-boundary=2)
11613 KBUILD_CFLAGS += $(call cc-option, -m32)
11614+ifdef CONSTIFY_PLUGIN
11615+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) -fplugin-arg-constify_plugin-no-constify
11616+endif
11617 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
11618 GCOV_PROFILE := n
11619
11620diff -urNp linux-2.6.32.43/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.32.43/arch/x86/kernel/acpi/realmode/wakeup.S
11621--- linux-2.6.32.43/arch/x86/kernel/acpi/realmode/wakeup.S 2011-03-27 14:31:47.000000000 -0400
11622+++ linux-2.6.32.43/arch/x86/kernel/acpi/realmode/wakeup.S 2011-07-01 18:53:40.000000000 -0400
11623@@ -91,6 +91,9 @@ _start:
11624 /* Do any other stuff... */
11625
11626 #ifndef CONFIG_64BIT
11627+ /* Recheck NX bit overrides (64bit path does this in trampoline) */
11628+ call verify_cpu
11629+
11630 /* This could also be done in C code... */
11631 movl pmode_cr3, %eax
11632 movl %eax, %cr3
11633@@ -104,7 +107,7 @@ _start:
11634 movl %eax, %ecx
11635 orl %edx, %ecx
11636 jz 1f
11637- movl $0xc0000080, %ecx
11638+ mov $MSR_EFER, %ecx
11639 wrmsr
11640 1:
11641
11642@@ -114,6 +117,7 @@ _start:
11643 movl pmode_cr0, %eax
11644 movl %eax, %cr0
11645 jmp pmode_return
11646+# include "../../verify_cpu.S"
11647 #else
11648 pushw $0
11649 pushw trampoline_segment
11650diff -urNp linux-2.6.32.43/arch/x86/kernel/acpi/sleep.c linux-2.6.32.43/arch/x86/kernel/acpi/sleep.c
11651--- linux-2.6.32.43/arch/x86/kernel/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
11652+++ linux-2.6.32.43/arch/x86/kernel/acpi/sleep.c 2011-07-01 19:01:34.000000000 -0400
11653@@ -11,11 +11,12 @@
11654 #include <linux/cpumask.h>
11655 #include <asm/segment.h>
11656 #include <asm/desc.h>
11657+#include <asm/e820.h>
11658
11659 #include "realmode/wakeup.h"
11660 #include "sleep.h"
11661
11662-unsigned long acpi_wakeup_address;
11663+unsigned long acpi_wakeup_address = 0x2000;
11664 unsigned long acpi_realmode_flags;
11665
11666 /* address in low memory of the wakeup routine. */
11667@@ -98,9 +99,13 @@ int acpi_save_state_mem(void)
11668 #else /* CONFIG_64BIT */
11669 header->trampoline_segment = setup_trampoline() >> 4;
11670 #ifdef CONFIG_SMP
11671- stack_start.sp = temp_stack + sizeof(temp_stack);
11672+ stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
11673+
11674+ pax_open_kernel();
11675 early_gdt_descr.address =
11676 (unsigned long)get_cpu_gdt_table(smp_processor_id());
11677+ pax_close_kernel();
11678+
11679 initial_gs = per_cpu_offset(smp_processor_id());
11680 #endif
11681 initial_code = (unsigned long)wakeup_long64;
11682@@ -134,14 +139,8 @@ void __init acpi_reserve_bootmem(void)
11683 return;
11684 }
11685
11686- acpi_realmode = (unsigned long)alloc_bootmem_low(WAKEUP_SIZE);
11687-
11688- if (!acpi_realmode) {
11689- printk(KERN_ERR "ACPI: Cannot allocate lowmem, S3 disabled.\n");
11690- return;
11691- }
11692-
11693- acpi_wakeup_address = virt_to_phys((void *)acpi_realmode);
11694+ reserve_early(acpi_wakeup_address, acpi_wakeup_address + WAKEUP_SIZE, "ACPI Wakeup Code");
11695+ acpi_realmode = (unsigned long)__va(acpi_wakeup_address);
11696 }
11697
11698
11699diff -urNp linux-2.6.32.43/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.32.43/arch/x86/kernel/acpi/wakeup_32.S
11700--- linux-2.6.32.43/arch/x86/kernel/acpi/wakeup_32.S 2011-03-27 14:31:47.000000000 -0400
11701+++ linux-2.6.32.43/arch/x86/kernel/acpi/wakeup_32.S 2011-04-17 15:56:46.000000000 -0400
11702@@ -30,13 +30,11 @@ wakeup_pmode_return:
11703 # and restore the stack ... but you need gdt for this to work
11704 movl saved_context_esp, %esp
11705
11706- movl %cs:saved_magic, %eax
11707- cmpl $0x12345678, %eax
11708+ cmpl $0x12345678, saved_magic
11709 jne bogus_magic
11710
11711 # jump to place where we left off
11712- movl saved_eip, %eax
11713- jmp *%eax
11714+ jmp *(saved_eip)
11715
11716 bogus_magic:
11717 jmp bogus_magic
11718diff -urNp linux-2.6.32.43/arch/x86/kernel/alternative.c linux-2.6.32.43/arch/x86/kernel/alternative.c
11719--- linux-2.6.32.43/arch/x86/kernel/alternative.c 2011-03-27 14:31:47.000000000 -0400
11720+++ linux-2.6.32.43/arch/x86/kernel/alternative.c 2011-04-17 15:56:46.000000000 -0400
11721@@ -407,7 +407,7 @@ void __init_or_module apply_paravirt(str
11722
11723 BUG_ON(p->len > MAX_PATCH_LEN);
11724 /* prep the buffer with the original instructions */
11725- memcpy(insnbuf, p->instr, p->len);
11726+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
11727 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
11728 (unsigned long)p->instr, p->len);
11729
11730@@ -475,7 +475,7 @@ void __init alternative_instructions(voi
11731 if (smp_alt_once)
11732 free_init_pages("SMP alternatives",
11733 (unsigned long)__smp_locks,
11734- (unsigned long)__smp_locks_end);
11735+ PAGE_ALIGN((unsigned long)__smp_locks_end));
11736
11737 restart_nmi();
11738 }
11739@@ -492,13 +492,17 @@ void __init alternative_instructions(voi
11740 * instructions. And on the local CPU you need to be protected again NMI or MCE
11741 * handlers seeing an inconsistent instruction while you patch.
11742 */
11743-static void *__init_or_module text_poke_early(void *addr, const void *opcode,
11744+static void *__kprobes text_poke_early(void *addr, const void *opcode,
11745 size_t len)
11746 {
11747 unsigned long flags;
11748 local_irq_save(flags);
11749- memcpy(addr, opcode, len);
11750+
11751+ pax_open_kernel();
11752+ memcpy(ktla_ktva(addr), opcode, len);
11753 sync_core();
11754+ pax_close_kernel();
11755+
11756 local_irq_restore(flags);
11757 /* Could also do a CLFLUSH here to speed up CPU recovery; but
11758 that causes hangs on some VIA CPUs. */
11759@@ -520,35 +524,21 @@ static void *__init_or_module text_poke_
11760 */
11761 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
11762 {
11763- unsigned long flags;
11764- char *vaddr;
11765+ unsigned char *vaddr = ktla_ktva(addr);
11766 struct page *pages[2];
11767- int i;
11768+ size_t i;
11769
11770 if (!core_kernel_text((unsigned long)addr)) {
11771- pages[0] = vmalloc_to_page(addr);
11772- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
11773+ pages[0] = vmalloc_to_page(vaddr);
11774+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
11775 } else {
11776- pages[0] = virt_to_page(addr);
11777+ pages[0] = virt_to_page(vaddr);
11778 WARN_ON(!PageReserved(pages[0]));
11779- pages[1] = virt_to_page(addr + PAGE_SIZE);
11780+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
11781 }
11782 BUG_ON(!pages[0]);
11783- local_irq_save(flags);
11784- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
11785- if (pages[1])
11786- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
11787- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
11788- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
11789- clear_fixmap(FIX_TEXT_POKE0);
11790- if (pages[1])
11791- clear_fixmap(FIX_TEXT_POKE1);
11792- local_flush_tlb();
11793- sync_core();
11794- /* Could also do a CLFLUSH here to speed up CPU recovery; but
11795- that causes hangs on some VIA CPUs. */
11796+ text_poke_early(addr, opcode, len);
11797 for (i = 0; i < len; i++)
11798- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
11799- local_irq_restore(flags);
11800+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
11801 return addr;
11802 }
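After the alternative.c change above, both apply_paravirt() and text_poke() go through text_poke_early(), which patches the kernel image via its ktla_ktva() alias inside a pax_open_kernel()/pax_close_kernel() window with interrupts disabled, instead of the old FIX_TEXT_POKE fixmap remapping. The general shape, briefly granting write access to otherwise read-only code, can be modeled in userspace with mprotect(); this is only a sketch of the pattern, and the real code additionally runs sync_core() while the KERNEXEC open/close helpers are what actually flip the write protection:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);

    /* A page that starts out read-only, like kernel .text under KERNEXEC. */
    unsigned char *text = mmap(NULL, pagesz, PROT_READ,
                               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (text == MAP_FAILED)
        return 1;

    const unsigned char opcode[] = { 0x90, 0x90, 0x90 }; /* e.g. NOPs */

    mprotect(text, pagesz, PROT_READ | PROT_WRITE); /* "open" the window  */
    memcpy(text, opcode, sizeof(opcode));           /* the actual poke    */
    mprotect(text, pagesz, PROT_READ);              /* "close" it again   */

    printf("patched bytes: %02x %02x %02x\n", text[0], text[1], text[2]);
    munmap(text, pagesz);
    return 0;
}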
11803diff -urNp linux-2.6.32.43/arch/x86/kernel/amd_iommu.c linux-2.6.32.43/arch/x86/kernel/amd_iommu.c
11804--- linux-2.6.32.43/arch/x86/kernel/amd_iommu.c 2011-03-27 14:31:47.000000000 -0400
11805+++ linux-2.6.32.43/arch/x86/kernel/amd_iommu.c 2011-04-17 15:56:46.000000000 -0400
11806@@ -2076,7 +2076,7 @@ static void prealloc_protection_domains(
11807 }
11808 }
11809
11810-static struct dma_map_ops amd_iommu_dma_ops = {
11811+static const struct dma_map_ops amd_iommu_dma_ops = {
11812 .alloc_coherent = alloc_coherent,
11813 .free_coherent = free_coherent,
11814 .map_page = map_page,
11815diff -urNp linux-2.6.32.43/arch/x86/kernel/apic/apic.c linux-2.6.32.43/arch/x86/kernel/apic/apic.c
11816--- linux-2.6.32.43/arch/x86/kernel/apic/apic.c 2011-03-27 14:31:47.000000000 -0400
11817+++ linux-2.6.32.43/arch/x86/kernel/apic/apic.c 2011-05-16 21:46:57.000000000 -0400
11818@@ -1794,7 +1794,7 @@ void smp_error_interrupt(struct pt_regs
11819 apic_write(APIC_ESR, 0);
11820 v1 = apic_read(APIC_ESR);
11821 ack_APIC_irq();
11822- atomic_inc(&irq_err_count);
11823+ atomic_inc_unchecked(&irq_err_count);
11824
11825 /*
11826 * Here is what the APIC error bits mean:
11827@@ -2184,6 +2184,8 @@ static int __cpuinit apic_cluster_num(vo
11828 u16 *bios_cpu_apicid;
11829 DECLARE_BITMAP(clustermap, NUM_APIC_CLUSTERS);
11830
11831+ pax_track_stack();
11832+
11833 bios_cpu_apicid = early_per_cpu_ptr(x86_bios_cpu_apicid);
11834 bitmap_zero(clustermap, NUM_APIC_CLUSTERS);
11835
11836diff -urNp linux-2.6.32.43/arch/x86/kernel/apic/io_apic.c linux-2.6.32.43/arch/x86/kernel/apic/io_apic.c
11837--- linux-2.6.32.43/arch/x86/kernel/apic/io_apic.c 2011-03-27 14:31:47.000000000 -0400
11838+++ linux-2.6.32.43/arch/x86/kernel/apic/io_apic.c 2011-05-04 17:56:20.000000000 -0400
11839@@ -716,7 +716,7 @@ struct IO_APIC_route_entry **alloc_ioapi
11840 ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics,
11841 GFP_ATOMIC);
11842 if (!ioapic_entries)
11843- return 0;
11844+ return NULL;
11845
11846 for (apic = 0; apic < nr_ioapics; apic++) {
11847 ioapic_entries[apic] =
11848@@ -733,7 +733,7 @@ nomem:
11849 kfree(ioapic_entries[apic]);
11850 kfree(ioapic_entries);
11851
11852- return 0;
11853+ return NULL;
11854 }
11855
11856 /*
11857@@ -1150,7 +1150,7 @@ int IO_APIC_get_PCI_irq_vector(int bus,
11858 }
11859 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
11860
11861-void lock_vector_lock(void)
11862+void lock_vector_lock(void) __acquires(vector_lock)
11863 {
11864 /* Used to the online set of cpus does not change
11865 * during assign_irq_vector.
11866@@ -1158,7 +1158,7 @@ void lock_vector_lock(void)
11867 spin_lock(&vector_lock);
11868 }
11869
11870-void unlock_vector_lock(void)
11871+void unlock_vector_lock(void) __releases(vector_lock)
11872 {
11873 spin_unlock(&vector_lock);
11874 }
11875@@ -2542,7 +2542,7 @@ static void ack_apic_edge(unsigned int i
11876 ack_APIC_irq();
11877 }
11878
11879-atomic_t irq_mis_count;
11880+atomic_unchecked_t irq_mis_count;
11881
11882 static void ack_apic_level(unsigned int irq)
11883 {
11884@@ -2626,7 +2626,7 @@ static void ack_apic_level(unsigned int
11885
11886 /* Tail end of version 0x11 I/O APIC bug workaround */
11887 if (!(v & (1 << (i & 0x1f)))) {
11888- atomic_inc(&irq_mis_count);
11889+ atomic_inc_unchecked(&irq_mis_count);
11890 spin_lock(&ioapic_lock);
11891 __mask_and_edge_IO_APIC_irq(cfg);
11892 __unmask_and_level_IO_APIC_irq(cfg);
11893diff -urNp linux-2.6.32.43/arch/x86/kernel/apm_32.c linux-2.6.32.43/arch/x86/kernel/apm_32.c
11894--- linux-2.6.32.43/arch/x86/kernel/apm_32.c 2011-03-27 14:31:47.000000000 -0400
11895+++ linux-2.6.32.43/arch/x86/kernel/apm_32.c 2011-04-23 12:56:10.000000000 -0400
11896@@ -410,7 +410,7 @@ static DEFINE_SPINLOCK(user_list_lock);
11897 * This is for buggy BIOS's that refer to (real mode) segment 0x40
11898 * even though they are called in protected mode.
11899 */
11900-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
11901+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
11902 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
11903
11904 static const char driver_version[] = "1.16ac"; /* no spaces */
11905@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call)
11906 BUG_ON(cpu != 0);
11907 gdt = get_cpu_gdt_table(cpu);
11908 save_desc_40 = gdt[0x40 / 8];
11909+
11910+ pax_open_kernel();
11911 gdt[0x40 / 8] = bad_bios_desc;
11912+ pax_close_kernel();
11913
11914 apm_irq_save(flags);
11915 APM_DO_SAVE_SEGS;
11916@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call)
11917 &call->esi);
11918 APM_DO_RESTORE_SEGS;
11919 apm_irq_restore(flags);
11920+
11921+ pax_open_kernel();
11922 gdt[0x40 / 8] = save_desc_40;
11923+ pax_close_kernel();
11924+
11925 put_cpu();
11926
11927 return call->eax & 0xff;
11928@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void
11929 BUG_ON(cpu != 0);
11930 gdt = get_cpu_gdt_table(cpu);
11931 save_desc_40 = gdt[0x40 / 8];
11932+
11933+ pax_open_kernel();
11934 gdt[0x40 / 8] = bad_bios_desc;
11935+ pax_close_kernel();
11936
11937 apm_irq_save(flags);
11938 APM_DO_SAVE_SEGS;
11939@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void
11940 &call->eax);
11941 APM_DO_RESTORE_SEGS;
11942 apm_irq_restore(flags);
11943+
11944+ pax_open_kernel();
11945 gdt[0x40 / 8] = save_desc_40;
11946+ pax_close_kernel();
11947+
11948 put_cpu();
11949 return error;
11950 }
11951@@ -975,7 +989,7 @@ recalc:
11952
11953 static void apm_power_off(void)
11954 {
11955- unsigned char po_bios_call[] = {
11956+ const unsigned char po_bios_call[] = {
11957 0xb8, 0x00, 0x10, /* movw $0x1000,ax */
11958 0x8e, 0xd0, /* movw ax,ss */
11959 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */
11960@@ -2357,12 +2371,15 @@ static int __init apm_init(void)
11961 * code to that CPU.
11962 */
11963 gdt = get_cpu_gdt_table(0);
11964+
11965+ pax_open_kernel();
11966 set_desc_base(&gdt[APM_CS >> 3],
11967 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
11968 set_desc_base(&gdt[APM_CS_16 >> 3],
11969 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
11970 set_desc_base(&gdt[APM_DS >> 3],
11971 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
11972+ pax_close_kernel();
11973
11974 proc_create("apm", 0, NULL, &apm_file_ops);
11975
11976diff -urNp linux-2.6.32.43/arch/x86/kernel/asm-offsets_32.c linux-2.6.32.43/arch/x86/kernel/asm-offsets_32.c
11977--- linux-2.6.32.43/arch/x86/kernel/asm-offsets_32.c 2011-03-27 14:31:47.000000000 -0400
11978+++ linux-2.6.32.43/arch/x86/kernel/asm-offsets_32.c 2011-05-16 21:46:57.000000000 -0400
11979@@ -51,7 +51,6 @@ void foo(void)
11980 OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
11981 BLANK();
11982
11983- OFFSET(TI_task, thread_info, task);
11984 OFFSET(TI_exec_domain, thread_info, exec_domain);
11985 OFFSET(TI_flags, thread_info, flags);
11986 OFFSET(TI_status, thread_info, status);
11987@@ -60,6 +59,8 @@ void foo(void)
11988 OFFSET(TI_restart_block, thread_info, restart_block);
11989 OFFSET(TI_sysenter_return, thread_info, sysenter_return);
11990 OFFSET(TI_cpu, thread_info, cpu);
11991+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
11992+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
11993 BLANK();
11994
11995 OFFSET(GDS_size, desc_ptr, size);
11996@@ -99,6 +100,7 @@ void foo(void)
11997
11998 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
11999 DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
12000+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12001 DEFINE(PTRS_PER_PTE, PTRS_PER_PTE);
12002 DEFINE(PTRS_PER_PMD, PTRS_PER_PMD);
12003 DEFINE(PTRS_PER_PGD, PTRS_PER_PGD);
12004@@ -115,6 +117,11 @@ void foo(void)
12005 OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
12006 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12007 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12008+
12009+#ifdef CONFIG_PAX_KERNEXEC
12010+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12011+#endif
12012+
12013 #endif
12014
12015 #ifdef CONFIG_XEN
12016diff -urNp linux-2.6.32.43/arch/x86/kernel/asm-offsets_64.c linux-2.6.32.43/arch/x86/kernel/asm-offsets_64.c
12017--- linux-2.6.32.43/arch/x86/kernel/asm-offsets_64.c 2011-03-27 14:31:47.000000000 -0400
12018+++ linux-2.6.32.43/arch/x86/kernel/asm-offsets_64.c 2011-05-16 21:46:57.000000000 -0400
12019@@ -44,6 +44,8 @@ int main(void)
12020 ENTRY(addr_limit);
12021 ENTRY(preempt_count);
12022 ENTRY(status);
12023+ ENTRY(lowest_stack);
12024+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
12025 #ifdef CONFIG_IA32_EMULATION
12026 ENTRY(sysenter_return);
12027 #endif
12028@@ -63,6 +65,18 @@ int main(void)
12029 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
12030 OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs);
12031 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
12032+
12033+#ifdef CONFIG_PAX_KERNEXEC
12034+ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
12035+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
12036+#endif
12037+
12038+#ifdef CONFIG_PAX_MEMORY_UDEREF
12039+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
12040+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
12041+ OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd);
12042+#endif
12043+
12044 #endif
12045
12046
12047@@ -115,6 +129,7 @@ int main(void)
12048 ENTRY(cr8);
12049 BLANK();
12050 #undef ENTRY
12051+ DEFINE(TSS_size, sizeof(struct tss_struct));
12052 DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist));
12053 BLANK();
12054 DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx));
12055@@ -130,6 +145,7 @@ int main(void)
12056
12057 BLANK();
12058 DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
12059+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
12060 #ifdef CONFIG_XEN
12061 BLANK();
12062 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
12063diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/amd.c linux-2.6.32.43/arch/x86/kernel/cpu/amd.c
12064--- linux-2.6.32.43/arch/x86/kernel/cpu/amd.c 2011-06-25 12:55:34.000000000 -0400
12065+++ linux-2.6.32.43/arch/x86/kernel/cpu/amd.c 2011-06-25 12:56:37.000000000 -0400
12066@@ -602,7 +602,7 @@ static unsigned int __cpuinit amd_size_c
12067 unsigned int size)
12068 {
12069 /* AMD errata T13 (order #21922) */
12070- if ((c->x86 == 6)) {
12071+ if (c->x86 == 6) {
12072 /* Duron Rev A0 */
12073 if (c->x86_model == 3 && c->x86_mask == 0)
12074 size = 64;
12075diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/common.c linux-2.6.32.43/arch/x86/kernel/cpu/common.c
12076--- linux-2.6.32.43/arch/x86/kernel/cpu/common.c 2011-03-27 14:31:47.000000000 -0400
12077+++ linux-2.6.32.43/arch/x86/kernel/cpu/common.c 2011-05-11 18:25:15.000000000 -0400
12078@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon
12079
12080 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
12081
12082-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
12083-#ifdef CONFIG_X86_64
12084- /*
12085- * We need valid kernel segments for data and code in long mode too
12086- * IRET will check the segment types kkeil 2000/10/28
12087- * Also sysret mandates a special GDT layout
12088- *
12089- * TLS descriptors are currently at a different place compared to i386.
12090- * Hopefully nobody expects them at a fixed place (Wine?)
12091- */
12092- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
12093- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
12094- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
12095- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
12096- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
12097- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
12098-#else
12099- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
12100- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12101- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
12102- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
12103- /*
12104- * Segments used for calling PnP BIOS have byte granularity.
12105- * They code segments and data segments have fixed 64k limits,
12106- * the transfer segment sizes are set at run time.
12107- */
12108- /* 32-bit code */
12109- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12110- /* 16-bit code */
12111- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12112- /* 16-bit data */
12113- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
12114- /* 16-bit data */
12115- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
12116- /* 16-bit data */
12117- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
12118- /*
12119- * The APM segments have byte granularity and their bases
12120- * are set at run time. All have 64k limits.
12121- */
12122- /* 32-bit code */
12123- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
12124- /* 16-bit code */
12125- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
12126- /* data */
12127- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
12128-
12129- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12130- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
12131- GDT_STACK_CANARY_INIT
12132-#endif
12133-} };
12134-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
12135-
12136 static int __init x86_xsave_setup(char *s)
12137 {
12138 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
12139@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu)
12140 {
12141 struct desc_ptr gdt_descr;
12142
12143- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
12144+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
12145 gdt_descr.size = GDT_SIZE - 1;
12146 load_gdt(&gdt_descr);
12147 /* Reload the per-cpu base */
12148@@ -798,6 +744,10 @@ static void __cpuinit identify_cpu(struc
12149 /* Filter out anything that depends on CPUID levels we don't have */
12150 filter_cpuid_features(c, true);
12151
12152+#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32))
12153+ setup_clear_cpu_cap(X86_FEATURE_SEP);
12154+#endif
12155+
12156 /* If the model name is still unset, do table lookup. */
12157 if (!c->x86_model_id[0]) {
12158 const char *p;
12159@@ -980,6 +930,9 @@ static __init int setup_disablecpuid(cha
12160 }
12161 __setup("clearcpuid=", setup_disablecpuid);
12162
12163+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
12164+EXPORT_PER_CPU_SYMBOL(current_tinfo);
12165+
12166 #ifdef CONFIG_X86_64
12167 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
12168
12169@@ -995,7 +948,7 @@ DEFINE_PER_CPU(struct task_struct *, cur
12170 EXPORT_PER_CPU_SYMBOL(current_task);
12171
12172 DEFINE_PER_CPU(unsigned long, kernel_stack) =
12173- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
12174+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
12175 EXPORT_PER_CPU_SYMBOL(kernel_stack);
12176
12177 DEFINE_PER_CPU(char *, irq_stack_ptr) =
12178@@ -1060,7 +1013,7 @@ struct pt_regs * __cpuinit idle_regs(str
12179 {
12180 memset(regs, 0, sizeof(struct pt_regs));
12181 regs->fs = __KERNEL_PERCPU;
12182- regs->gs = __KERNEL_STACK_CANARY;
12183+ savesegment(gs, regs->gs);
12184
12185 return regs;
12186 }
12187@@ -1101,7 +1054,7 @@ void __cpuinit cpu_init(void)
12188 int i;
12189
12190 cpu = stack_smp_processor_id();
12191- t = &per_cpu(init_tss, cpu);
12192+ t = init_tss + cpu;
12193 orig_ist = &per_cpu(orig_ist, cpu);
12194
12195 #ifdef CONFIG_NUMA
12196@@ -1127,7 +1080,7 @@ void __cpuinit cpu_init(void)
12197 switch_to_new_gdt(cpu);
12198 loadsegment(fs, 0);
12199
12200- load_idt((const struct desc_ptr *)&idt_descr);
12201+ load_idt(&idt_descr);
12202
12203 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
12204 syscall_init();
12205@@ -1136,7 +1089,6 @@ void __cpuinit cpu_init(void)
12206 wrmsrl(MSR_KERNEL_GS_BASE, 0);
12207 barrier();
12208
12209- check_efer();
12210 if (cpu != 0)
12211 enable_x2apic();
12212
12213@@ -1199,7 +1151,7 @@ void __cpuinit cpu_init(void)
12214 {
12215 int cpu = smp_processor_id();
12216 struct task_struct *curr = current;
12217- struct tss_struct *t = &per_cpu(init_tss, cpu);
12218+ struct tss_struct *t = init_tss + cpu;
12219 struct thread_struct *thread = &curr->thread;
12220
12221 if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) {
12222diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/intel.c linux-2.6.32.43/arch/x86/kernel/cpu/intel.c
12223--- linux-2.6.32.43/arch/x86/kernel/cpu/intel.c 2011-03-27 14:31:47.000000000 -0400
12224+++ linux-2.6.32.43/arch/x86/kernel/cpu/intel.c 2011-04-17 15:56:46.000000000 -0400
12225@@ -162,7 +162,7 @@ static void __cpuinit trap_init_f00f_bug
12226 * Update the IDT descriptor and reload the IDT so that
12227 * it uses the read-only mapped virtual address.
12228 */
12229- idt_descr.address = fix_to_virt(FIX_F00F_IDT);
12230+ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT);
12231 load_idt(&idt_descr);
12232 }
12233 #endif
12234diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/intel_cacheinfo.c linux-2.6.32.43/arch/x86/kernel/cpu/intel_cacheinfo.c
12235--- linux-2.6.32.43/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-03-27 14:31:47.000000000 -0400
12236+++ linux-2.6.32.43/arch/x86/kernel/cpu/intel_cacheinfo.c 2011-04-17 15:56:46.000000000 -0400
12237@@ -921,7 +921,7 @@ static ssize_t store(struct kobject *kob
12238 return ret;
12239 }
12240
12241-static struct sysfs_ops sysfs_ops = {
12242+static const struct sysfs_ops sysfs_ops = {
12243 .show = show,
12244 .store = store,
12245 };
12246diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/Makefile linux-2.6.32.43/arch/x86/kernel/cpu/Makefile
12247--- linux-2.6.32.43/arch/x86/kernel/cpu/Makefile 2011-03-27 14:31:47.000000000 -0400
12248+++ linux-2.6.32.43/arch/x86/kernel/cpu/Makefile 2011-04-17 15:56:46.000000000 -0400
12249@@ -7,10 +7,6 @@ ifdef CONFIG_FUNCTION_TRACER
12250 CFLAGS_REMOVE_common.o = -pg
12251 endif
12252
12253-# Make sure load_percpu_segment has no stackprotector
12254-nostackp := $(call cc-option, -fno-stack-protector)
12255-CFLAGS_common.o := $(nostackp)
12256-
12257 obj-y := intel_cacheinfo.o addon_cpuid_features.o
12258 obj-y += proc.o capflags.o powerflags.o common.o
12259 obj-y += vmware.o hypervisor.o sched.o
12260diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce_amd.c linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce_amd.c
12261--- linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:56:59.000000000 -0400
12262+++ linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce_amd.c 2011-05-23 16:57:13.000000000 -0400
12263@@ -385,7 +385,7 @@ static ssize_t store(struct kobject *kob
12264 return ret;
12265 }
12266
12267-static struct sysfs_ops threshold_ops = {
12268+static const struct sysfs_ops threshold_ops = {
12269 .show = show,
12270 .store = store,
12271 };
12272diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce.c
12273--- linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce.c 2011-03-27 14:31:47.000000000 -0400
12274+++ linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce.c 2011-05-04 17:56:20.000000000 -0400
12275@@ -43,6 +43,7 @@
12276 #include <asm/ipi.h>
12277 #include <asm/mce.h>
12278 #include <asm/msr.h>
12279+#include <asm/local.h>
12280
12281 #include "mce-internal.h"
12282
12283@@ -187,7 +188,7 @@ static void print_mce(struct mce *m)
12284 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
12285 m->cs, m->ip);
12286
12287- if (m->cs == __KERNEL_CS)
12288+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
12289 print_symbol("{%s}", m->ip);
12290 pr_cont("\n");
12291 }
12292@@ -221,10 +222,10 @@ static void print_mce_tail(void)
12293
12294 #define PANIC_TIMEOUT 5 /* 5 seconds */
12295
12296-static atomic_t mce_paniced;
12297+static atomic_unchecked_t mce_paniced;
12298
12299 static int fake_panic;
12300-static atomic_t mce_fake_paniced;
12301+static atomic_unchecked_t mce_fake_paniced;
12302
12303 /* Panic in progress. Enable interrupts and wait for final IPI */
12304 static void wait_for_panic(void)
12305@@ -248,7 +249,7 @@ static void mce_panic(char *msg, struct
12306 /*
12307 * Make sure only one CPU runs in machine check panic
12308 */
12309- if (atomic_inc_return(&mce_paniced) > 1)
12310+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
12311 wait_for_panic();
12312 barrier();
12313
12314@@ -256,7 +257,7 @@ static void mce_panic(char *msg, struct
12315 console_verbose();
12316 } else {
12317 /* Don't log too much for fake panic */
12318- if (atomic_inc_return(&mce_fake_paniced) > 1)
12319+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
12320 return;
12321 }
12322 print_mce_head();
12323@@ -616,7 +617,7 @@ static int mce_timed_out(u64 *t)
12324 * might have been modified by someone else.
12325 */
12326 rmb();
12327- if (atomic_read(&mce_paniced))
12328+ if (atomic_read_unchecked(&mce_paniced))
12329 wait_for_panic();
12330 if (!monarch_timeout)
12331 goto out;
12332@@ -1429,14 +1430,14 @@ void __cpuinit mcheck_init(struct cpuinf
12333 */
12334
12335 static DEFINE_SPINLOCK(mce_state_lock);
12336-static int open_count; /* #times opened */
12337+static local_t open_count; /* #times opened */
12338 static int open_exclu; /* already open exclusive? */
12339
12340 static int mce_open(struct inode *inode, struct file *file)
12341 {
12342 spin_lock(&mce_state_lock);
12343
12344- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) {
12345+ if (open_exclu || (local_read(&open_count) && (file->f_flags & O_EXCL))) {
12346 spin_unlock(&mce_state_lock);
12347
12348 return -EBUSY;
12349@@ -1444,7 +1445,7 @@ static int mce_open(struct inode *inode,
12350
12351 if (file->f_flags & O_EXCL)
12352 open_exclu = 1;
12353- open_count++;
12354+ local_inc(&open_count);
12355
12356 spin_unlock(&mce_state_lock);
12357
12358@@ -1455,7 +1456,7 @@ static int mce_release(struct inode *ino
12359 {
12360 spin_lock(&mce_state_lock);
12361
12362- open_count--;
12363+ local_dec(&open_count);
12364 open_exclu = 0;
12365
12366 spin_unlock(&mce_state_lock);
12367@@ -2082,7 +2083,7 @@ struct dentry *mce_get_debugfs_dir(void)
12368 static void mce_reset(void)
12369 {
12370 cpu_missing = 0;
12371- atomic_set(&mce_fake_paniced, 0);
12372+ atomic_set_unchecked(&mce_fake_paniced, 0);
12373 atomic_set(&mce_executing, 0);
12374 atomic_set(&mce_callin, 0);
12375 atomic_set(&global_nwo, 0);
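Two counter conversions run through the mce.c hunk above: the panic bookkeeping moves to atomic_unchecked_t, PaX's atomic_t variant that opts out of the REFCOUNT overflow checking (fine for a flag-like "has anyone panicked yet" counter), and the character-device open accounting switches from a plain int to a local_t updated with local_inc()/local_dec()/local_read() from <asm/local.h>. A small userspace model of that open/release accounting, using a C11 atomic in place of local_t (the real code additionally holds mce_state_lock around both paths):

#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count = 0;  /* stands in for the kernel's local_t      */
static int open_exclu;             /* "already opened O_EXCL" flag, as before  */

static int mce_open_model(int want_excl)
{
    if (open_exclu || (atomic_load(&open_count) && want_excl))
        return -1;                      /* -EBUSY in the kernel               */
    if (want_excl)
        open_exclu = 1;
    atomic_fetch_add(&open_count, 1);   /* ~ local_inc(&open_count)           */
    return 0;
}

static void mce_release_model(void)
{
    atomic_fetch_sub(&open_count, 1);   /* ~ local_dec(&open_count)           */
    open_exclu = 0;
}

int main(void)
{
    printf("first open: %d\n", mce_open_model(0));
    printf("exclusive open while busy: %d\n", mce_open_model(1));
    mce_release_model();
    printf("exclusive open after release: %d\n", mce_open_model(1));
    return 0;
}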
12376diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce-inject.c linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce-inject.c
12377--- linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-03-27 14:31:47.000000000 -0400
12378+++ linux-2.6.32.43/arch/x86/kernel/cpu/mcheck/mce-inject.c 2011-08-05 20:33:55.000000000 -0400
12379@@ -211,7 +211,9 @@ static ssize_t mce_write(struct file *fi
12380 static int inject_init(void)
12381 {
12382 printk(KERN_INFO "Machine check injector initialized\n");
12383- mce_chrdev_ops.write = mce_write;
12384+ pax_open_kernel();
12385+ *(void **)&mce_chrdev_ops.write = mce_write;
12386+ pax_close_kernel();
12387 register_die_notifier(&mce_raise_nb);
12388 return 0;
12389 }
12390diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/amd.c linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/amd.c
12391--- linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/amd.c 2011-03-27 14:31:47.000000000 -0400
12392+++ linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/amd.c 2011-04-17 15:56:46.000000000 -0400
12393@@ -108,7 +108,7 @@ amd_validate_add_page(unsigned long base
12394 return 0;
12395 }
12396
12397-static struct mtrr_ops amd_mtrr_ops = {
12398+static const struct mtrr_ops amd_mtrr_ops = {
12399 .vendor = X86_VENDOR_AMD,
12400 .set = amd_set_mtrr,
12401 .get = amd_get_mtrr,
12402diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/centaur.c linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/centaur.c
12403--- linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/centaur.c 2011-03-27 14:31:47.000000000 -0400
12404+++ linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/centaur.c 2011-04-17 15:56:46.000000000 -0400
12405@@ -110,7 +110,7 @@ centaur_validate_add_page(unsigned long
12406 return 0;
12407 }
12408
12409-static struct mtrr_ops centaur_mtrr_ops = {
12410+static const struct mtrr_ops centaur_mtrr_ops = {
12411 .vendor = X86_VENDOR_CENTAUR,
12412 .set = centaur_set_mcr,
12413 .get = centaur_get_mcr,
12414diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/cyrix.c linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/cyrix.c
12415--- linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-03-27 14:31:47.000000000 -0400
12416+++ linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/cyrix.c 2011-04-17 15:56:46.000000000 -0400
12417@@ -265,7 +265,7 @@ static void cyrix_set_all(void)
12418 post_set();
12419 }
12420
12421-static struct mtrr_ops cyrix_mtrr_ops = {
12422+static const struct mtrr_ops cyrix_mtrr_ops = {
12423 .vendor = X86_VENDOR_CYRIX,
12424 .set_all = cyrix_set_all,
12425 .set = cyrix_set_arr,
12426diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/generic.c
12427--- linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/generic.c 2011-03-27 14:31:47.000000000 -0400
12428+++ linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/generic.c 2011-04-23 12:56:10.000000000 -0400
12429@@ -752,7 +752,7 @@ int positive_have_wrcomb(void)
12430 /*
12431 * Generic structure...
12432 */
12433-struct mtrr_ops generic_mtrr_ops = {
12434+const struct mtrr_ops generic_mtrr_ops = {
12435 .use_intel_if = 1,
12436 .set_all = generic_set_all,
12437 .get = generic_get_mtrr,
12438diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/main.c
12439--- linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:00:52.000000000 -0400
12440+++ linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/main.c 2011-04-17 17:03:05.000000000 -0400
12441@@ -60,14 +60,14 @@ static DEFINE_MUTEX(mtrr_mutex);
12442 u64 size_or_mask, size_and_mask;
12443 static bool mtrr_aps_delayed_init;
12444
12445-static struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
12446+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
12447
12448-struct mtrr_ops *mtrr_if;
12449+const struct mtrr_ops *mtrr_if;
12450
12451 static void set_mtrr(unsigned int reg, unsigned long base,
12452 unsigned long size, mtrr_type type);
12453
12454-void set_mtrr_ops(struct mtrr_ops *ops)
12455+void set_mtrr_ops(const struct mtrr_ops *ops)
12456 {
12457 if (ops->vendor && ops->vendor < X86_VENDOR_NUM)
12458 mtrr_ops[ops->vendor] = ops;
12459diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/mtrr.h
12460--- linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-03-27 14:31:47.000000000 -0400
12461+++ linux-2.6.32.43/arch/x86/kernel/cpu/mtrr/mtrr.h 2011-04-17 15:56:46.000000000 -0400
12462@@ -12,19 +12,19 @@
12463 extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
12464
12465 struct mtrr_ops {
12466- u32 vendor;
12467- u32 use_intel_if;
12468- void (*set)(unsigned int reg, unsigned long base,
12469+ const u32 vendor;
12470+ const u32 use_intel_if;
12471+ void (* const set)(unsigned int reg, unsigned long base,
12472 unsigned long size, mtrr_type type);
12473- void (*set_all)(void);
12474+ void (* const set_all)(void);
12475
12476- void (*get)(unsigned int reg, unsigned long *base,
12477+ void (* const get)(unsigned int reg, unsigned long *base,
12478 unsigned long *size, mtrr_type *type);
12479- int (*get_free_region)(unsigned long base, unsigned long size,
12480+ int (* const get_free_region)(unsigned long base, unsigned long size,
12481 int replace_reg);
12482- int (*validate_add_page)(unsigned long base, unsigned long size,
12483+ int (* const validate_add_page)(unsigned long base, unsigned long size,
12484 unsigned int type);
12485- int (*have_wrcomb)(void);
12486+ int (* const have_wrcomb)(void);
12487 };
12488
12489 extern int generic_get_free_region(unsigned long base, unsigned long size,
12490@@ -32,7 +32,7 @@ extern int generic_get_free_region(unsig
12491 extern int generic_validate_add_page(unsigned long base, unsigned long size,
12492 unsigned int type);
12493
12494-extern struct mtrr_ops generic_mtrr_ops;
12495+extern const struct mtrr_ops generic_mtrr_ops;
12496
12497 extern int positive_have_wrcomb(void);
12498
12499@@ -53,10 +53,10 @@ void fill_mtrr_var_range(unsigned int in
12500 u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
12501 void get_mtrr_state(void);
12502
12503-extern void set_mtrr_ops(struct mtrr_ops *ops);
12504+extern void set_mtrr_ops(const struct mtrr_ops *ops);
12505
12506 extern u64 size_or_mask, size_and_mask;
12507-extern struct mtrr_ops *mtrr_if;
12508+extern const struct mtrr_ops *mtrr_if;
12509
12510 #define is_cpu(vnd) (mtrr_if && mtrr_if->vendor == X86_VENDOR_##vnd)
12511 #define use_intel() (mtrr_if && mtrr_if->use_intel_if == 1)
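Across the mtrr diffs above, the vendor ops tables (amd, centaur, cyrix, generic) and the mtrr_ops[]/mtrr_if dispatch pointers are const-qualified, and every function-pointer member of struct mtrr_ops becomes "* const", so the tables land in read-only data and their slots can no longer be overwritten at run time (writable function-pointer tables being a favourite kernel-exploit target). A minimal, compilable sketch of the same constification pattern, using hypothetical names:

    /* Sketch of the const-ification above: the vendor ops object lives in
     * .rodata and is reached through a pointer-to-const, so neither the
     * dispatch pointer's target nor the individual slots can be rewritten. */
    #include <stdio.h>

    struct mtrr_like_ops {
        unsigned vendor;
        int  (* const validate)(unsigned long base, unsigned long size);
        void (* const set)(unsigned reg, unsigned long base, unsigned long size);
    };

    static int demo_validate(unsigned long base, unsigned long size)
    {
        (void)base;
        return size != 0;
    }

    static void demo_set(unsigned reg, unsigned long base, unsigned long size)
    {
        printf("reg %u <- base %#lx size %#lx\n", reg, base, size);
    }

    static const struct mtrr_like_ops demo_ops = {
        .vendor   = 2,
        .validate = demo_validate,
        .set      = demo_set,
    };

    static const struct mtrr_like_ops *cur_ops = &demo_ops;   /* plays the role of mtrr_if */

    int main(void)
    {
        if (cur_ops->validate(0x100000UL, 0x1000UL))
            cur_ops->set(0, 0x100000UL, 0x1000UL);
        /* cur_ops->set = NULL;  -- rejected by the compiler after constification */
        return 0;
    }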
12512diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.32.43/arch/x86/kernel/cpu/perfctr-watchdog.c
12513--- linux-2.6.32.43/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-03-27 14:31:47.000000000 -0400
12514+++ linux-2.6.32.43/arch/x86/kernel/cpu/perfctr-watchdog.c 2011-04-17 15:56:46.000000000 -0400
12515@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk {
12516
12517 /* Interface defining a CPU specific perfctr watchdog */
12518 struct wd_ops {
12519- int (*reserve)(void);
12520- void (*unreserve)(void);
12521- int (*setup)(unsigned nmi_hz);
12522- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12523- void (*stop)(void);
12524+ int (* const reserve)(void);
12525+ void (* const unreserve)(void);
12526+ int (* const setup)(unsigned nmi_hz);
12527+ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz);
12528+ void (* const stop)(void);
12529 unsigned perfctr;
12530 unsigned evntsel;
12531 u64 checkbit;
12532@@ -645,6 +645,7 @@ static const struct wd_ops p4_wd_ops = {
12533 #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL
12534 #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK
12535
12536+/* cannot be const */
12537 static struct wd_ops intel_arch_wd_ops;
12538
12539 static int setup_intel_arch_watchdog(unsigned nmi_hz)
12540@@ -697,6 +698,7 @@ static int setup_intel_arch_watchdog(uns
12541 return 1;
12542 }
12543
12544+/* cannot be const */
12545 static struct wd_ops intel_arch_wd_ops __read_mostly = {
12546 .reserve = single_msr_reserve,
12547 .unreserve = single_msr_unreserve,
12548diff -urNp linux-2.6.32.43/arch/x86/kernel/cpu/perf_event.c linux-2.6.32.43/arch/x86/kernel/cpu/perf_event.c
12549--- linux-2.6.32.43/arch/x86/kernel/cpu/perf_event.c 2011-03-27 14:31:47.000000000 -0400
12550+++ linux-2.6.32.43/arch/x86/kernel/cpu/perf_event.c 2011-05-04 17:56:20.000000000 -0400
12551@@ -723,10 +723,10 @@ x86_perf_event_update(struct perf_event
12552 * count to the generic event atomically:
12553 */
12554 again:
12555- prev_raw_count = atomic64_read(&hwc->prev_count);
12556+ prev_raw_count = atomic64_read_unchecked(&hwc->prev_count);
12557 rdmsrl(hwc->event_base + idx, new_raw_count);
12558
12559- if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
12560+ if (atomic64_cmpxchg_unchecked(&hwc->prev_count, prev_raw_count,
12561 new_raw_count) != prev_raw_count)
12562 goto again;
12563
12564@@ -741,7 +741,7 @@ again:
12565 delta = (new_raw_count << shift) - (prev_raw_count << shift);
12566 delta >>= shift;
12567
12568- atomic64_add(delta, &event->count);
12569+ atomic64_add_unchecked(delta, &event->count);
12570 atomic64_sub(delta, &hwc->period_left);
12571
12572 return new_raw_count;
12573@@ -1353,7 +1353,7 @@ x86_perf_event_set_period(struct perf_ev
12574 * The hw event starts counting from this event offset,
12575 * mark it to be able to extra future deltas:
12576 */
12577- atomic64_set(&hwc->prev_count, (u64)-left);
12578+ atomic64_set_unchecked(&hwc->prev_count, (u64)-left);
12579
12580 err = checking_wrmsrl(hwc->event_base + idx,
12581 (u64)(-left) & x86_pmu.event_mask);
12582@@ -2357,7 +2357,7 @@ perf_callchain_user(struct pt_regs *regs
12583 break;
12584
12585 callchain_store(entry, frame.return_address);
12586- fp = frame.next_frame;
12587+ fp = (__force const void __user *)frame.next_frame;
12588 }
12589 }
12590
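The perf_event changes above switch the hwc->prev_count and event->count updates to the atomic64_*_unchecked() variants. Under PaX's overflow-detecting atomics, ordinary atomic counters are treated as reference counts and must not wrap, so counters that are pure statistics and may legitimately roll over opt out via _unchecked. The toy counters below sketch only that distinction (single-threaded, unsigned, no atomicity); they are not the PaX implementation.

    /* Conceptual sketch: a "checked" counter saturates when an addition
     * would overflow, while an "unchecked" counter is allowed to wrap.
     * Event/statistics counters such as the ones patched above fall into
     * the second category. */
    #include <limits.h>
    #include <stdio.h>

    typedef struct { unsigned long v; } counter_demo_t;

    static void add_checked(unsigned long i, counter_demo_t *c)
    {
        unsigned long sum;

        if (__builtin_add_overflow(c->v, i, &sum)) {
            c->v = ULONG_MAX;       /* saturate (PaX would also report the event) */
            return;
        }
        c->v = sum;
    }

    static void add_unchecked(unsigned long i, counter_demo_t *c)
    {
        c->v += i;                  /* wrapping is acceptable for statistics */
    }

    int main(void)
    {
        counter_demo_t refs = { ULONG_MAX - 1 }, stats = { ULONG_MAX - 1 };

        add_checked(2, &refs);
        add_unchecked(2, &stats);
        printf("checked: %lu, unchecked: %lu\n", refs.v, stats.v);
        return 0;
    }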
12591diff -urNp linux-2.6.32.43/arch/x86/kernel/crash.c linux-2.6.32.43/arch/x86/kernel/crash.c
12592--- linux-2.6.32.43/arch/x86/kernel/crash.c 2011-03-27 14:31:47.000000000 -0400
12593+++ linux-2.6.32.43/arch/x86/kernel/crash.c 2011-04-17 15:56:46.000000000 -0400
12594@@ -41,7 +41,7 @@ static void kdump_nmi_callback(int cpu,
12595 regs = args->regs;
12596
12597 #ifdef CONFIG_X86_32
12598- if (!user_mode_vm(regs)) {
12599+ if (!user_mode(regs)) {
12600 crash_fixup_ss_esp(&fixed_regs, regs);
12601 regs = &fixed_regs;
12602 }
12603diff -urNp linux-2.6.32.43/arch/x86/kernel/doublefault_32.c linux-2.6.32.43/arch/x86/kernel/doublefault_32.c
12604--- linux-2.6.32.43/arch/x86/kernel/doublefault_32.c 2011-03-27 14:31:47.000000000 -0400
12605+++ linux-2.6.32.43/arch/x86/kernel/doublefault_32.c 2011-04-17 15:56:46.000000000 -0400
12606@@ -11,7 +11,7 @@
12607
12608 #define DOUBLEFAULT_STACKSIZE (1024)
12609 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
12610-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
12611+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
12612
12613 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
12614
12615@@ -21,7 +21,7 @@ static void doublefault_fn(void)
12616 unsigned long gdt, tss;
12617
12618 store_gdt(&gdt_desc);
12619- gdt = gdt_desc.address;
12620+ gdt = (unsigned long)gdt_desc.address;
12621
12622 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
12623
12624@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach
12625 /* 0x2 bit is always set */
12626 .flags = X86_EFLAGS_SF | 0x2,
12627 .sp = STACK_START,
12628- .es = __USER_DS,
12629+ .es = __KERNEL_DS,
12630 .cs = __KERNEL_CS,
12631 .ss = __KERNEL_DS,
12632- .ds = __USER_DS,
12633+ .ds = __KERNEL_DS,
12634 .fs = __KERNEL_PERCPU,
12635
12636 .__cr3 = __pa_nodebug(swapper_pg_dir),
12637diff -urNp linux-2.6.32.43/arch/x86/kernel/dumpstack_32.c linux-2.6.32.43/arch/x86/kernel/dumpstack_32.c
12638--- linux-2.6.32.43/arch/x86/kernel/dumpstack_32.c 2011-03-27 14:31:47.000000000 -0400
12639+++ linux-2.6.32.43/arch/x86/kernel/dumpstack_32.c 2011-04-17 15:56:46.000000000 -0400
12640@@ -53,16 +53,12 @@ void dump_trace(struct task_struct *task
12641 #endif
12642
12643 for (;;) {
12644- struct thread_info *context;
12645+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12646+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12647
12648- context = (struct thread_info *)
12649- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
12650- bp = print_context_stack(context, stack, bp, ops,
12651- data, NULL, &graph);
12652-
12653- stack = (unsigned long *)context->previous_esp;
12654- if (!stack)
12655+ if (stack_start == task_stack_page(task))
12656 break;
12657+ stack = *(unsigned long **)stack_start;
12658 if (ops->stack(data, "IRQ") < 0)
12659 break;
12660 touch_nmi_watchdog();
12661@@ -112,11 +108,12 @@ void show_registers(struct pt_regs *regs
12662 * When in-kernel, we also print out the stack and code at the
12663 * time of the fault..
12664 */
12665- if (!user_mode_vm(regs)) {
12666+ if (!user_mode(regs)) {
12667 unsigned int code_prologue = code_bytes * 43 / 64;
12668 unsigned int code_len = code_bytes;
12669 unsigned char c;
12670 u8 *ip;
12671+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]);
12672
12673 printk(KERN_EMERG "Stack:\n");
12674 show_stack_log_lvl(NULL, regs, &regs->sp,
12675@@ -124,10 +121,10 @@ void show_registers(struct pt_regs *regs
12676
12677 printk(KERN_EMERG "Code: ");
12678
12679- ip = (u8 *)regs->ip - code_prologue;
12680+ ip = (u8 *)regs->ip - code_prologue + cs_base;
12681 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
12682 /* try starting at IP */
12683- ip = (u8 *)regs->ip;
12684+ ip = (u8 *)regs->ip + cs_base;
12685 code_len = code_len - code_prologue + 1;
12686 }
12687 for (i = 0; i < code_len; i++, ip++) {
12688@@ -136,7 +133,7 @@ void show_registers(struct pt_regs *regs
12689 printk(" Bad EIP value.");
12690 break;
12691 }
12692- if (ip == (u8 *)regs->ip)
12693+ if (ip == (u8 *)regs->ip + cs_base)
12694 printk("<%02x> ", c);
12695 else
12696 printk("%02x ", c);
12697@@ -149,6 +146,7 @@ int is_valid_bugaddr(unsigned long ip)
12698 {
12699 unsigned short ud2;
12700
12701+ ip = ktla_ktva(ip);
12702 if (ip < PAGE_OFFSET)
12703 return 0;
12704 if (probe_kernel_address((unsigned short *)ip, ud2))
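The dump_trace() rewrite above stops threading struct thread_info through the stack walker and instead derives the base of the current stack by masking the stack pointer with THREAD_SIZE-1, stopping once it reaches the task's own stack page. The fragment below shows that masking step in isolation; THREAD_SIZE_DEMO is an assumed value for illustration only.

    /* The kernel stack is THREAD_SIZE-aligned and THREAD_SIZE is a power of
     * two, so clearing the low bits of any address inside the stack yields
     * its base -- the stack_start computed in the hunk above. */
    #include <stdint.h>
    #include <stdio.h>

    #define THREAD_SIZE_DEMO 8192UL     /* assumption for the demo */

    static uintptr_t stack_base(uintptr_t sp)
    {
        return sp & ~(THREAD_SIZE_DEMO - 1);
    }

    int main(void)
    {
        uintptr_t sp = 0xc12345f0UL;

        printf("sp=%#lx base=%#lx\n", (unsigned long)sp, (unsigned long)stack_base(sp));
        return 0;
    }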
12705diff -urNp linux-2.6.32.43/arch/x86/kernel/dumpstack_64.c linux-2.6.32.43/arch/x86/kernel/dumpstack_64.c
12706--- linux-2.6.32.43/arch/x86/kernel/dumpstack_64.c 2011-03-27 14:31:47.000000000 -0400
12707+++ linux-2.6.32.43/arch/x86/kernel/dumpstack_64.c 2011-04-17 15:56:46.000000000 -0400
12708@@ -116,8 +116,8 @@ void dump_trace(struct task_struct *task
12709 unsigned long *irq_stack_end =
12710 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
12711 unsigned used = 0;
12712- struct thread_info *tinfo;
12713 int graph = 0;
12714+ void *stack_start;
12715
12716 if (!task)
12717 task = current;
12718@@ -146,10 +146,10 @@ void dump_trace(struct task_struct *task
12719 * current stack address. If the stacks consist of nested
12720 * exceptions
12721 */
12722- tinfo = task_thread_info(task);
12723 for (;;) {
12724 char *id;
12725 unsigned long *estack_end;
12726+
12727 estack_end = in_exception_stack(cpu, (unsigned long)stack,
12728 &used, &id);
12729
12730@@ -157,7 +157,7 @@ void dump_trace(struct task_struct *task
12731 if (ops->stack(data, id) < 0)
12732 break;
12733
12734- bp = print_context_stack(tinfo, stack, bp, ops,
12735+ bp = print_context_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
12736 data, estack_end, &graph);
12737 ops->stack(data, "<EOE>");
12738 /*
12739@@ -176,7 +176,7 @@ void dump_trace(struct task_struct *task
12740 if (stack >= irq_stack && stack < irq_stack_end) {
12741 if (ops->stack(data, "IRQ") < 0)
12742 break;
12743- bp = print_context_stack(tinfo, stack, bp,
12744+ bp = print_context_stack(task, irq_stack, stack, bp,
12745 ops, data, irq_stack_end, &graph);
12746 /*
12747 * We link to the next stack (which would be
12748@@ -195,7 +195,8 @@ void dump_trace(struct task_struct *task
12749 /*
12750 * This handles the process stack:
12751 */
12752- bp = print_context_stack(tinfo, stack, bp, ops, data, NULL, &graph);
12753+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
12754+ bp = print_context_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
12755 put_cpu();
12756 }
12757 EXPORT_SYMBOL(dump_trace);
12758diff -urNp linux-2.6.32.43/arch/x86/kernel/dumpstack.c linux-2.6.32.43/arch/x86/kernel/dumpstack.c
12759--- linux-2.6.32.43/arch/x86/kernel/dumpstack.c 2011-03-27 14:31:47.000000000 -0400
12760+++ linux-2.6.32.43/arch/x86/kernel/dumpstack.c 2011-04-17 15:56:46.000000000 -0400
12761@@ -2,6 +2,9 @@
12762 * Copyright (C) 1991, 1992 Linus Torvalds
12763 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
12764 */
12765+#ifdef CONFIG_GRKERNSEC_HIDESYM
12766+#define __INCLUDED_BY_HIDESYM 1
12767+#endif
12768 #include <linux/kallsyms.h>
12769 #include <linux/kprobes.h>
12770 #include <linux/uaccess.h>
12771@@ -28,7 +31,7 @@ static int die_counter;
12772
12773 void printk_address(unsigned long address, int reliable)
12774 {
12775- printk(" [<%p>] %s%pS\n", (void *) address,
12776+ printk(" [<%p>] %s%pA\n", (void *) address,
12777 reliable ? "" : "? ", (void *) address);
12778 }
12779
12780@@ -36,9 +39,8 @@ void printk_address(unsigned long addres
12781 static void
12782 print_ftrace_graph_addr(unsigned long addr, void *data,
12783 const struct stacktrace_ops *ops,
12784- struct thread_info *tinfo, int *graph)
12785+ struct task_struct *task, int *graph)
12786 {
12787- struct task_struct *task = tinfo->task;
12788 unsigned long ret_addr;
12789 int index = task->curr_ret_stack;
12790
12791@@ -59,7 +61,7 @@ print_ftrace_graph_addr(unsigned long ad
12792 static inline void
12793 print_ftrace_graph_addr(unsigned long addr, void *data,
12794 const struct stacktrace_ops *ops,
12795- struct thread_info *tinfo, int *graph)
12796+ struct task_struct *task, int *graph)
12797 { }
12798 #endif
12799
12800@@ -70,10 +72,8 @@ print_ftrace_graph_addr(unsigned long ad
12801 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
12802 */
12803
12804-static inline int valid_stack_ptr(struct thread_info *tinfo,
12805- void *p, unsigned int size, void *end)
12806+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
12807 {
12808- void *t = tinfo;
12809 if (end) {
12810 if (p < end && p >= (end-THREAD_SIZE))
12811 return 1;
12812@@ -84,14 +84,14 @@ static inline int valid_stack_ptr(struct
12813 }
12814
12815 unsigned long
12816-print_context_stack(struct thread_info *tinfo,
12817+print_context_stack(struct task_struct *task, void *stack_start,
12818 unsigned long *stack, unsigned long bp,
12819 const struct stacktrace_ops *ops, void *data,
12820 unsigned long *end, int *graph)
12821 {
12822 struct stack_frame *frame = (struct stack_frame *)bp;
12823
12824- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
12825+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
12826 unsigned long addr;
12827
12828 addr = *stack;
12829@@ -103,7 +103,7 @@ print_context_stack(struct thread_info *
12830 } else {
12831 ops->address(data, addr, 0);
12832 }
12833- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
12834+ print_ftrace_graph_addr(addr, data, ops, task, graph);
12835 }
12836 stack++;
12837 }
12838@@ -180,7 +180,7 @@ void dump_stack(void)
12839 #endif
12840
12841 printk("Pid: %d, comm: %.20s %s %s %.*s\n",
12842- current->pid, current->comm, print_tainted(),
12843+ task_pid_nr(current), current->comm, print_tainted(),
12844 init_utsname()->release,
12845 (int)strcspn(init_utsname()->version, " "),
12846 init_utsname()->version);
12847@@ -220,6 +220,8 @@ unsigned __kprobes long oops_begin(void)
12848 return flags;
12849 }
12850
12851+extern void gr_handle_kernel_exploit(void);
12852+
12853 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
12854 {
12855 if (regs && kexec_should_crash(current))
12856@@ -241,7 +243,10 @@ void __kprobes oops_end(unsigned long fl
12857 panic("Fatal exception in interrupt");
12858 if (panic_on_oops)
12859 panic("Fatal exception");
12860- do_exit(signr);
12861+
12862+ gr_handle_kernel_exploit();
12863+
12864+ do_group_exit(signr);
12865 }
12866
12867 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
12868@@ -295,7 +300,7 @@ void die(const char *str, struct pt_regs
12869 unsigned long flags = oops_begin();
12870 int sig = SIGSEGV;
12871
12872- if (!user_mode_vm(regs))
12873+ if (!user_mode(regs))
12874 report_bug(regs->ip, regs);
12875
12876 if (__die(str, regs, err))
12877diff -urNp linux-2.6.32.43/arch/x86/kernel/dumpstack.h linux-2.6.32.43/arch/x86/kernel/dumpstack.h
12878--- linux-2.6.32.43/arch/x86/kernel/dumpstack.h 2011-03-27 14:31:47.000000000 -0400
12879+++ linux-2.6.32.43/arch/x86/kernel/dumpstack.h 2011-04-23 13:25:26.000000000 -0400
12880@@ -15,7 +15,7 @@
12881 #endif
12882
12883 extern unsigned long
12884-print_context_stack(struct thread_info *tinfo,
12885+print_context_stack(struct task_struct *task, void *stack_start,
12886 unsigned long *stack, unsigned long bp,
12887 const struct stacktrace_ops *ops, void *data,
12888 unsigned long *end, int *graph);
12889diff -urNp linux-2.6.32.43/arch/x86/kernel/e820.c linux-2.6.32.43/arch/x86/kernel/e820.c
12890--- linux-2.6.32.43/arch/x86/kernel/e820.c 2011-03-27 14:31:47.000000000 -0400
12891+++ linux-2.6.32.43/arch/x86/kernel/e820.c 2011-04-17 15:56:46.000000000 -0400
12892@@ -733,7 +733,7 @@ struct early_res {
12893 };
12894 static struct early_res early_res[MAX_EARLY_RES] __initdata = {
12895 { 0, PAGE_SIZE, "BIOS data page" }, /* BIOS data page */
12896- {}
12897+ { 0, 0, {0}, 0 }
12898 };
12899
12900 static int __init find_overlapped_early(u64 start, u64 end)
12901diff -urNp linux-2.6.32.43/arch/x86/kernel/early_printk.c linux-2.6.32.43/arch/x86/kernel/early_printk.c
12902--- linux-2.6.32.43/arch/x86/kernel/early_printk.c 2011-03-27 14:31:47.000000000 -0400
12903+++ linux-2.6.32.43/arch/x86/kernel/early_printk.c 2011-05-16 21:46:57.000000000 -0400
12904@@ -7,6 +7,7 @@
12905 #include <linux/pci_regs.h>
12906 #include <linux/pci_ids.h>
12907 #include <linux/errno.h>
12908+#include <linux/sched.h>
12909 #include <asm/io.h>
12910 #include <asm/processor.h>
12911 #include <asm/fcntl.h>
12912@@ -170,6 +171,8 @@ asmlinkage void early_printk(const char
12913 int n;
12914 va_list ap;
12915
12916+ pax_track_stack();
12917+
12918 va_start(ap, fmt);
12919 n = vscnprintf(buf, sizeof(buf), fmt, ap);
12920 early_console->write(early_console, buf, n);
12921diff -urNp linux-2.6.32.43/arch/x86/kernel/efi_32.c linux-2.6.32.43/arch/x86/kernel/efi_32.c
12922--- linux-2.6.32.43/arch/x86/kernel/efi_32.c 2011-03-27 14:31:47.000000000 -0400
12923+++ linux-2.6.32.43/arch/x86/kernel/efi_32.c 2011-04-17 15:56:46.000000000 -0400
12924@@ -38,70 +38,38 @@
12925 */
12926
12927 static unsigned long efi_rt_eflags;
12928-static pgd_t efi_bak_pg_dir_pointer[2];
12929+static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS];
12930
12931-void efi_call_phys_prelog(void)
12932+void __init efi_call_phys_prelog(void)
12933 {
12934- unsigned long cr4;
12935- unsigned long temp;
12936 struct desc_ptr gdt_descr;
12937
12938 local_irq_save(efi_rt_eflags);
12939
12940- /*
12941- * If I don't have PAE, I should just duplicate two entries in page
12942- * directory. If I have PAE, I just need to duplicate one entry in
12943- * page directory.
12944- */
12945- cr4 = read_cr4_safe();
12946
12947- if (cr4 & X86_CR4_PAE) {
12948- efi_bak_pg_dir_pointer[0].pgd =
12949- swapper_pg_dir[pgd_index(0)].pgd;
12950- swapper_pg_dir[0].pgd =
12951- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12952- } else {
12953- efi_bak_pg_dir_pointer[0].pgd =
12954- swapper_pg_dir[pgd_index(0)].pgd;
12955- efi_bak_pg_dir_pointer[1].pgd =
12956- swapper_pg_dir[pgd_index(0x400000)].pgd;
12957- swapper_pg_dir[pgd_index(0)].pgd =
12958- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
12959- temp = PAGE_OFFSET + 0x400000;
12960- swapper_pg_dir[pgd_index(0x400000)].pgd =
12961- swapper_pg_dir[pgd_index(temp)].pgd;
12962- }
12963+ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
12964+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
12965+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
12966
12967 /*
12968 * After the lock is released, the original page table is restored.
12969 */
12970 __flush_tlb_all();
12971
12972- gdt_descr.address = __pa(get_cpu_gdt_table(0));
12973+ gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
12974 gdt_descr.size = GDT_SIZE - 1;
12975 load_gdt(&gdt_descr);
12976 }
12977
12978-void efi_call_phys_epilog(void)
12979+void __init efi_call_phys_epilog(void)
12980 {
12981- unsigned long cr4;
12982 struct desc_ptr gdt_descr;
12983
12984- gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
12985+ gdt_descr.address = get_cpu_gdt_table(0);
12986 gdt_descr.size = GDT_SIZE - 1;
12987 load_gdt(&gdt_descr);
12988
12989- cr4 = read_cr4_safe();
12990-
12991- if (cr4 & X86_CR4_PAE) {
12992- swapper_pg_dir[pgd_index(0)].pgd =
12993- efi_bak_pg_dir_pointer[0].pgd;
12994- } else {
12995- swapper_pg_dir[pgd_index(0)].pgd =
12996- efi_bak_pg_dir_pointer[0].pgd;
12997- swapper_pg_dir[pgd_index(0x400000)].pgd =
12998- efi_bak_pg_dir_pointer[1].pgd;
12999- }
13000+ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);
13001
13002 /*
13003 * After the lock is released, the original page table is restored.
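The efi_32.c rewrite above replaces the hand-rolled PAE/non-PAE save-and-restore of individual page-directory entries with two clone_pgd_range() calls over the whole kernel PGD range, which simplifies the code and stays correct when more than two entries need the temporary identity mapping. Functionally, clone_pgd_range() is a typed copy of PGD slots; the stand-alone sketch below mirrors that shape with hypothetical types.

    /* Minimal sketch of the save/patch/restore sequence in the prelog and
     * epilog above, using a plain array in place of swapper_pg_dir. */
    #include <stdio.h>
    #include <string.h>

    typedef struct { unsigned long pgd; } pgd_demo_t;

    static void clone_pgd_range_demo(pgd_demo_t *dst, const pgd_demo_t *src, int count)
    {
        memcpy(dst, src, count * sizeof(*src));
    }

    int main(void)
    {
        pgd_demo_t live[4] = { {1}, {2}, {3}, {4} };
        pgd_demo_t backup[4];

        clone_pgd_range_demo(backup, live, 4);    /* prelog: back up the PGD */
        live[0].pgd = 99;                         /* temporary low identity mapping */
        clone_pgd_range_demo(live, backup, 4);    /* epilog: restore the original */
        printf("restored pgd[0] = %lu\n", live[0].pgd);
        return 0;
    }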
13004diff -urNp linux-2.6.32.43/arch/x86/kernel/efi_stub_32.S linux-2.6.32.43/arch/x86/kernel/efi_stub_32.S
13005--- linux-2.6.32.43/arch/x86/kernel/efi_stub_32.S 2011-03-27 14:31:47.000000000 -0400
13006+++ linux-2.6.32.43/arch/x86/kernel/efi_stub_32.S 2011-04-17 15:56:46.000000000 -0400
13007@@ -6,6 +6,7 @@
13008 */
13009
13010 #include <linux/linkage.h>
13011+#include <linux/init.h>
13012 #include <asm/page_types.h>
13013
13014 /*
13015@@ -20,7 +21,7 @@
13016 * service functions will comply with gcc calling convention, too.
13017 */
13018
13019-.text
13020+__INIT
13021 ENTRY(efi_call_phys)
13022 /*
13023 * 0. The function can only be called in Linux kernel. So CS has been
13024@@ -36,9 +37,7 @@ ENTRY(efi_call_phys)
13025 * The mapping of lower virtual memory has been created in prelog and
13026 * epilog.
13027 */
13028- movl $1f, %edx
13029- subl $__PAGE_OFFSET, %edx
13030- jmp *%edx
13031+ jmp 1f-__PAGE_OFFSET
13032 1:
13033
13034 /*
13035@@ -47,14 +46,8 @@ ENTRY(efi_call_phys)
13036 * parameter 2, ..., param n. To make things easy, we save the return
13037 * address of efi_call_phys in a global variable.
13038 */
13039- popl %edx
13040- movl %edx, saved_return_addr
13041- /* get the function pointer into ECX*/
13042- popl %ecx
13043- movl %ecx, efi_rt_function_ptr
13044- movl $2f, %edx
13045- subl $__PAGE_OFFSET, %edx
13046- pushl %edx
13047+ popl (saved_return_addr)
13048+ popl (efi_rt_function_ptr)
13049
13050 /*
13051 * 3. Clear PG bit in %CR0.
13052@@ -73,9 +66,8 @@ ENTRY(efi_call_phys)
13053 /*
13054 * 5. Call the physical function.
13055 */
13056- jmp *%ecx
13057+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
13058
13059-2:
13060 /*
13061 * 6. After EFI runtime service returns, control will return to
13062 * following instruction. We'd better readjust stack pointer first.
13063@@ -88,35 +80,28 @@ ENTRY(efi_call_phys)
13064 movl %cr0, %edx
13065 orl $0x80000000, %edx
13066 movl %edx, %cr0
13067- jmp 1f
13068-1:
13069+
13070 /*
13071 * 8. Now restore the virtual mode from flat mode by
13072 * adding EIP with PAGE_OFFSET.
13073 */
13074- movl $1f, %edx
13075- jmp *%edx
13076+ jmp 1f+__PAGE_OFFSET
13077 1:
13078
13079 /*
13080 * 9. Balance the stack. And because EAX contain the return value,
13081 * we'd better not clobber it.
13082 */
13083- leal efi_rt_function_ptr, %edx
13084- movl (%edx), %ecx
13085- pushl %ecx
13086+ pushl (efi_rt_function_ptr)
13087
13088 /*
13089- * 10. Push the saved return address onto the stack and return.
13090+ * 10. Return to the saved return address.
13091 */
13092- leal saved_return_addr, %edx
13093- movl (%edx), %ecx
13094- pushl %ecx
13095- ret
13096+ jmpl *(saved_return_addr)
13097 ENDPROC(efi_call_phys)
13098 .previous
13099
13100-.data
13101+__INITDATA
13102 saved_return_addr:
13103 .long 0
13104 efi_rt_function_ptr:
13105diff -urNp linux-2.6.32.43/arch/x86/kernel/entry_32.S linux-2.6.32.43/arch/x86/kernel/entry_32.S
13106--- linux-2.6.32.43/arch/x86/kernel/entry_32.S 2011-03-27 14:31:47.000000000 -0400
13107+++ linux-2.6.32.43/arch/x86/kernel/entry_32.S 2011-05-22 23:02:03.000000000 -0400
13108@@ -185,13 +185,146 @@
13109 /*CFI_REL_OFFSET gs, PT_GS*/
13110 .endm
13111 .macro SET_KERNEL_GS reg
13112+
13113+#ifdef CONFIG_CC_STACKPROTECTOR
13114 movl $(__KERNEL_STACK_CANARY), \reg
13115+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
13116+ movl $(__USER_DS), \reg
13117+#else
13118+ xorl \reg, \reg
13119+#endif
13120+
13121 movl \reg, %gs
13122 .endm
13123
13124 #endif /* CONFIG_X86_32_LAZY_GS */
13125
13126-.macro SAVE_ALL
13127+.macro pax_enter_kernel
13128+#ifdef CONFIG_PAX_KERNEXEC
13129+ call pax_enter_kernel
13130+#endif
13131+.endm
13132+
13133+.macro pax_exit_kernel
13134+#ifdef CONFIG_PAX_KERNEXEC
13135+ call pax_exit_kernel
13136+#endif
13137+.endm
13138+
13139+#ifdef CONFIG_PAX_KERNEXEC
13140+ENTRY(pax_enter_kernel)
13141+#ifdef CONFIG_PARAVIRT
13142+ pushl %eax
13143+ pushl %ecx
13144+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
13145+ mov %eax, %esi
13146+#else
13147+ mov %cr0, %esi
13148+#endif
13149+ bts $16, %esi
13150+ jnc 1f
13151+ mov %cs, %esi
13152+ cmp $__KERNEL_CS, %esi
13153+ jz 3f
13154+ ljmp $__KERNEL_CS, $3f
13155+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
13156+2:
13157+#ifdef CONFIG_PARAVIRT
13158+ mov %esi, %eax
13159+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
13160+#else
13161+ mov %esi, %cr0
13162+#endif
13163+3:
13164+#ifdef CONFIG_PARAVIRT
13165+ popl %ecx
13166+ popl %eax
13167+#endif
13168+ ret
13169+ENDPROC(pax_enter_kernel)
13170+
13171+ENTRY(pax_exit_kernel)
13172+#ifdef CONFIG_PARAVIRT
13173+ pushl %eax
13174+ pushl %ecx
13175+#endif
13176+ mov %cs, %esi
13177+ cmp $__KERNEXEC_KERNEL_CS, %esi
13178+ jnz 2f
13179+#ifdef CONFIG_PARAVIRT
13180+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
13181+ mov %eax, %esi
13182+#else
13183+ mov %cr0, %esi
13184+#endif
13185+ btr $16, %esi
13186+ ljmp $__KERNEL_CS, $1f
13187+1:
13188+#ifdef CONFIG_PARAVIRT
13189+ mov %esi, %eax
13190+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
13191+#else
13192+ mov %esi, %cr0
13193+#endif
13194+2:
13195+#ifdef CONFIG_PARAVIRT
13196+ popl %ecx
13197+ popl %eax
13198+#endif
13199+ ret
13200+ENDPROC(pax_exit_kernel)
13201+#endif
13202+
13203+.macro pax_erase_kstack
13204+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13205+ call pax_erase_kstack
13206+#endif
13207+.endm
13208+
13209+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13210+/*
13211+ * ebp: thread_info
13212+ * ecx, edx: can be clobbered
13213+ */
13214+ENTRY(pax_erase_kstack)
13215+ pushl %edi
13216+ pushl %eax
13217+
13218+ mov TI_lowest_stack(%ebp), %edi
13219+ mov $-0xBEEF, %eax
13220+ std
13221+
13222+1: mov %edi, %ecx
13223+ and $THREAD_SIZE_asm - 1, %ecx
13224+ shr $2, %ecx
13225+ repne scasl
13226+ jecxz 2f
13227+
13228+ cmp $2*16, %ecx
13229+ jc 2f
13230+
13231+ mov $2*16, %ecx
13232+ repe scasl
13233+ jecxz 2f
13234+ jne 1b
13235+
13236+2: cld
13237+ mov %esp, %ecx
13238+ sub %edi, %ecx
13239+ shr $2, %ecx
13240+ rep stosl
13241+
13242+ mov TI_task_thread_sp0(%ebp), %edi
13243+ sub $128, %edi
13244+ mov %edi, TI_lowest_stack(%ebp)
13245+
13246+ popl %eax
13247+ popl %edi
13248+ ret
13249+ENDPROC(pax_erase_kstack)
13250+#endif
13251+
13252+.macro __SAVE_ALL _DS
13253 cld
13254 PUSH_GS
13255 pushl %fs
13256@@ -224,7 +357,7 @@
13257 pushl %ebx
13258 CFI_ADJUST_CFA_OFFSET 4
13259 CFI_REL_OFFSET ebx, 0
13260- movl $(__USER_DS), %edx
13261+ movl $\_DS, %edx
13262 movl %edx, %ds
13263 movl %edx, %es
13264 movl $(__KERNEL_PERCPU), %edx
13265@@ -232,6 +365,15 @@
13266 SET_KERNEL_GS %edx
13267 .endm
13268
13269+.macro SAVE_ALL
13270+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
13271+ __SAVE_ALL __KERNEL_DS
13272+ pax_enter_kernel
13273+#else
13274+ __SAVE_ALL __USER_DS
13275+#endif
13276+.endm
13277+
13278 .macro RESTORE_INT_REGS
13279 popl %ebx
13280 CFI_ADJUST_CFA_OFFSET -4
13281@@ -352,7 +494,15 @@ check_userspace:
13282 movb PT_CS(%esp), %al
13283 andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
13284 cmpl $USER_RPL, %eax
13285+
13286+#ifdef CONFIG_PAX_KERNEXEC
13287+ jae resume_userspace
13288+
13289+ PAX_EXIT_KERNEL
13290+ jmp resume_kernel
13291+#else
13292 jb resume_kernel # not returning to v8086 or userspace
13293+#endif
13294
13295 ENTRY(resume_userspace)
13296 LOCKDEP_SYS_EXIT
13297@@ -364,7 +514,7 @@ ENTRY(resume_userspace)
13298 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
13299 # int/exception return?
13300 jne work_pending
13301- jmp restore_all
13302+ jmp restore_all_pax
13303 END(ret_from_exception)
13304
13305 #ifdef CONFIG_PREEMPT
13306@@ -414,25 +564,36 @@ sysenter_past_esp:
13307 /*CFI_REL_OFFSET cs, 0*/
13308 /*
13309 * Push current_thread_info()->sysenter_return to the stack.
13310- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
13311- * pushed above; +8 corresponds to copy_thread's esp0 setting.
13312 */
13313- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp)
13314+ pushl $0
13315 CFI_ADJUST_CFA_OFFSET 4
13316 CFI_REL_OFFSET eip, 0
13317
13318 pushl %eax
13319 CFI_ADJUST_CFA_OFFSET 4
13320 SAVE_ALL
13321+ GET_THREAD_INFO(%ebp)
13322+ movl TI_sysenter_return(%ebp),%ebp
13323+ movl %ebp,PT_EIP(%esp)
13324 ENABLE_INTERRUPTS(CLBR_NONE)
13325
13326 /*
13327 * Load the potential sixth argument from user stack.
13328 * Careful about security.
13329 */
13330+ movl PT_OLDESP(%esp),%ebp
13331+
13332+#ifdef CONFIG_PAX_MEMORY_UDEREF
13333+ mov PT_OLDSS(%esp),%ds
13334+1: movl %ds:(%ebp),%ebp
13335+ push %ss
13336+ pop %ds
13337+#else
13338 cmpl $__PAGE_OFFSET-3,%ebp
13339 jae syscall_fault
13340 1: movl (%ebp),%ebp
13341+#endif
13342+
13343 movl %ebp,PT_EBP(%esp)
13344 .section __ex_table,"a"
13345 .align 4
13346@@ -455,12 +616,23 @@ sysenter_do_call:
13347 testl $_TIF_ALLWORK_MASK, %ecx
13348 jne sysexit_audit
13349 sysenter_exit:
13350+
13351+#ifdef CONFIG_PAX_RANDKSTACK
13352+ pushl_cfi %eax
13353+ call pax_randomize_kstack
13354+ popl_cfi %eax
13355+#endif
13356+
13357+ pax_erase_kstack
13358+
13359 /* if something modifies registers it must also disable sysexit */
13360 movl PT_EIP(%esp), %edx
13361 movl PT_OLDESP(%esp), %ecx
13362 xorl %ebp,%ebp
13363 TRACE_IRQS_ON
13364 1: mov PT_FS(%esp), %fs
13365+2: mov PT_DS(%esp), %ds
13366+3: mov PT_ES(%esp), %es
13367 PTGS_TO_GS
13368 ENABLE_INTERRUPTS_SYSEXIT
13369
13370@@ -477,6 +649,9 @@ sysenter_audit:
13371 movl %eax,%edx /* 2nd arg: syscall number */
13372 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
13373 call audit_syscall_entry
13374+
13375+ pax_erase_kstack
13376+
13377 pushl %ebx
13378 CFI_ADJUST_CFA_OFFSET 4
13379 movl PT_EAX(%esp),%eax /* reload syscall number */
13380@@ -504,11 +679,17 @@ sysexit_audit:
13381
13382 CFI_ENDPROC
13383 .pushsection .fixup,"ax"
13384-2: movl $0,PT_FS(%esp)
13385+4: movl $0,PT_FS(%esp)
13386+ jmp 1b
13387+5: movl $0,PT_DS(%esp)
13388+ jmp 1b
13389+6: movl $0,PT_ES(%esp)
13390 jmp 1b
13391 .section __ex_table,"a"
13392 .align 4
13393- .long 1b,2b
13394+ .long 1b,4b
13395+ .long 2b,5b
13396+ .long 3b,6b
13397 .popsection
13398 PTGS_TO_GS_EX
13399 ENDPROC(ia32_sysenter_target)
13400@@ -538,6 +719,14 @@ syscall_exit:
13401 testl $_TIF_ALLWORK_MASK, %ecx # current->work
13402 jne syscall_exit_work
13403
13404+restore_all_pax:
13405+
13406+#ifdef CONFIG_PAX_RANDKSTACK
13407+ call pax_randomize_kstack
13408+#endif
13409+
13410+ pax_erase_kstack
13411+
13412 restore_all:
13413 TRACE_IRQS_IRET
13414 restore_all_notrace:
13415@@ -602,7 +791,13 @@ ldt_ss:
13416 mov PT_OLDESP(%esp), %eax /* load userspace esp */
13417 mov %dx, %ax /* eax: new kernel esp */
13418 sub %eax, %edx /* offset (low word is 0) */
13419- PER_CPU(gdt_page, %ebx)
13420+#ifdef CONFIG_SMP
13421+ movl PER_CPU_VAR(cpu_number), %ebx
13422+ shll $PAGE_SHIFT_asm, %ebx
13423+ addl $cpu_gdt_table, %ebx
13424+#else
13425+ movl $cpu_gdt_table, %ebx
13426+#endif
13427 shr $16, %edx
13428 mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */
13429 mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */
13430@@ -636,31 +831,25 @@ work_resched:
13431 movl TI_flags(%ebp), %ecx
13432 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
13433 # than syscall tracing?
13434- jz restore_all
13435+ jz restore_all_pax
13436 testb $_TIF_NEED_RESCHED, %cl
13437 jnz work_resched
13438
13439 work_notifysig: # deal with pending signals and
13440 # notify-resume requests
13441+ movl %esp, %eax
13442 #ifdef CONFIG_VM86
13443 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
13444- movl %esp, %eax
13445- jne work_notifysig_v86 # returning to kernel-space or
13446+ jz 1f # returning to kernel-space or
13447 # vm86-space
13448- xorl %edx, %edx
13449- call do_notify_resume
13450- jmp resume_userspace_sig
13451
13452- ALIGN
13453-work_notifysig_v86:
13454 pushl %ecx # save ti_flags for do_notify_resume
13455 CFI_ADJUST_CFA_OFFSET 4
13456 call save_v86_state # %eax contains pt_regs pointer
13457 popl %ecx
13458 CFI_ADJUST_CFA_OFFSET -4
13459 movl %eax, %esp
13460-#else
13461- movl %esp, %eax
13462+1:
13463 #endif
13464 xorl %edx, %edx
13465 call do_notify_resume
13466@@ -673,6 +862,9 @@ syscall_trace_entry:
13467 movl $-ENOSYS,PT_EAX(%esp)
13468 movl %esp, %eax
13469 call syscall_trace_enter
13470+
13471+ pax_erase_kstack
13472+
13473 /* What it returned is what we'll actually use. */
13474 cmpl $(nr_syscalls), %eax
13475 jnae syscall_call
13476@@ -695,6 +887,10 @@ END(syscall_exit_work)
13477
13478 RING0_INT_FRAME # can't unwind into user space anyway
13479 syscall_fault:
13480+#ifdef CONFIG_PAX_MEMORY_UDEREF
13481+ push %ss
13482+ pop %ds
13483+#endif
13484 GET_THREAD_INFO(%ebp)
13485 movl $-EFAULT,PT_EAX(%esp)
13486 jmp resume_userspace
13487@@ -726,6 +922,33 @@ PTREGSCALL(rt_sigreturn)
13488 PTREGSCALL(vm86)
13489 PTREGSCALL(vm86old)
13490
13491+ ALIGN;
13492+ENTRY(kernel_execve)
13493+ push %ebp
13494+ sub $PT_OLDSS+4,%esp
13495+ push %edi
13496+ push %ecx
13497+ push %eax
13498+ lea 3*4(%esp),%edi
13499+ mov $PT_OLDSS/4+1,%ecx
13500+ xorl %eax,%eax
13501+ rep stosl
13502+ pop %eax
13503+ pop %ecx
13504+ pop %edi
13505+ movl $X86_EFLAGS_IF,PT_EFLAGS(%esp)
13506+ mov %eax,PT_EBX(%esp)
13507+ mov %edx,PT_ECX(%esp)
13508+ mov %ecx,PT_EDX(%esp)
13509+ mov %esp,%eax
13510+ call sys_execve
13511+ GET_THREAD_INFO(%ebp)
13512+ test %eax,%eax
13513+ jz syscall_exit
13514+ add $PT_OLDSS+4,%esp
13515+ pop %ebp
13516+ ret
13517+
13518 .macro FIXUP_ESPFIX_STACK
13519 /*
13520 * Switch back for ESPFIX stack to the normal zerobased stack
13521@@ -735,7 +958,13 @@ PTREGSCALL(vm86old)
13522 * normal stack and adjusts ESP with the matching offset.
13523 */
13524 /* fixup the stack */
13525- PER_CPU(gdt_page, %ebx)
13526+#ifdef CONFIG_SMP
13527+ movl PER_CPU_VAR(cpu_number), %ebx
13528+ shll $PAGE_SHIFT_asm, %ebx
13529+ addl $cpu_gdt_table, %ebx
13530+#else
13531+ movl $cpu_gdt_table, %ebx
13532+#endif
13533 mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */
13534 mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */
13535 shl $16, %eax
13536@@ -1198,7 +1427,6 @@ return_to_handler:
13537 ret
13538 #endif
13539
13540-.section .rodata,"a"
13541 #include "syscall_table_32.S"
13542
13543 syscall_table_size=(.-sys_call_table)
13544@@ -1255,9 +1483,12 @@ error_code:
13545 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
13546 REG_TO_PTGS %ecx
13547 SET_KERNEL_GS %ecx
13548- movl $(__USER_DS), %ecx
13549+ movl $(__KERNEL_DS), %ecx
13550 movl %ecx, %ds
13551 movl %ecx, %es
13552+
13553+ pax_enter_kernel
13554+
13555 TRACE_IRQS_OFF
13556 movl %esp,%eax # pt_regs pointer
13557 call *%edi
13558@@ -1351,6 +1582,9 @@ nmi_stack_correct:
13559 xorl %edx,%edx # zero error code
13560 movl %esp,%eax # pt_regs pointer
13561 call do_nmi
13562+
13563+ pax_exit_kernel
13564+
13565 jmp restore_all_notrace
13566 CFI_ENDPROC
13567
13568@@ -1391,6 +1625,9 @@ nmi_espfix_stack:
13569 FIXUP_ESPFIX_STACK # %eax == %esp
13570 xorl %edx,%edx # zero error code
13571 call do_nmi
13572+
13573+ pax_exit_kernel
13574+
13575 RESTORE_REGS
13576 lss 12+4(%esp), %esp # back to espfix stack
13577 CFI_ADJUST_CFA_OFFSET -24
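Much of the entry_32.S hunk above wires the PaX hooks into the 32-bit entry paths: pax_enter_kernel/pax_exit_kernel toggle the KERNEXEC cr0/CS state, and pax_erase_kstack scrubs the kernel stack on the way back to user space so data left over from one syscall cannot leak into the next. The C fragment below is only a rough rendering of the erase step (the authoritative version is the assembly above, which scans for a poison marker from the deepest recorded stack use); all names and sizes here are illustrative.

    /* Rough C rendering of the pax_erase_kstack idea: everything between the
     * deepest stack use observed during the syscall and the current stack
     * pointer is overwritten with a poison pattern. */
    #include <stdint.h>
    #include <stdio.h>

    #define POISON_DEMO 0xBEEFBEEFUL

    static void erase_kstack_demo(unsigned long *lowest, unsigned long *sp)
    {
        for (unsigned long *p = lowest; p < sp; p++)
            *p = POISON_DEMO;
    }

    int main(void)
    {
        unsigned long stack[32];

        for (int i = 0; i < 32; i++)
            stack[i] = (unsigned long)i;          /* stand-in for stale syscall data */

        erase_kstack_demo(&stack[4], &stack[28]); /* [4,28) is the "used" region */
        printf("stack[10] = %#lx\n", stack[10]);  /* now poisoned */
        return 0;
    }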
13578diff -urNp linux-2.6.32.43/arch/x86/kernel/entry_64.S linux-2.6.32.43/arch/x86/kernel/entry_64.S
13579--- linux-2.6.32.43/arch/x86/kernel/entry_64.S 2011-03-27 14:31:47.000000000 -0400
13580+++ linux-2.6.32.43/arch/x86/kernel/entry_64.S 2011-06-04 20:30:53.000000000 -0400
13581@@ -53,6 +53,7 @@
13582 #include <asm/paravirt.h>
13583 #include <asm/ftrace.h>
13584 #include <asm/percpu.h>
13585+#include <asm/pgtable.h>
13586
13587 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
13588 #include <linux/elf-em.h>
13589@@ -174,6 +175,257 @@ ENTRY(native_usergs_sysret64)
13590 ENDPROC(native_usergs_sysret64)
13591 #endif /* CONFIG_PARAVIRT */
13592
13593+ .macro ljmpq sel, off
13594+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
13595+ .byte 0x48; ljmp *1234f(%rip)
13596+ .pushsection .rodata
13597+ .align 16
13598+ 1234: .quad \off; .word \sel
13599+ .popsection
13600+#else
13601+ pushq $\sel
13602+ pushq $\off
13603+ lretq
13604+#endif
13605+ .endm
13606+
13607+ .macro pax_enter_kernel
13608+#ifdef CONFIG_PAX_KERNEXEC
13609+ call pax_enter_kernel
13610+#endif
13611+ .endm
13612+
13613+ .macro pax_exit_kernel
13614+#ifdef CONFIG_PAX_KERNEXEC
13615+ call pax_exit_kernel
13616+#endif
13617+ .endm
13618+
13619+#ifdef CONFIG_PAX_KERNEXEC
13620+ENTRY(pax_enter_kernel)
13621+ pushq %rdi
13622+
13623+#ifdef CONFIG_PARAVIRT
13624+ PV_SAVE_REGS(CLBR_RDI)
13625+#endif
13626+
13627+ GET_CR0_INTO_RDI
13628+ bts $16,%rdi
13629+ jnc 1f
13630+ mov %cs,%edi
13631+ cmp $__KERNEL_CS,%edi
13632+ jz 3f
13633+ ljmpq __KERNEL_CS,3f
13634+1: ljmpq __KERNEXEC_KERNEL_CS,2f
13635+2: SET_RDI_INTO_CR0
13636+3:
13637+
13638+#ifdef CONFIG_PARAVIRT
13639+ PV_RESTORE_REGS(CLBR_RDI)
13640+#endif
13641+
13642+ popq %rdi
13643+ retq
13644+ENDPROC(pax_enter_kernel)
13645+
13646+ENTRY(pax_exit_kernel)
13647+ pushq %rdi
13648+
13649+#ifdef CONFIG_PARAVIRT
13650+ PV_SAVE_REGS(CLBR_RDI)
13651+#endif
13652+
13653+ mov %cs,%rdi
13654+ cmp $__KERNEXEC_KERNEL_CS,%edi
13655+ jnz 2f
13656+ GET_CR0_INTO_RDI
13657+ btr $16,%rdi
13658+ ljmpq __KERNEL_CS,1f
13659+1: SET_RDI_INTO_CR0
13660+2:
13661+
13662+#ifdef CONFIG_PARAVIRT
13663+ PV_RESTORE_REGS(CLBR_RDI);
13664+#endif
13665+
13666+ popq %rdi
13667+ retq
13668+ENDPROC(pax_exit_kernel)
13669+#endif
13670+
13671+ .macro pax_enter_kernel_user
13672+#ifdef CONFIG_PAX_MEMORY_UDEREF
13673+ call pax_enter_kernel_user
13674+#endif
13675+ .endm
13676+
13677+ .macro pax_exit_kernel_user
13678+#ifdef CONFIG_PAX_MEMORY_UDEREF
13679+ call pax_exit_kernel_user
13680+#endif
13681+#ifdef CONFIG_PAX_RANDKSTACK
13682+ push %rax
13683+ call pax_randomize_kstack
13684+ pop %rax
13685+#endif
13686+ pax_erase_kstack
13687+ .endm
13688+
13689+#ifdef CONFIG_PAX_MEMORY_UDEREF
13690+ENTRY(pax_enter_kernel_user)
13691+ pushq %rdi
13692+ pushq %rbx
13693+
13694+#ifdef CONFIG_PARAVIRT
13695+ PV_SAVE_REGS(CLBR_RDI)
13696+#endif
13697+
13698+ GET_CR3_INTO_RDI
13699+ mov %rdi,%rbx
13700+ add $__START_KERNEL_map,%rbx
13701+ sub phys_base(%rip),%rbx
13702+
13703+#ifdef CONFIG_PARAVIRT
13704+ pushq %rdi
13705+ cmpl $0, pv_info+PARAVIRT_enabled
13706+ jz 1f
13707+ i = 0
13708+ .rept USER_PGD_PTRS
13709+ mov i*8(%rbx),%rsi
13710+ mov $0,%sil
13711+ lea i*8(%rbx),%rdi
13712+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13713+ i = i + 1
13714+ .endr
13715+ jmp 2f
13716+1:
13717+#endif
13718+
13719+ i = 0
13720+ .rept USER_PGD_PTRS
13721+ movb $0,i*8(%rbx)
13722+ i = i + 1
13723+ .endr
13724+
13725+#ifdef CONFIG_PARAVIRT
13726+2: popq %rdi
13727+#endif
13728+ SET_RDI_INTO_CR3
13729+
13730+#ifdef CONFIG_PAX_KERNEXEC
13731+ GET_CR0_INTO_RDI
13732+ bts $16,%rdi
13733+ SET_RDI_INTO_CR0
13734+#endif
13735+
13736+#ifdef CONFIG_PARAVIRT
13737+ PV_RESTORE_REGS(CLBR_RDI)
13738+#endif
13739+
13740+ popq %rbx
13741+ popq %rdi
13742+ retq
13743+ENDPROC(pax_enter_kernel_user)
13744+
13745+ENTRY(pax_exit_kernel_user)
13746+ push %rdi
13747+
13748+#ifdef CONFIG_PARAVIRT
13749+ pushq %rbx
13750+ PV_SAVE_REGS(CLBR_RDI)
13751+#endif
13752+
13753+#ifdef CONFIG_PAX_KERNEXEC
13754+ GET_CR0_INTO_RDI
13755+ btr $16,%rdi
13756+ SET_RDI_INTO_CR0
13757+#endif
13758+
13759+ GET_CR3_INTO_RDI
13760+ add $__START_KERNEL_map,%rdi
13761+ sub phys_base(%rip),%rdi
13762+
13763+#ifdef CONFIG_PARAVIRT
13764+ cmpl $0, pv_info+PARAVIRT_enabled
13765+ jz 1f
13766+ mov %rdi,%rbx
13767+ i = 0
13768+ .rept USER_PGD_PTRS
13769+ mov i*8(%rbx),%rsi
13770+ mov $0x67,%sil
13771+ lea i*8(%rbx),%rdi
13772+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd)
13773+ i = i + 1
13774+ .endr
13775+ jmp 2f
13776+1:
13777+#endif
13778+
13779+ i = 0
13780+ .rept USER_PGD_PTRS
13781+ movb $0x67,i*8(%rdi)
13782+ i = i + 1
13783+ .endr
13784+
13785+#ifdef CONFIG_PARAVIRT
13786+2: PV_RESTORE_REGS(CLBR_RDI)
13787+ popq %rbx
13788+#endif
13789+
13790+ popq %rdi
13791+ retq
13792+ENDPROC(pax_exit_kernel_user)
13793+#endif
13794+
13795+.macro pax_erase_kstack
13796+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13797+ call pax_erase_kstack
13798+#endif
13799+.endm
13800+
13801+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
13802+/*
13803+ * r10: thread_info
13804+ * rcx, rdx: can be clobbered
13805+ */
13806+ENTRY(pax_erase_kstack)
13807+ pushq %rdi
13808+ pushq %rax
13809+
13810+ GET_THREAD_INFO(%r10)
13811+ mov TI_lowest_stack(%r10), %rdi
13812+ mov $-0xBEEF, %rax
13813+ std
13814+
13815+1: mov %edi, %ecx
13816+ and $THREAD_SIZE_asm - 1, %ecx
13817+ shr $3, %ecx
13818+ repne scasq
13819+ jecxz 2f
13820+
13821+ cmp $2*8, %ecx
13822+ jc 2f
13823+
13824+ mov $2*8, %ecx
13825+ repe scasq
13826+ jecxz 2f
13827+ jne 1b
13828+
13829+2: cld
13830+ mov %esp, %ecx
13831+ sub %edi, %ecx
13832+ shr $3, %ecx
13833+ rep stosq
13834+
13835+ mov TI_task_thread_sp0(%r10), %rdi
13836+ sub $256, %rdi
13837+ mov %rdi, TI_lowest_stack(%r10)
13838+
13839+ popq %rax
13840+ popq %rdi
13841+ ret
13842+ENDPROC(pax_erase_kstack)
13843+#endif
13844
13845 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
13846 #ifdef CONFIG_TRACE_IRQFLAGS
13847@@ -317,7 +569,7 @@ ENTRY(save_args)
13848 leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */
13849 movq_cfi rbp, 8 /* push %rbp */
13850 leaq 8(%rsp), %rbp /* mov %rsp, %ebp */
13851- testl $3, CS(%rdi)
13852+ testb $3, CS(%rdi)
13853 je 1f
13854 SWAPGS
13855 /*
13856@@ -409,7 +661,7 @@ ENTRY(ret_from_fork)
13857
13858 RESTORE_REST
13859
13860- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13861+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
13862 je int_ret_from_sys_call
13863
13864 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
13865@@ -455,7 +707,7 @@ END(ret_from_fork)
13866 ENTRY(system_call)
13867 CFI_STARTPROC simple
13868 CFI_SIGNAL_FRAME
13869- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
13870+ CFI_DEF_CFA rsp,0
13871 CFI_REGISTER rip,rcx
13872 /*CFI_REGISTER rflags,r11*/
13873 SWAPGS_UNSAFE_STACK
13874@@ -468,12 +720,13 @@ ENTRY(system_call_after_swapgs)
13875
13876 movq %rsp,PER_CPU_VAR(old_rsp)
13877 movq PER_CPU_VAR(kernel_stack),%rsp
13878+ pax_enter_kernel_user
13879 /*
13880 * No need to follow this irqs off/on section - it's straight
13881 * and short:
13882 */
13883 ENABLE_INTERRUPTS(CLBR_NONE)
13884- SAVE_ARGS 8,1
13885+ SAVE_ARGS 8*6,1
13886 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
13887 movq %rcx,RIP-ARGOFFSET(%rsp)
13888 CFI_REL_OFFSET rip,RIP-ARGOFFSET
13889@@ -502,6 +755,7 @@ sysret_check:
13890 andl %edi,%edx
13891 jnz sysret_careful
13892 CFI_REMEMBER_STATE
13893+ pax_exit_kernel_user
13894 /*
13895 * sysretq will re-enable interrupts:
13896 */
13897@@ -562,6 +816,9 @@ auditsys:
13898 movq %rax,%rsi /* 2nd arg: syscall number */
13899 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
13900 call audit_syscall_entry
13901+
13902+ pax_erase_kstack
13903+
13904 LOAD_ARGS 0 /* reload call-clobbered registers */
13905 jmp system_call_fastpath
13906
13907@@ -592,6 +849,9 @@ tracesys:
13908 FIXUP_TOP_OF_STACK %rdi
13909 movq %rsp,%rdi
13910 call syscall_trace_enter
13911+
13912+ pax_erase_kstack
13913+
13914 /*
13915 * Reload arg registers from stack in case ptrace changed them.
13916 * We don't reload %rax because syscall_trace_enter() returned
13917@@ -613,7 +873,7 @@ tracesys:
13918 GLOBAL(int_ret_from_sys_call)
13919 DISABLE_INTERRUPTS(CLBR_NONE)
13920 TRACE_IRQS_OFF
13921- testl $3,CS-ARGOFFSET(%rsp)
13922+ testb $3,CS-ARGOFFSET(%rsp)
13923 je retint_restore_args
13924 movl $_TIF_ALLWORK_MASK,%edi
13925 /* edi: mask to check */
13926@@ -800,6 +1060,16 @@ END(interrupt)
13927 CFI_ADJUST_CFA_OFFSET 10*8
13928 call save_args
13929 PARTIAL_FRAME 0
13930+#ifdef CONFIG_PAX_MEMORY_UDEREF
13931+ testb $3, CS(%rdi)
13932+ jnz 1f
13933+ pax_enter_kernel
13934+ jmp 2f
13935+1: pax_enter_kernel_user
13936+2:
13937+#else
13938+ pax_enter_kernel
13939+#endif
13940 call \func
13941 .endm
13942
13943@@ -822,7 +1092,7 @@ ret_from_intr:
13944 CFI_ADJUST_CFA_OFFSET -8
13945 exit_intr:
13946 GET_THREAD_INFO(%rcx)
13947- testl $3,CS-ARGOFFSET(%rsp)
13948+ testb $3,CS-ARGOFFSET(%rsp)
13949 je retint_kernel
13950
13951 /* Interrupt came from user space */
13952@@ -844,12 +1114,14 @@ retint_swapgs: /* return to user-space
13953 * The iretq could re-enable interrupts:
13954 */
13955 DISABLE_INTERRUPTS(CLBR_ANY)
13956+ pax_exit_kernel_user
13957 TRACE_IRQS_IRETQ
13958 SWAPGS
13959 jmp restore_args
13960
13961 retint_restore_args: /* return to kernel space */
13962 DISABLE_INTERRUPTS(CLBR_ANY)
13963+ pax_exit_kernel
13964 /*
13965 * The iretq could re-enable interrupts:
13966 */
13967@@ -1032,6 +1304,16 @@ ENTRY(\sym)
13968 CFI_ADJUST_CFA_OFFSET 15*8
13969 call error_entry
13970 DEFAULT_FRAME 0
13971+#ifdef CONFIG_PAX_MEMORY_UDEREF
13972+ testb $3, CS(%rsp)
13973+ jnz 1f
13974+ pax_enter_kernel
13975+ jmp 2f
13976+1: pax_enter_kernel_user
13977+2:
13978+#else
13979+ pax_enter_kernel
13980+#endif
13981 movq %rsp,%rdi /* pt_regs pointer */
13982 xorl %esi,%esi /* no error code */
13983 call \do_sym
13984@@ -1049,6 +1331,16 @@ ENTRY(\sym)
13985 subq $15*8, %rsp
13986 call save_paranoid
13987 TRACE_IRQS_OFF
13988+#ifdef CONFIG_PAX_MEMORY_UDEREF
13989+ testb $3, CS(%rsp)
13990+ jnz 1f
13991+ pax_enter_kernel
13992+ jmp 2f
13993+1: pax_enter_kernel_user
13994+2:
13995+#else
13996+ pax_enter_kernel
13997+#endif
13998 movq %rsp,%rdi /* pt_regs pointer */
13999 xorl %esi,%esi /* no error code */
14000 call \do_sym
14001@@ -1066,9 +1358,24 @@ ENTRY(\sym)
14002 subq $15*8, %rsp
14003 call save_paranoid
14004 TRACE_IRQS_OFF
14005+#ifdef CONFIG_PAX_MEMORY_UDEREF
14006+ testb $3, CS(%rsp)
14007+ jnz 1f
14008+ pax_enter_kernel
14009+ jmp 2f
14010+1: pax_enter_kernel_user
14011+2:
14012+#else
14013+ pax_enter_kernel
14014+#endif
14015 movq %rsp,%rdi /* pt_regs pointer */
14016 xorl %esi,%esi /* no error code */
14017- PER_CPU(init_tss, %rbp)
14018+#ifdef CONFIG_SMP
14019+ imul $TSS_size, PER_CPU_VAR(cpu_number), %ebp
14020+ lea init_tss(%rbp), %rbp
14021+#else
14022+ lea init_tss(%rip), %rbp
14023+#endif
14024 subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
14025 call \do_sym
14026 addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%rbp)
14027@@ -1085,6 +1392,16 @@ ENTRY(\sym)
14028 CFI_ADJUST_CFA_OFFSET 15*8
14029 call error_entry
14030 DEFAULT_FRAME 0
14031+#ifdef CONFIG_PAX_MEMORY_UDEREF
14032+ testb $3, CS(%rsp)
14033+ jnz 1f
14034+ pax_enter_kernel
14035+ jmp 2f
14036+1: pax_enter_kernel_user
14037+2:
14038+#else
14039+ pax_enter_kernel
14040+#endif
14041 movq %rsp,%rdi /* pt_regs pointer */
14042 movq ORIG_RAX(%rsp),%rsi /* get error code */
14043 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14044@@ -1104,6 +1421,16 @@ ENTRY(\sym)
14045 call save_paranoid
14046 DEFAULT_FRAME 0
14047 TRACE_IRQS_OFF
14048+#ifdef CONFIG_PAX_MEMORY_UDEREF
14049+ testb $3, CS(%rsp)
14050+ jnz 1f
14051+ pax_enter_kernel
14052+ jmp 2f
14053+1: pax_enter_kernel_user
14054+2:
14055+#else
14056+ pax_enter_kernel
14057+#endif
14058 movq %rsp,%rdi /* pt_regs pointer */
14059 movq ORIG_RAX(%rsp),%rsi /* get error code */
14060 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
14061@@ -1405,14 +1732,27 @@ ENTRY(paranoid_exit)
14062 TRACE_IRQS_OFF
14063 testl %ebx,%ebx /* swapgs needed? */
14064 jnz paranoid_restore
14065- testl $3,CS(%rsp)
14066+ testb $3,CS(%rsp)
14067 jnz paranoid_userspace
14068+#ifdef CONFIG_PAX_MEMORY_UDEREF
14069+ pax_exit_kernel
14070+ TRACE_IRQS_IRETQ 0
14071+ SWAPGS_UNSAFE_STACK
14072+ RESTORE_ALL 8
14073+ jmp irq_return
14074+#endif
14075 paranoid_swapgs:
14076+#ifdef CONFIG_PAX_MEMORY_UDEREF
14077+ pax_exit_kernel_user
14078+#else
14079+ pax_exit_kernel
14080+#endif
14081 TRACE_IRQS_IRETQ 0
14082 SWAPGS_UNSAFE_STACK
14083 RESTORE_ALL 8
14084 jmp irq_return
14085 paranoid_restore:
14086+ pax_exit_kernel
14087 TRACE_IRQS_IRETQ 0
14088 RESTORE_ALL 8
14089 jmp irq_return
14090@@ -1470,7 +1810,7 @@ ENTRY(error_entry)
14091 movq_cfi r14, R14+8
14092 movq_cfi r15, R15+8
14093 xorl %ebx,%ebx
14094- testl $3,CS+8(%rsp)
14095+ testb $3,CS+8(%rsp)
14096 je error_kernelspace
14097 error_swapgs:
14098 SWAPGS
14099@@ -1529,6 +1869,16 @@ ENTRY(nmi)
14100 CFI_ADJUST_CFA_OFFSET 15*8
14101 call save_paranoid
14102 DEFAULT_FRAME 0
14103+#ifdef CONFIG_PAX_MEMORY_UDEREF
14104+ testb $3, CS(%rsp)
14105+ jnz 1f
14106+ pax_enter_kernel
14107+ jmp 2f
14108+1: pax_enter_kernel_user
14109+2:
14110+#else
14111+ pax_enter_kernel
14112+#endif
14113 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
14114 movq %rsp,%rdi
14115 movq $-1,%rsi
14116@@ -1539,11 +1889,25 @@ ENTRY(nmi)
14117 DISABLE_INTERRUPTS(CLBR_NONE)
14118 testl %ebx,%ebx /* swapgs needed? */
14119 jnz nmi_restore
14120- testl $3,CS(%rsp)
14121+ testb $3,CS(%rsp)
14122 jnz nmi_userspace
14123+#ifdef CONFIG_PAX_MEMORY_UDEREF
14124+ pax_exit_kernel
14125+ SWAPGS_UNSAFE_STACK
14126+ RESTORE_ALL 8
14127+ jmp irq_return
14128+#endif
14129 nmi_swapgs:
14130+#ifdef CONFIG_PAX_MEMORY_UDEREF
14131+ pax_exit_kernel_user
14132+#else
14133+ pax_exit_kernel
14134+#endif
14135 SWAPGS_UNSAFE_STACK
14136+ RESTORE_ALL 8
14137+ jmp irq_return
14138 nmi_restore:
14139+ pax_exit_kernel
14140 RESTORE_ALL 8
14141 jmp irq_return
14142 nmi_userspace:
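On the 64-bit side, the pax_enter_kernel_user/pax_exit_kernel_user helpers added above implement UDEREF without segmentation: on every entry from user space the userland half of the top-level page table is blanked (the movb $0 loop over USER_PGD_PTRS), and on exit the entries are re-armed with 0x67 (present, writable, user, accessed, dirty), so an errant kernel dereference of a user pointer faults instead of silently reading attacker-controlled memory. The sketch below captures only that blank/re-arm idea with a plain array; it is not a model of real page tables.

    /* Conceptual sketch of the UDEREF entry/exit toggling above: the user
     * half of the PGD is cleared while the kernel runs and restored on the
     * way back out.  USER_PGD_PTRS_DEMO and the 0x67 flag byte mirror the
     * constants in the hunk, but the data structure is purely illustrative. */
    #include <stdio.h>

    #define USER_PGD_PTRS_DEMO 4

    static unsigned char user_pgd[USER_PGD_PTRS_DEMO];

    static void enter_kernel_user_demo(void)
    {
        for (int i = 0; i < USER_PGD_PTRS_DEMO; i++)
            user_pgd[i] = 0;        /* user mappings not reachable from the kernel */
    }

    static void exit_kernel_user_demo(void)
    {
        for (int i = 0; i < USER_PGD_PTRS_DEMO; i++)
            user_pgd[i] = 0x67;     /* present | rw | user | accessed | dirty */
    }

    int main(void)
    {
        exit_kernel_user_demo();
        printf("returning to user: pgd[0] = %#x\n", user_pgd[0]);
        enter_kernel_user_demo();
        printf("inside the kernel: pgd[0] = %#x\n", user_pgd[0]);
        return 0;
    }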
14143diff -urNp linux-2.6.32.43/arch/x86/kernel/ftrace.c linux-2.6.32.43/arch/x86/kernel/ftrace.c
14144--- linux-2.6.32.43/arch/x86/kernel/ftrace.c 2011-03-27 14:31:47.000000000 -0400
14145+++ linux-2.6.32.43/arch/x86/kernel/ftrace.c 2011-05-04 17:56:20.000000000 -0400
14146@@ -103,7 +103,7 @@ static void *mod_code_ip; /* holds the
14147 static void *mod_code_newcode; /* holds the text to write to the IP */
14148
14149 static unsigned nmi_wait_count;
14150-static atomic_t nmi_update_count = ATOMIC_INIT(0);
14151+static atomic_unchecked_t nmi_update_count = ATOMIC_INIT(0);
14152
14153 int ftrace_arch_read_dyn_info(char *buf, int size)
14154 {
14155@@ -111,7 +111,7 @@ int ftrace_arch_read_dyn_info(char *buf,
14156
14157 r = snprintf(buf, size, "%u %u",
14158 nmi_wait_count,
14159- atomic_read(&nmi_update_count));
14160+ atomic_read_unchecked(&nmi_update_count));
14161 return r;
14162 }
14163
14164@@ -149,8 +149,10 @@ void ftrace_nmi_enter(void)
14165 {
14166 if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) {
14167 smp_rmb();
14168+ pax_open_kernel();
14169 ftrace_mod_code();
14170- atomic_inc(&nmi_update_count);
14171+ pax_close_kernel();
14172+ atomic_inc_unchecked(&nmi_update_count);
14173 }
14174 /* Must have previous changes seen before executions */
14175 smp_mb();
14176@@ -215,7 +217,7 @@ do_ftrace_mod_code(unsigned long ip, voi
14177
14178
14179
14180-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE];
14181+static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only;
14182
14183 static unsigned char *ftrace_nop_replace(void)
14184 {
14185@@ -228,6 +230,8 @@ ftrace_modify_code(unsigned long ip, uns
14186 {
14187 unsigned char replaced[MCOUNT_INSN_SIZE];
14188
14189+ ip = ktla_ktva(ip);
14190+
14191 /*
14192 * Note: Due to modules and __init, code can
14193 * disappear and change, we need to protect against faulting
14194@@ -284,7 +288,7 @@ int ftrace_update_ftrace_func(ftrace_fun
14195 unsigned char old[MCOUNT_INSN_SIZE], *new;
14196 int ret;
14197
14198- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
14199+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
14200 new = ftrace_call_replace(ip, (unsigned long)func);
14201 ret = ftrace_modify_code(ip, old, new);
14202
14203@@ -337,15 +341,15 @@ int __init ftrace_dyn_arch_init(void *da
14204 switch (faulted) {
14205 case 0:
14206 pr_info("ftrace: converting mcount calls to 0f 1f 44 00 00\n");
14207- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE);
14208+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE);
14209 break;
14210 case 1:
14211 pr_info("ftrace: converting mcount calls to 66 66 66 66 90\n");
14212- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE);
14213+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE);
14214 break;
14215 case 2:
14216 pr_info("ftrace: converting mcount calls to jmp . + 5\n");
14217- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE);
14218+ memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE);
14219 break;
14220 }
14221
14222@@ -366,6 +370,8 @@ static int ftrace_mod_jmp(unsigned long
14223 {
14224 unsigned char code[MCOUNT_INSN_SIZE];
14225
14226+ ip = ktla_ktva(ip);
14227+
14228 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
14229 return -EFAULT;
14230
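The ftrace changes above route every code-patching address through ktla_ktva() and run the NMI-time ftrace_mod_code() inside pax_open_kernel()/pax_close_kernel(); under KERNEXEC the kernel text is not writable through its normal mapping, so live patching has to go through the alternate alias that ktla_ktva() yields, and only within an explicit write window. The helper below merely gestures at that translation as a fixed offset; both the offset and the structure are assumptions, not the PaX definition.

    /* Hypothetical stand-in for the ktla_ktva() translation used above:
     * convert an address in the executing kernel-text mapping into the
     * corresponding address in the alias through which it may be patched. */
    #include <stdio.h>

    #define TEXT_ALIAS_OFFSET_DEMO 0x02000000UL   /* made-up offset for the demo */

    static unsigned long ktla_ktva_demo(unsigned long addr)
    {
        return addr + TEXT_ALIAS_OFFSET_DEMO;
    }

    int main(void)
    {
        unsigned long ip = 0xc1000000UL;

        printf("patch %#lx via alias %#lx\n", ip, ktla_ktva_demo(ip));
        return 0;
    }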
14231diff -urNp linux-2.6.32.43/arch/x86/kernel/head32.c linux-2.6.32.43/arch/x86/kernel/head32.c
14232--- linux-2.6.32.43/arch/x86/kernel/head32.c 2011-03-27 14:31:47.000000000 -0400
14233+++ linux-2.6.32.43/arch/x86/kernel/head32.c 2011-04-17 15:56:46.000000000 -0400
14234@@ -16,6 +16,7 @@
14235 #include <asm/apic.h>
14236 #include <asm/io_apic.h>
14237 #include <asm/bios_ebda.h>
14238+#include <asm/boot.h>
14239
14240 static void __init i386_default_early_setup(void)
14241 {
14242@@ -31,7 +32,7 @@ void __init i386_start_kernel(void)
14243 {
14244 reserve_trampoline_memory();
14245
14246- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14247+ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS");
14248
14249 #ifdef CONFIG_BLK_DEV_INITRD
14250 /* Reserve INITRD */
14251diff -urNp linux-2.6.32.43/arch/x86/kernel/head_32.S linux-2.6.32.43/arch/x86/kernel/head_32.S
14252--- linux-2.6.32.43/arch/x86/kernel/head_32.S 2011-03-27 14:31:47.000000000 -0400
14253+++ linux-2.6.32.43/arch/x86/kernel/head_32.S 2011-07-06 19:53:33.000000000 -0400
14254@@ -19,10 +19,17 @@
14255 #include <asm/setup.h>
14256 #include <asm/processor-flags.h>
14257 #include <asm/percpu.h>
14258+#include <asm/msr-index.h>
14259
14260 /* Physical address */
14261 #define pa(X) ((X) - __PAGE_OFFSET)
14262
14263+#ifdef CONFIG_PAX_KERNEXEC
14264+#define ta(X) (X)
14265+#else
14266+#define ta(X) ((X) - __PAGE_OFFSET)
14267+#endif
14268+
14269 /*
14270 * References to members of the new_cpu_data structure.
14271 */
14272@@ -52,11 +59,7 @@
14273 * and small than max_low_pfn, otherwise will waste some page table entries
14274 */
14275
14276-#if PTRS_PER_PMD > 1
14277-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
14278-#else
14279-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
14280-#endif
14281+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
14282
14283 /* Enough space to fit pagetables for the low memory linear map */
14284 MAPPING_BEYOND_END = \
14285@@ -73,6 +76,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P
14286 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14287
14288 /*
14289+ * Real beginning of normal "text" segment
14290+ */
14291+ENTRY(stext)
14292+ENTRY(_stext)
14293+
14294+/*
14295 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
14296 * %esi points to the real-mode code as a 32-bit pointer.
14297 * CS and DS must be 4 GB flat segments, but we don't depend on
14298@@ -80,7 +89,16 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
14299 * can.
14300 */
14301 __HEAD
14302+
14303+#ifdef CONFIG_PAX_KERNEXEC
14304+ jmp startup_32
14305+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
14306+.fill PAGE_SIZE-5,1,0xcc
14307+#endif
14308+
14309 ENTRY(startup_32)
14310+ movl pa(stack_start),%ecx
14311+
14312 /* test KEEP_SEGMENTS flag to see if the bootloader is asking
14313 us to not reload segments */
14314 testb $(1<<6), BP_loadflags(%esi)
14315@@ -95,7 +113,60 @@ ENTRY(startup_32)
14316 movl %eax,%es
14317 movl %eax,%fs
14318 movl %eax,%gs
14319+ movl %eax,%ss
14320 2:
14321+ leal -__PAGE_OFFSET(%ecx),%esp
14322+
14323+#ifdef CONFIG_SMP
14324+ movl $pa(cpu_gdt_table),%edi
14325+ movl $__per_cpu_load,%eax
14326+ movw %ax,__KERNEL_PERCPU + 2(%edi)
14327+ rorl $16,%eax
14328+ movb %al,__KERNEL_PERCPU + 4(%edi)
14329+ movb %ah,__KERNEL_PERCPU + 7(%edi)
14330+ movl $__per_cpu_end - 1,%eax
14331+ subl $__per_cpu_start,%eax
14332+ movw %ax,__KERNEL_PERCPU + 0(%edi)
14333+#endif
14334+
14335+#ifdef CONFIG_PAX_MEMORY_UDEREF
14336+ movl $NR_CPUS,%ecx
14337+ movl $pa(cpu_gdt_table),%edi
14338+1:
14339+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
14340+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
14341+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
14342+ addl $PAGE_SIZE_asm,%edi
14343+ loop 1b
14344+#endif
14345+
14346+#ifdef CONFIG_PAX_KERNEXEC
14347+ movl $pa(boot_gdt),%edi
14348+ movl $__LOAD_PHYSICAL_ADDR,%eax
14349+ movw %ax,__BOOT_CS + 2(%edi)
14350+ rorl $16,%eax
14351+ movb %al,__BOOT_CS + 4(%edi)
14352+ movb %ah,__BOOT_CS + 7(%edi)
14353+ rorl $16,%eax
14354+
14355+ ljmp $(__BOOT_CS),$1f
14356+1:
14357+
14358+ movl $NR_CPUS,%ecx
14359+ movl $pa(cpu_gdt_table),%edi
14360+ addl $__PAGE_OFFSET,%eax
14361+1:
14362+ movw %ax,__KERNEL_CS + 2(%edi)
14363+ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi)
14364+ rorl $16,%eax
14365+ movb %al,__KERNEL_CS + 4(%edi)
14366+ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi)
14367+ movb %ah,__KERNEL_CS + 7(%edi)
14368+ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi)
14369+ rorl $16,%eax
14370+ addl $PAGE_SIZE_asm,%edi
14371+ loop 1b
14372+#endif
14373
14374 /*
14375 * Clear BSS first so that there are no surprises...
14376@@ -140,9 +211,7 @@ ENTRY(startup_32)
14377 cmpl $num_subarch_entries, %eax
14378 jae bad_subarch
14379
14380- movl pa(subarch_entries)(,%eax,4), %eax
14381- subl $__PAGE_OFFSET, %eax
14382- jmp *%eax
14383+ jmp *pa(subarch_entries)(,%eax,4)
14384
14385 bad_subarch:
14386 WEAK(lguest_entry)
14387@@ -154,10 +223,10 @@ WEAK(xen_entry)
14388 __INITDATA
14389
14390 subarch_entries:
14391- .long default_entry /* normal x86/PC */
14392- .long lguest_entry /* lguest hypervisor */
14393- .long xen_entry /* Xen hypervisor */
14394- .long default_entry /* Moorestown MID */
14395+ .long ta(default_entry) /* normal x86/PC */
14396+ .long ta(lguest_entry) /* lguest hypervisor */
14397+ .long ta(xen_entry) /* Xen hypervisor */
14398+ .long ta(default_entry) /* Moorestown MID */
14399 num_subarch_entries = (. - subarch_entries) / 4
14400 .previous
14401 #endif /* CONFIG_PARAVIRT */
14402@@ -218,8 +287,11 @@ default_entry:
14403 movl %eax, pa(max_pfn_mapped)
14404
14405 /* Do early initialization of the fixmap area */
14406- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14407- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14408+#ifdef CONFIG_COMPAT_VDSO
14409+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14410+#else
14411+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8)
14412+#endif
14413 #else /* Not PAE */
14414
14415 page_pde_offset = (__PAGE_OFFSET >> 20);
14416@@ -249,8 +321,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
14417 movl %eax, pa(max_pfn_mapped)
14418
14419 /* Do early initialization of the fixmap area */
14420- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax
14421- movl %eax,pa(swapper_pg_dir+0xffc)
14422+#ifdef CONFIG_COMPAT_VDSO
14423+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc)
14424+#else
14425+ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc)
14426+#endif
14427 #endif
14428 jmp 3f
14429 /*
14430@@ -272,6 +347,9 @@ ENTRY(startup_32_smp)
14431 movl %eax,%es
14432 movl %eax,%fs
14433 movl %eax,%gs
14434+ movl pa(stack_start),%ecx
14435+ movl %eax,%ss
14436+ leal -__PAGE_OFFSET(%ecx),%esp
14437 #endif /* CONFIG_SMP */
14438 3:
14439
14440@@ -297,6 +375,7 @@ ENTRY(startup_32_smp)
14441 orl %edx,%eax
14442 movl %eax,%cr4
14443
14444+#ifdef CONFIG_X86_PAE
14445 btl $5, %eax # check if PAE is enabled
14446 jnc 6f
14447
14448@@ -305,6 +384,10 @@ ENTRY(startup_32_smp)
14449 cpuid
14450 cmpl $0x80000000, %eax
14451 jbe 6f
14452+
14453+ /* Clear bogus XD_DISABLE bits */
14454+ call verify_cpu
14455+
14456 mov $0x80000001, %eax
14457 cpuid
14458 /* Execute Disable bit supported? */
14459@@ -312,13 +395,17 @@ ENTRY(startup_32_smp)
14460 jnc 6f
14461
14462 /* Setup EFER (Extended Feature Enable Register) */
14463- movl $0xc0000080, %ecx
14464+ movl $MSR_EFER, %ecx
14465 rdmsr
14466
14467 btsl $11, %eax
14468 /* Make changes effective */
14469 wrmsr
14470
14471+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
14472+ movl $1,pa(nx_enabled)
14473+#endif
14474+
14475 6:
14476
14477 /*
14478@@ -331,8 +418,8 @@ ENTRY(startup_32_smp)
14479 movl %eax,%cr0 /* ..and set paging (PG) bit */
14480 ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
14481 1:
14482- /* Set up the stack pointer */
14483- lss stack_start,%esp
14484+ /* Shift the stack pointer to a virtual address */
14485+ addl $__PAGE_OFFSET, %esp
14486
14487 /*
14488 * Initialize eflags. Some BIOS's leave bits like NT set. This would
14489@@ -344,9 +431,7 @@ ENTRY(startup_32_smp)
14490
14491 #ifdef CONFIG_SMP
14492 cmpb $0, ready
14493- jz 1f /* Initial CPU cleans BSS */
14494- jmp checkCPUtype
14495-1:
14496+ jnz checkCPUtype
14497 #endif /* CONFIG_SMP */
14498
14499 /*
14500@@ -424,7 +509,7 @@ is386: movl $2,%ecx # set MP
14501 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
14502 movl %eax,%ss # after changing gdt.
14503
14504- movl $(__USER_DS),%eax # DS/ES contains default USER segment
14505+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
14506 movl %eax,%ds
14507 movl %eax,%es
14508
14509@@ -438,15 +523,22 @@ is386: movl $2,%ecx # set MP
14510 */
14511 cmpb $0,ready
14512 jne 1f
14513- movl $per_cpu__gdt_page,%eax
14514+ movl $cpu_gdt_table,%eax
14515 movl $per_cpu__stack_canary,%ecx
14516+#ifdef CONFIG_SMP
14517+ addl $__per_cpu_load,%ecx
14518+#endif
14519 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
14520 shrl $16, %ecx
14521 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
14522 movb %ch, 8 * GDT_ENTRY_STACK_CANARY + 7(%eax)
14523 1:
14524-#endif
14525 movl $(__KERNEL_STACK_CANARY),%eax
14526+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
14527+ movl $(__USER_DS),%eax
14528+#else
14529+ xorl %eax,%eax
14530+#endif
14531 movl %eax,%gs
14532
14533 xorl %eax,%eax # Clear LDT
14534@@ -454,14 +546,7 @@ is386: movl $2,%ecx # set MP
14535
14536 cld # gcc2 wants the direction flag cleared at all times
14537 pushl $0 # fake return address for unwinder
14538-#ifdef CONFIG_SMP
14539- movb ready, %cl
14540 movb $1, ready
14541- cmpb $0,%cl # the first CPU calls start_kernel
14542- je 1f
14543- movl (stack_start), %esp
14544-1:
14545-#endif /* CONFIG_SMP */
14546 jmp *(initial_code)
14547
14548 /*
14549@@ -546,22 +631,22 @@ early_page_fault:
14550 jmp early_fault
14551
14552 early_fault:
14553- cld
14554 #ifdef CONFIG_PRINTK
14555+ cmpl $1,%ss:early_recursion_flag
14556+ je hlt_loop
14557+ incl %ss:early_recursion_flag
14558+ cld
14559 pusha
14560 movl $(__KERNEL_DS),%eax
14561 movl %eax,%ds
14562 movl %eax,%es
14563- cmpl $2,early_recursion_flag
14564- je hlt_loop
14565- incl early_recursion_flag
14566 movl %cr2,%eax
14567 pushl %eax
14568 pushl %edx /* trapno */
14569 pushl $fault_msg
14570 call printk
14571+; call dump_stack
14572 #endif
14573- call dump_stack
14574 hlt_loop:
14575 hlt
14576 jmp hlt_loop
14577@@ -569,8 +654,11 @@ hlt_loop:
14578 /* This is the default interrupt "handler" :-) */
14579 ALIGN
14580 ignore_int:
14581- cld
14582 #ifdef CONFIG_PRINTK
14583+ cmpl $2,%ss:early_recursion_flag
14584+ je hlt_loop
14585+ incl %ss:early_recursion_flag
14586+ cld
14587 pushl %eax
14588 pushl %ecx
14589 pushl %edx
14590@@ -579,9 +667,6 @@ ignore_int:
14591 movl $(__KERNEL_DS),%eax
14592 movl %eax,%ds
14593 movl %eax,%es
14594- cmpl $2,early_recursion_flag
14595- je hlt_loop
14596- incl early_recursion_flag
14597 pushl 16(%esp)
14598 pushl 24(%esp)
14599 pushl 32(%esp)
14600@@ -600,6 +685,8 @@ ignore_int:
14601 #endif
14602 iret
14603
14604+#include "verify_cpu.S"
14605+
14606 __REFDATA
14607 .align 4
14608 ENTRY(initial_code)
14609@@ -610,31 +697,47 @@ ENTRY(initial_page_table)
14610 /*
14611 * BSS section
14612 */
14613-__PAGE_ALIGNED_BSS
14614- .align PAGE_SIZE_asm
14615 #ifdef CONFIG_X86_PAE
14616+.section .swapper_pg_pmd,"a",@progbits
14617 swapper_pg_pmd:
14618 .fill 1024*KPMDS,4,0
14619 #else
14620+.section .swapper_pg_dir,"a",@progbits
14621 ENTRY(swapper_pg_dir)
14622 .fill 1024,4,0
14623 #endif
14624+.section .swapper_pg_fixmap,"a",@progbits
14625 swapper_pg_fixmap:
14626 .fill 1024,4,0
14627 #ifdef CONFIG_X86_TRAMPOLINE
14628+.section .trampoline_pg_dir,"a",@progbits
14629 ENTRY(trampoline_pg_dir)
14630+#ifdef CONFIG_X86_PAE
14631+ .fill 4,8,0
14632+#else
14633 .fill 1024,4,0
14634 #endif
14635+#endif
14636+
14637+.section .empty_zero_page,"a",@progbits
14638 ENTRY(empty_zero_page)
14639 .fill 4096,1,0
14640
14641 /*
14642+ * The IDT has to be page-aligned to simplify the Pentium
14643+ * F0 0F bug workaround.. We have a special link segment
14644+ * for this.
14645+ */
14646+.section .idt,"a",@progbits
14647+ENTRY(idt_table)
14648+ .fill 256,8,0
14649+
14650+/*
14651 * This starts the data section.
14652 */
14653 #ifdef CONFIG_X86_PAE
14654-__PAGE_ALIGNED_DATA
14655- /* Page-aligned for the benefit of paravirt? */
14656- .align PAGE_SIZE_asm
14657+.section .swapper_pg_dir,"a",@progbits
14658+
14659 ENTRY(swapper_pg_dir)
14660 .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
14661 # if KPMDS == 3
14662@@ -653,15 +756,24 @@ ENTRY(swapper_pg_dir)
14663 # error "Kernel PMDs should be 1, 2 or 3"
14664 # endif
14665 .align PAGE_SIZE_asm /* needs to be page-sized too */
14666+
14667+#ifdef CONFIG_PAX_PER_CPU_PGD
14668+ENTRY(cpu_pgd)
14669+ .rept NR_CPUS
14670+ .fill 4,8,0
14671+ .endr
14672+#endif
14673+
14674 #endif
14675
14676 .data
14677+.balign 4
14678 ENTRY(stack_start)
14679- .long init_thread_union+THREAD_SIZE
14680- .long __BOOT_DS
14681+ .long init_thread_union+THREAD_SIZE-8
14682
14683 ready: .byte 0
14684
14685+.section .rodata,"a",@progbits
14686 early_recursion_flag:
14687 .long 0
14688
14689@@ -697,7 +809,7 @@ fault_msg:
14690 .word 0 # 32 bit align gdt_desc.address
14691 boot_gdt_descr:
14692 .word __BOOT_DS+7
14693- .long boot_gdt - __PAGE_OFFSET
14694+ .long pa(boot_gdt)
14695
14696 .word 0 # 32-bit align idt_desc.address
14697 idt_descr:
14698@@ -708,7 +820,7 @@ idt_descr:
14699 .word 0 # 32 bit align gdt_desc.address
14700 ENTRY(early_gdt_descr)
14701 .word GDT_ENTRIES*8-1
14702- .long per_cpu__gdt_page /* Overwritten for secondary CPUs */
14703+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
14704
14705 /*
14706 * The boot_gdt must mirror the equivalent in setup.S and is
14707@@ -717,5 +829,65 @@ ENTRY(early_gdt_descr)
14708 .align L1_CACHE_BYTES
14709 ENTRY(boot_gdt)
14710 .fill GDT_ENTRY_BOOT_CS,8,0
14711- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
14712- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
14713+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
14714+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
14715+
14716+ .align PAGE_SIZE_asm
14717+ENTRY(cpu_gdt_table)
14718+ .rept NR_CPUS
14719+ .quad 0x0000000000000000 /* NULL descriptor */
14720+ .quad 0x0000000000000000 /* 0x0b reserved */
14721+ .quad 0x0000000000000000 /* 0x13 reserved */
14722+ .quad 0x0000000000000000 /* 0x1b reserved */
14723+
14724+#ifdef CONFIG_PAX_KERNEXEC
14725+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
14726+#else
14727+ .quad 0x0000000000000000 /* 0x20 unused */
14728+#endif
14729+
14730+ .quad 0x0000000000000000 /* 0x28 unused */
14731+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
14732+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
14733+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
14734+ .quad 0x0000000000000000 /* 0x4b reserved */
14735+ .quad 0x0000000000000000 /* 0x53 reserved */
14736+ .quad 0x0000000000000000 /* 0x5b reserved */
14737+
14738+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
14739+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
14740+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
14741+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
14742+
14743+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
14744+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
14745+
14746+ /*
14747+ * Segments used for calling PnP BIOS have byte granularity.
14748+ * The code segments and data segments have fixed 64k limits,
14749+ * the transfer segment sizes are set at run time.
14750+ */
14751+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
14752+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
14753+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
14754+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
14755+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
14756+
14757+ /*
14758+ * The APM segments have byte granularity and their bases
14759+ * are set at run time. All have 64k limits.
14760+ */
14761+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
14762+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
14763+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
14764+
14765+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
14766+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
14767+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
14768+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
14769+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
14770+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
14771+
14772+ /* Be sure this is zeroed to avoid false validations in Xen */
14773+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
14774+ .endr
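The cpu_gdt_table added above spells every segment out as a raw .quad. The values follow the standard x86 descriptor layout, and the switch from 0x00cf9a.../0x00cf92... to 0x00cf9b.../0x00cf93... pre-sets the accessed bit, so the CPU never needs to write it back into a GDT that this patch intends to keep read-only. A small checker for such constants (illustrative, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* pack base/limit/access/flags into the 64-bit segment descriptor format */
static uint64_t gdt_entry(uint32_t base, uint32_t limit,
			  uint8_t access, uint8_t flags)
{
	uint64_t d;

	d  = limit & 0xffffULL;				/* limit 15:0   */
	d |= (uint64_t)(base & 0xffffff) << 16;		/* base  23:0   */
	d |= (uint64_t)access << 40;			/* type/S/DPL/P */
	d |= (uint64_t)((limit >> 16) & 0xf) << 48;	/* limit 19:16  */
	d |= (uint64_t)(flags & 0xf) << 52;		/* AVL/L/D/G    */
	d |= (uint64_t)(base >> 24) << 56;		/* base  31:24  */
	return d;
}

int main(void)
{
	/* kernel 4GB code, accessed bit set: prints 00cf9b000000ffff */
	printf("%016llx\n",
	       (unsigned long long)gdt_entry(0, 0xfffff, 0x9b, 0xc));
	/* kernel 4GB data, accessed bit set: prints 00cf93000000ffff */
	printf("%016llx\n",
	       (unsigned long long)gdt_entry(0, 0xfffff, 0x93, 0xc));
	return 0;
}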
14775diff -urNp linux-2.6.32.43/arch/x86/kernel/head_64.S linux-2.6.32.43/arch/x86/kernel/head_64.S
14776--- linux-2.6.32.43/arch/x86/kernel/head_64.S 2011-03-27 14:31:47.000000000 -0400
14777+++ linux-2.6.32.43/arch/x86/kernel/head_64.S 2011-04-17 15:56:46.000000000 -0400
14778@@ -19,6 +19,7 @@
14779 #include <asm/cache.h>
14780 #include <asm/processor-flags.h>
14781 #include <asm/percpu.h>
14782+#include <asm/cpufeature.h>
14783
14784 #ifdef CONFIG_PARAVIRT
14785 #include <asm/asm-offsets.h>
14786@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET
14787 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
14788 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
14789 L3_START_KERNEL = pud_index(__START_KERNEL_map)
14790+L4_VMALLOC_START = pgd_index(VMALLOC_START)
14791+L3_VMALLOC_START = pud_index(VMALLOC_START)
14792+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
14793+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
14794
14795 .text
14796 __HEAD
14797@@ -85,35 +90,22 @@ startup_64:
14798 */
14799 addq %rbp, init_level4_pgt + 0(%rip)
14800 addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
14801+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
14802+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
14803 addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
14804
14805 addq %rbp, level3_ident_pgt + 0(%rip)
14806+#ifndef CONFIG_XEN
14807+ addq %rbp, level3_ident_pgt + 8(%rip)
14808+#endif
14809
14810- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
14811- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
14812+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
14813
14814- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14815+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
14816+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip)
14817
14818- /* Add an Identity mapping if I am above 1G */
14819- leaq _text(%rip), %rdi
14820- andq $PMD_PAGE_MASK, %rdi
14821-
14822- movq %rdi, %rax
14823- shrq $PUD_SHIFT, %rax
14824- andq $(PTRS_PER_PUD - 1), %rax
14825- jz ident_complete
14826-
14827- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx
14828- leaq level3_ident_pgt(%rip), %rbx
14829- movq %rdx, 0(%rbx, %rax, 8)
14830-
14831- movq %rdi, %rax
14832- shrq $PMD_SHIFT, %rax
14833- andq $(PTRS_PER_PMD - 1), %rax
14834- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx
14835- leaq level2_spare_pgt(%rip), %rbx
14836- movq %rdx, 0(%rbx, %rax, 8)
14837-ident_complete:
14838+ addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
14839+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
14840
14841 /*
14842 * Fixup the kernel text+data virtual addresses. Note that
14843@@ -161,8 +153,8 @@ ENTRY(secondary_startup_64)
14844 * after the boot processor executes this code.
14845 */
14846
14847- /* Enable PAE mode and PGE */
14848- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax
14849+ /* Enable PAE mode and PSE/PGE */
14850+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
14851 movq %rax, %cr4
14852
14853 /* Setup early boot stage 4 level pagetables. */
14854@@ -184,9 +176,13 @@ ENTRY(secondary_startup_64)
14855 movl $MSR_EFER, %ecx
14856 rdmsr
14857 btsl $_EFER_SCE, %eax /* Enable System Call */
14858- btl $20,%edi /* No Execute supported? */
14859+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
14860 jnc 1f
14861 btsl $_EFER_NX, %eax
14862+ leaq init_level4_pgt(%rip), %rdi
14863+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
14864+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
14865+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
14866 1: wrmsr /* Make changes effective */
14867
14868 /* Setup cr0 */
14869@@ -262,16 +258,16 @@ ENTRY(secondary_startup_64)
14870 .quad x86_64_start_kernel
14871 ENTRY(initial_gs)
14872 .quad INIT_PER_CPU_VAR(irq_stack_union)
14873- __FINITDATA
14874
14875 ENTRY(stack_start)
14876 .quad init_thread_union+THREAD_SIZE-8
14877 .word 0
14878+ __FINITDATA
14879
14880 bad_address:
14881 jmp bad_address
14882
14883- .section ".init.text","ax"
14884+ __INIT
14885 #ifdef CONFIG_EARLY_PRINTK
14886 .globl early_idt_handlers
14887 early_idt_handlers:
14888@@ -316,18 +312,23 @@ ENTRY(early_idt_handler)
14889 #endif /* EARLY_PRINTK */
14890 1: hlt
14891 jmp 1b
14892+ .previous
14893
14894 #ifdef CONFIG_EARLY_PRINTK
14895+ __INITDATA
14896 early_recursion_flag:
14897 .long 0
14898+ .previous
14899
14900+ .section .rodata,"a",@progbits
14901 early_idt_msg:
14902 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
14903 early_idt_ripmsg:
14904 .asciz "RIP %s\n"
14905-#endif /* CONFIG_EARLY_PRINTK */
14906 .previous
14907+#endif /* CONFIG_EARLY_PRINTK */
14908
14909+ .section .rodata,"a",@progbits
14910 #define NEXT_PAGE(name) \
14911 .balign PAGE_SIZE; \
14912 ENTRY(name)
14913@@ -350,13 +351,36 @@ NEXT_PAGE(init_level4_pgt)
14914 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14915 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
14916 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14917+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
14918+ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE
14919+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
14920+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14921 .org init_level4_pgt + L4_START_KERNEL*8, 0
14922 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
14923 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
14924
14925+#ifdef CONFIG_PAX_PER_CPU_PGD
14926+NEXT_PAGE(cpu_pgd)
14927+ .rept NR_CPUS
14928+ .fill 512,8,0
14929+ .endr
14930+#endif
14931+
14932 NEXT_PAGE(level3_ident_pgt)
14933 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
14934+#ifdef CONFIG_XEN
14935 .fill 511,8,0
14936+#else
14937+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
14938+ .fill 510,8,0
14939+#endif
14940+
14941+NEXT_PAGE(level3_vmalloc_pgt)
14942+ .fill 512,8,0
14943+
14944+NEXT_PAGE(level3_vmemmap_pgt)
14945+ .fill L3_VMEMMAP_START,8,0
14946+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
14947
14948 NEXT_PAGE(level3_kernel_pgt)
14949 .fill L3_START_KERNEL,8,0
14950@@ -364,20 +388,23 @@ NEXT_PAGE(level3_kernel_pgt)
14951 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
14952 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14953
14954+NEXT_PAGE(level2_vmemmap_pgt)
14955+ .fill 512,8,0
14956+
14957 NEXT_PAGE(level2_fixmap_pgt)
14958- .fill 506,8,0
14959- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
14960- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
14961- .fill 5,8,0
14962+ .fill 507,8,0
14963+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
14964+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
14965+ .fill 4,8,0
14966
14967-NEXT_PAGE(level1_fixmap_pgt)
14968+NEXT_PAGE(level1_vsyscall_pgt)
14969 .fill 512,8,0
14970
14971-NEXT_PAGE(level2_ident_pgt)
14972- /* Since I easily can, map the first 1G.
14973+ /* Since I easily can, map the first 2G.
14974 * Don't set NX because code runs from these pages.
14975 */
14976- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
14977+NEXT_PAGE(level2_ident_pgt)
14978+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
14979
14980 NEXT_PAGE(level2_kernel_pgt)
14981 /*
14982@@ -390,33 +417,55 @@ NEXT_PAGE(level2_kernel_pgt)
14983 * If you want to increase this then increase MODULES_VADDR
14984 * too.)
14985 */
14986- PMDS(0, __PAGE_KERNEL_LARGE_EXEC,
14987- KERNEL_IMAGE_SIZE/PMD_SIZE)
14988-
14989-NEXT_PAGE(level2_spare_pgt)
14990- .fill 512, 8, 0
14991+ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE)
14992
14993 #undef PMDS
14994 #undef NEXT_PAGE
14995
14996- .data
14997+ .align PAGE_SIZE
14998+ENTRY(cpu_gdt_table)
14999+ .rept NR_CPUS
15000+ .quad 0x0000000000000000 /* NULL descriptor */
15001+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
15002+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
15003+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
15004+ .quad 0x00cffb000000ffff /* __USER32_CS */
15005+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
15006+ .quad 0x00affb000000ffff /* __USER_CS */
15007+
15008+#ifdef CONFIG_PAX_KERNEXEC
15009+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
15010+#else
15011+ .quad 0x0 /* unused */
15012+#endif
15013+
15014+ .quad 0,0 /* TSS */
15015+ .quad 0,0 /* LDT */
15016+ .quad 0,0,0 /* three TLS descriptors */
15017+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
15018+ /* asm/segment.h:GDT_ENTRIES must match this */
15019+
15020+ /* zero the remaining page */
15021+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
15022+ .endr
15023+
15024 .align 16
15025 .globl early_gdt_descr
15026 early_gdt_descr:
15027 .word GDT_ENTRIES*8-1
15028 early_gdt_descr_base:
15029- .quad INIT_PER_CPU_VAR(gdt_page)
15030+ .quad cpu_gdt_table
15031
15032 ENTRY(phys_base)
15033 /* This must match the first entry in level2_kernel_pgt */
15034 .quad 0x0000000000000000
15035
15036 #include "../../x86/xen/xen-head.S"
15037-
15038- .section .bss, "aw", @nobits
15039+
15040+ .section .rodata,"a",@progbits
15041 .align L1_CACHE_BYTES
15042 ENTRY(idt_table)
15043- .skip IDT_ENTRIES * 16
15044+ .fill 512,8,0
15045
15046 __PAGE_ALIGNED_BSS
15047 .align PAGE_SIZE
15048diff -urNp linux-2.6.32.43/arch/x86/kernel/i386_ksyms_32.c linux-2.6.32.43/arch/x86/kernel/i386_ksyms_32.c
15049--- linux-2.6.32.43/arch/x86/kernel/i386_ksyms_32.c 2011-03-27 14:31:47.000000000 -0400
15050+++ linux-2.6.32.43/arch/x86/kernel/i386_ksyms_32.c 2011-04-17 15:56:46.000000000 -0400
15051@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
15052 EXPORT_SYMBOL(cmpxchg8b_emu);
15053 #endif
15054
15055+EXPORT_SYMBOL_GPL(cpu_gdt_table);
15056+
15057 /* Networking helper routines. */
15058 EXPORT_SYMBOL(csum_partial_copy_generic);
15059+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
15060+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
15061
15062 EXPORT_SYMBOL(__get_user_1);
15063 EXPORT_SYMBOL(__get_user_2);
15064@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr);
15065
15066 EXPORT_SYMBOL(csum_partial);
15067 EXPORT_SYMBOL(empty_zero_page);
15068+
15069+#ifdef CONFIG_PAX_KERNEXEC
15070+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
15071+#endif
15072diff -urNp linux-2.6.32.43/arch/x86/kernel/i8259.c linux-2.6.32.43/arch/x86/kernel/i8259.c
15073--- linux-2.6.32.43/arch/x86/kernel/i8259.c 2011-03-27 14:31:47.000000000 -0400
15074+++ linux-2.6.32.43/arch/x86/kernel/i8259.c 2011-05-04 17:56:28.000000000 -0400
15075@@ -208,7 +208,7 @@ spurious_8259A_irq:
15076 "spurious 8259A interrupt: IRQ%d.\n", irq);
15077 spurious_irq_mask |= irqmask;
15078 }
15079- atomic_inc(&irq_err_count);
15080+ atomic_inc_unchecked(&irq_err_count);
15081 /*
15082 * Theoretically we do not have to handle this IRQ,
15083 * but in Linux this does not cause problems and is
15084diff -urNp linux-2.6.32.43/arch/x86/kernel/init_task.c linux-2.6.32.43/arch/x86/kernel/init_task.c
15085--- linux-2.6.32.43/arch/x86/kernel/init_task.c 2011-03-27 14:31:47.000000000 -0400
15086+++ linux-2.6.32.43/arch/x86/kernel/init_task.c 2011-04-17 15:56:46.000000000 -0400
15087@@ -20,8 +20,7 @@ static struct sighand_struct init_sighan
15088 * way process stacks are handled. This is done by having a special
15089 * "init_task" linker map entry..
15090 */
15091-union thread_union init_thread_union __init_task_data =
15092- { INIT_THREAD_INFO(init_task) };
15093+union thread_union init_thread_union __init_task_data;
15094
15095 /*
15096 * Initial task structure.
15097@@ -38,5 +37,5 @@ EXPORT_SYMBOL(init_task);
15098 * section. Since TSS's are completely CPU-local, we want them
15099 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
15100 */
15101-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
15102-
15103+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
15104+EXPORT_SYMBOL(init_tss);
15105diff -urNp linux-2.6.32.43/arch/x86/kernel/ioport.c linux-2.6.32.43/arch/x86/kernel/ioport.c
15106--- linux-2.6.32.43/arch/x86/kernel/ioport.c 2011-03-27 14:31:47.000000000 -0400
15107+++ linux-2.6.32.43/arch/x86/kernel/ioport.c 2011-04-17 15:56:46.000000000 -0400
15108@@ -6,6 +6,7 @@
15109 #include <linux/sched.h>
15110 #include <linux/kernel.h>
15111 #include <linux/capability.h>
15112+#include <linux/security.h>
15113 #include <linux/errno.h>
15114 #include <linux/types.h>
15115 #include <linux/ioport.h>
15116@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long
15117
15118 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
15119 return -EINVAL;
15120+#ifdef CONFIG_GRKERNSEC_IO
15121+ if (turn_on && grsec_disable_privio) {
15122+ gr_handle_ioperm();
15123+ return -EPERM;
15124+ }
15125+#endif
15126 if (turn_on && !capable(CAP_SYS_RAWIO))
15127 return -EPERM;
15128
15129@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long
15130 * because the ->io_bitmap_max value must match the bitmap
15131 * contents:
15132 */
15133- tss = &per_cpu(init_tss, get_cpu());
15134+ tss = init_tss + get_cpu();
15135
15136 set_bitmap(t->io_bitmap_ptr, from, num, !turn_on);
15137
15138@@ -111,6 +118,12 @@ static int do_iopl(unsigned int level, s
15139 return -EINVAL;
15140 /* Trying to gain more privileges? */
15141 if (level > old) {
15142+#ifdef CONFIG_GRKERNSEC_IO
15143+ if (grsec_disable_privio) {
15144+ gr_handle_iopl();
15145+ return -EPERM;
15146+ }
15147+#endif
15148 if (!capable(CAP_SYS_RAWIO))
15149 return -EPERM;
15150 }
15151diff -urNp linux-2.6.32.43/arch/x86/kernel/irq_32.c linux-2.6.32.43/arch/x86/kernel/irq_32.c
15152--- linux-2.6.32.43/arch/x86/kernel/irq_32.c 2011-03-27 14:31:47.000000000 -0400
15153+++ linux-2.6.32.43/arch/x86/kernel/irq_32.c 2011-07-06 19:53:33.000000000 -0400
15154@@ -35,7 +35,7 @@ static int check_stack_overflow(void)
15155 __asm__ __volatile__("andl %%esp,%0" :
15156 "=r" (sp) : "0" (THREAD_SIZE - 1));
15157
15158- return sp < (sizeof(struct thread_info) + STACK_WARN);
15159+ return sp < STACK_WARN;
15160 }
15161
15162 static void print_stack_overflow(void)
15163@@ -54,9 +54,9 @@ static inline void print_stack_overflow(
15164 * per-CPU IRQ handling contexts (thread information and stack)
15165 */
15166 union irq_ctx {
15167- struct thread_info tinfo;
15168- u32 stack[THREAD_SIZE/sizeof(u32)];
15169-} __attribute__((aligned(PAGE_SIZE)));
15170+ unsigned long previous_esp;
15171+ u32 stack[THREAD_SIZE/sizeof(u32)];
15172+} __attribute__((aligned(THREAD_SIZE)));
15173
15174 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
15175 static DEFINE_PER_CPU(union irq_ctx *, softirq_ctx);
15176@@ -78,10 +78,9 @@ static void call_on_stack(void *func, vo
15177 static inline int
15178 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
15179 {
15180- union irq_ctx *curctx, *irqctx;
15181+ union irq_ctx *irqctx;
15182 u32 *isp, arg1, arg2;
15183
15184- curctx = (union irq_ctx *) current_thread_info();
15185 irqctx = __get_cpu_var(hardirq_ctx);
15186
15187 /*
15188@@ -90,21 +89,16 @@ execute_on_irq_stack(int overflow, struc
15189 * handler) we can't do that and just have to keep using the
15190 * current stack (which is the irq stack already after all)
15191 */
15192- if (unlikely(curctx == irqctx))
15193+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
15194 return 0;
15195
15196 /* build the stack frame on the IRQ stack */
15197- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15198- irqctx->tinfo.task = curctx->tinfo.task;
15199- irqctx->tinfo.previous_esp = current_stack_pointer;
15200+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15201+ irqctx->previous_esp = current_stack_pointer;
15202
15203- /*
15204- * Copy the softirq bits in preempt_count so that the
15205- * softirq checks work in the hardirq context.
15206- */
15207- irqctx->tinfo.preempt_count =
15208- (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
15209- (curctx->tinfo.preempt_count & SOFTIRQ_MASK);
15210+#ifdef CONFIG_PAX_MEMORY_UDEREF
15211+ __set_fs(MAKE_MM_SEG(0));
15212+#endif
15213
15214 if (unlikely(overflow))
15215 call_on_stack(print_stack_overflow, isp);
15216@@ -116,6 +110,11 @@ execute_on_irq_stack(int overflow, struc
15217 : "0" (irq), "1" (desc), "2" (isp),
15218 "D" (desc->handle_irq)
15219 : "memory", "cc", "ecx");
15220+
15221+#ifdef CONFIG_PAX_MEMORY_UDEREF
15222+ __set_fs(current_thread_info()->addr_limit);
15223+#endif
15224+
15225 return 1;
15226 }
15227
15228@@ -124,28 +123,11 @@ execute_on_irq_stack(int overflow, struc
15229 */
15230 void __cpuinit irq_ctx_init(int cpu)
15231 {
15232- union irq_ctx *irqctx;
15233-
15234 if (per_cpu(hardirq_ctx, cpu))
15235 return;
15236
15237- irqctx = &per_cpu(hardirq_stack, cpu);
15238- irqctx->tinfo.task = NULL;
15239- irqctx->tinfo.exec_domain = NULL;
15240- irqctx->tinfo.cpu = cpu;
15241- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
15242- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15243-
15244- per_cpu(hardirq_ctx, cpu) = irqctx;
15245-
15246- irqctx = &per_cpu(softirq_stack, cpu);
15247- irqctx->tinfo.task = NULL;
15248- irqctx->tinfo.exec_domain = NULL;
15249- irqctx->tinfo.cpu = cpu;
15250- irqctx->tinfo.preempt_count = 0;
15251- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
15252-
15253- per_cpu(softirq_ctx, cpu) = irqctx;
15254+ per_cpu(hardirq_ctx, cpu) = &per_cpu(hardirq_stack, cpu);
15255+ per_cpu(softirq_ctx, cpu) = &per_cpu(softirq_stack, cpu);
15256
15257 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
15258 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
15259@@ -159,7 +141,6 @@ void irq_ctx_exit(int cpu)
15260 asmlinkage void do_softirq(void)
15261 {
15262 unsigned long flags;
15263- struct thread_info *curctx;
15264 union irq_ctx *irqctx;
15265 u32 *isp;
15266
15267@@ -169,15 +150,22 @@ asmlinkage void do_softirq(void)
15268 local_irq_save(flags);
15269
15270 if (local_softirq_pending()) {
15271- curctx = current_thread_info();
15272 irqctx = __get_cpu_var(softirq_ctx);
15273- irqctx->tinfo.task = curctx->task;
15274- irqctx->tinfo.previous_esp = current_stack_pointer;
15275+ irqctx->previous_esp = current_stack_pointer;
15276
15277 /* build the stack frame on the softirq stack */
15278- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
15279+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
15280+
15281+#ifdef CONFIG_PAX_MEMORY_UDEREF
15282+ __set_fs(MAKE_MM_SEG(0));
15283+#endif
15284
15285 call_on_stack(__do_softirq, isp);
15286+
15287+#ifdef CONFIG_PAX_MEMORY_UDEREF
15288+ __set_fs(current_thread_info()->addr_limit);
15289+#endif
15290+
15291 /*
15292 * Shouldnt happen, we returned above if in_interrupt():
15293 */
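In irq_32.c the per-cpu IRQ stacks no longer carry a thread_info copy at their base; union irq_ctx keeps only previous_esp, and the old "curctx == irqctx" test becomes a distance check between the current stack pointer and the THREAD_SIZE-aligned irqctx region. A rough userspace model of that distance test; the THREAD_SIZE value and aligned_alloc() are assumptions of the sketch, and the check above uses a signed pointer difference rather than this unsigned variant:

#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>

#define THREAD_SIZE 8192	/* assumed stack size for the sketch */

/* rough analogue of the test in execute_on_irq_stack() above */
static int on_irq_stack(const void *irqctx, const void *sp)
{
	return (uintptr_t)sp - (uintptr_t)irqctx < THREAD_SIZE;
}

int main(void)
{
	/* aligned_alloc() stands in for the THREAD_SIZE-aligned per-cpu hardirq_stack */
	unsigned char *buf = aligned_alloc(THREAD_SIZE, 2 * THREAD_SIZE);
	unsigned char *irqctx = buf;

	printf("%d\n", on_irq_stack(irqctx, buf + THREAD_SIZE - 8));	/* 1: already on it */
	printf("%d\n", on_irq_stack(irqctx, buf + THREAD_SIZE + 8));	/* 0: elsewhere     */
	free(buf);
	return 0;
}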
15294diff -urNp linux-2.6.32.43/arch/x86/kernel/irq.c linux-2.6.32.43/arch/x86/kernel/irq.c
15295--- linux-2.6.32.43/arch/x86/kernel/irq.c 2011-03-27 14:31:47.000000000 -0400
15296+++ linux-2.6.32.43/arch/x86/kernel/irq.c 2011-05-04 17:56:28.000000000 -0400
15297@@ -15,7 +15,7 @@
15298 #include <asm/mce.h>
15299 #include <asm/hw_irq.h>
15300
15301-atomic_t irq_err_count;
15302+atomic_unchecked_t irq_err_count;
15303
15304 /* Function pointer for generic interrupt vector handling */
15305 void (*generic_interrupt_extension)(void) = NULL;
15306@@ -114,9 +114,9 @@ static int show_other_interrupts(struct
15307 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
15308 seq_printf(p, " Machine check polls\n");
15309 #endif
15310- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
15311+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
15312 #if defined(CONFIG_X86_IO_APIC)
15313- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
15314+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
15315 #endif
15316 return 0;
15317 }
15318@@ -209,10 +209,10 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
15319
15320 u64 arch_irq_stat(void)
15321 {
15322- u64 sum = atomic_read(&irq_err_count);
15323+ u64 sum = atomic_read_unchecked(&irq_err_count);
15324
15325 #ifdef CONFIG_X86_IO_APIC
15326- sum += atomic_read(&irq_mis_count);
15327+ sum += atomic_read_unchecked(&irq_mis_count);
15328 #endif
15329 return sum;
15330 }
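irq_err_count and irq_mis_count move from atomic_t to atomic_unchecked_t here. The usual rationale: with PAX_REFCOUNT, plain atomic_t operations are instrumented to detect signed overflow so that reference-count wraps cannot be exploited, while counters that may legitimately wrap (pure statistics such as these) are switched to the unchecked variant, which stays a plain atomic operation. A userspace model of the two flavours; the saturation check below is a simplification and is not race-free, the real detection happens inside the atomic instruction sequence itself:

#include <stdio.h>
#include <limits.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

/* checked flavour: refuse to wrap past INT_MAX (simplified model) */
static void atomic_inc(atomic_t *v)
{
	if (v->counter == INT_MAX) {
		fprintf(stderr, "refcount overflow detected, saturating\n");
		return;
	}
	__sync_fetch_and_add(&v->counter, 1);
}

/* unchecked flavour: plain atomic add, wrapping is acceptable */
static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__sync_fetch_and_add(&v->counter, 1);
}

int main(void)
{
	atomic_t ref           = { INT_MAX };	/* e.g. an object refcount */
	atomic_unchecked_t err = { INT_MAX };	/* e.g. irq_err_count      */

	atomic_inc(&ref);		/* caught and saturated         */
	atomic_inc_unchecked(&err);	/* wraps to INT_MIN in practice */
	printf("ref=%d err=%d\n", ref.counter, err.counter);
	return 0;
}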
15331diff -urNp linux-2.6.32.43/arch/x86/kernel/kgdb.c linux-2.6.32.43/arch/x86/kernel/kgdb.c
15332--- linux-2.6.32.43/arch/x86/kernel/kgdb.c 2011-03-27 14:31:47.000000000 -0400
15333+++ linux-2.6.32.43/arch/x86/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
15334@@ -390,13 +390,13 @@ int kgdb_arch_handle_exception(int e_vec
15335
15336 /* clear the trace bit */
15337 linux_regs->flags &= ~X86_EFLAGS_TF;
15338- atomic_set(&kgdb_cpu_doing_single_step, -1);
15339+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
15340
15341 /* set the trace bit if we're stepping */
15342 if (remcomInBuffer[0] == 's') {
15343 linux_regs->flags |= X86_EFLAGS_TF;
15344 kgdb_single_step = 1;
15345- atomic_set(&kgdb_cpu_doing_single_step,
15346+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
15347 raw_smp_processor_id());
15348 }
15349
15350@@ -476,7 +476,7 @@ static int __kgdb_notify(struct die_args
15351 break;
15352
15353 case DIE_DEBUG:
15354- if (atomic_read(&kgdb_cpu_doing_single_step) ==
15355+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) ==
15356 raw_smp_processor_id()) {
15357 if (user_mode(regs))
15358 return single_step_cont(regs, args);
15359@@ -573,7 +573,7 @@ unsigned long kgdb_arch_pc(int exception
15360 return instruction_pointer(regs);
15361 }
15362
15363-struct kgdb_arch arch_kgdb_ops = {
15364+const struct kgdb_arch arch_kgdb_ops = {
15365 /* Breakpoint instruction: */
15366 .gdb_bpt_instr = { 0xcc },
15367 .flags = KGDB_HW_BREAKPOINT,
15368diff -urNp linux-2.6.32.43/arch/x86/kernel/kprobes.c linux-2.6.32.43/arch/x86/kernel/kprobes.c
15369--- linux-2.6.32.43/arch/x86/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
15370+++ linux-2.6.32.43/arch/x86/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
15371@@ -166,9 +166,13 @@ static void __kprobes set_jmp_op(void *f
15372 char op;
15373 s32 raddr;
15374 } __attribute__((packed)) * jop;
15375- jop = (struct __arch_jmp_op *)from;
15376+
15377+ jop = (struct __arch_jmp_op *)(ktla_ktva(from));
15378+
15379+ pax_open_kernel();
15380 jop->raddr = (s32)((long)(to) - ((long)(from) + 5));
15381 jop->op = RELATIVEJUMP_INSTRUCTION;
15382+ pax_close_kernel();
15383 }
15384
15385 /*
15386@@ -193,7 +197,7 @@ static int __kprobes can_boost(kprobe_op
15387 kprobe_opcode_t opcode;
15388 kprobe_opcode_t *orig_opcodes = opcodes;
15389
15390- if (search_exception_tables((unsigned long)opcodes))
15391+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
15392 return 0; /* Page fault may occur on this address. */
15393
15394 retry:
15395@@ -337,7 +341,9 @@ static void __kprobes fix_riprel(struct
15396 disp = (u8 *) p->addr + *((s32 *) insn) -
15397 (u8 *) p->ainsn.insn;
15398 BUG_ON((s64) (s32) disp != disp); /* Sanity check. */
15399+ pax_open_kernel();
15400 *(s32 *)insn = (s32) disp;
15401+ pax_close_kernel();
15402 }
15403 }
15404 #endif
15405@@ -345,16 +351,18 @@ static void __kprobes fix_riprel(struct
15406
15407 static void __kprobes arch_copy_kprobe(struct kprobe *p)
15408 {
15409- memcpy(p->ainsn.insn, p->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15410+ pax_open_kernel();
15411+ memcpy(p->ainsn.insn, ktla_ktva(p->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
15412+ pax_close_kernel();
15413
15414 fix_riprel(p);
15415
15416- if (can_boost(p->addr))
15417+ if (can_boost(ktla_ktva(p->addr)))
15418 p->ainsn.boostable = 0;
15419 else
15420 p->ainsn.boostable = -1;
15421
15422- p->opcode = *p->addr;
15423+ p->opcode = *(ktla_ktva(p->addr));
15424 }
15425
15426 int __kprobes arch_prepare_kprobe(struct kprobe *p)
15427@@ -432,7 +440,7 @@ static void __kprobes prepare_singlestep
15428 if (p->opcode == BREAKPOINT_INSTRUCTION)
15429 regs->ip = (unsigned long)p->addr;
15430 else
15431- regs->ip = (unsigned long)p->ainsn.insn;
15432+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15433 }
15434
15435 void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri,
15436@@ -453,7 +461,7 @@ static void __kprobes setup_singlestep(s
15437 if (p->ainsn.boostable == 1 && !p->post_handler) {
15438 /* Boost up -- we can execute copied instructions directly */
15439 reset_current_kprobe();
15440- regs->ip = (unsigned long)p->ainsn.insn;
15441+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
15442 preempt_enable_no_resched();
15443 return;
15444 }
15445@@ -523,7 +531,7 @@ static int __kprobes kprobe_handler(stru
15446 struct kprobe_ctlblk *kcb;
15447
15448 addr = (kprobe_opcode_t *)(regs->ip - sizeof(kprobe_opcode_t));
15449- if (*addr != BREAKPOINT_INSTRUCTION) {
15450+ if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
15451 /*
15452 * The breakpoint instruction was removed right
15453 * after we hit it. Another cpu has removed
15454@@ -775,7 +783,7 @@ static void __kprobes resume_execution(s
15455 struct pt_regs *regs, struct kprobe_ctlblk *kcb)
15456 {
15457 unsigned long *tos = stack_addr(regs);
15458- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
15459+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
15460 unsigned long orig_ip = (unsigned long)p->addr;
15461 kprobe_opcode_t *insn = p->ainsn.insn;
15462
15463@@ -958,7 +966,7 @@ int __kprobes kprobe_exceptions_notify(s
15464 struct die_args *args = data;
15465 int ret = NOTIFY_DONE;
15466
15467- if (args->regs && user_mode_vm(args->regs))
15468+ if (args->regs && user_mode(args->regs))
15469 return ret;
15470
15471 switch (val) {
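The kprobes changes bracket every write to instruction memory with pax_open_kernel()/pax_close_kernel() and translate target addresses with ktla_ktva(), since KERNEXEC keeps kernel text read-only; the open/close pair temporarily permits the write (on x86 this is typically done by toggling CR0.WP, defined elsewhere in the patch). A userspace sketch of the pattern, with mprotect() standing in for the write-protect toggle:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

static unsigned char *page;
static long pagesz;

/* stand-ins for the kernel helpers: temporarily allow, then forbid, writes */
static void pax_open_kernel(void)  { mprotect(page, pagesz, PROT_READ | PROT_WRITE); }
static void pax_close_kernel(void) { mprotect(page, pagesz, PROT_READ); }

int main(void)
{
	pagesz = sysconf(_SC_PAGESIZE);
	page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
		    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	page[0] = 0xcc;				/* "original instruction"      */
	mprotect(page, pagesz, PROT_READ);	/* from now on: read-only text */

	pax_open_kernel();
	page[0] = 0x90;				/* the bracketed patch write   */
	pax_close_kernel();

	printf("byte 0 = %02x, page is read-only again\n", page[0]);
	return 0;
}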
15472diff -urNp linux-2.6.32.43/arch/x86/kernel/ldt.c linux-2.6.32.43/arch/x86/kernel/ldt.c
15473--- linux-2.6.32.43/arch/x86/kernel/ldt.c 2011-03-27 14:31:47.000000000 -0400
15474+++ linux-2.6.32.43/arch/x86/kernel/ldt.c 2011-04-17 15:56:46.000000000 -0400
15475@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, i
15476 if (reload) {
15477 #ifdef CONFIG_SMP
15478 preempt_disable();
15479- load_LDT(pc);
15480+ load_LDT_nolock(pc);
15481 if (!cpumask_equal(mm_cpumask(current->mm),
15482 cpumask_of(smp_processor_id())))
15483 smp_call_function(flush_ldt, current->mm, 1);
15484 preempt_enable();
15485 #else
15486- load_LDT(pc);
15487+ load_LDT_nolock(pc);
15488 #endif
15489 }
15490 if (oldsize) {
15491@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t
15492 return err;
15493
15494 for (i = 0; i < old->size; i++)
15495- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
15496+ write_ldt_entry(new->ldt, i, old->ldt + i);
15497 return 0;
15498 }
15499
15500@@ -115,6 +115,24 @@ int init_new_context(struct task_struct
15501 retval = copy_ldt(&mm->context, &old_mm->context);
15502 mutex_unlock(&old_mm->context.lock);
15503 }
15504+
15505+ if (tsk == current) {
15506+ mm->context.vdso = 0;
15507+
15508+#ifdef CONFIG_X86_32
15509+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
15510+ mm->context.user_cs_base = 0UL;
15511+ mm->context.user_cs_limit = ~0UL;
15512+
15513+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
15514+ cpus_clear(mm->context.cpu_user_cs_mask);
15515+#endif
15516+
15517+#endif
15518+#endif
15519+
15520+ }
15521+
15522 return retval;
15523 }
15524
15525@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, u
15526 }
15527 }
15528
15529+#ifdef CONFIG_PAX_SEGMEXEC
15530+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
15531+ error = -EINVAL;
15532+ goto out_unlock;
15533+ }
15534+#endif
15535+
15536 fill_ldt(&ldt, &ldt_info);
15537 if (oldmode)
15538 ldt.avl = 0;
15539diff -urNp linux-2.6.32.43/arch/x86/kernel/machine_kexec_32.c linux-2.6.32.43/arch/x86/kernel/machine_kexec_32.c
15540--- linux-2.6.32.43/arch/x86/kernel/machine_kexec_32.c 2011-03-27 14:31:47.000000000 -0400
15541+++ linux-2.6.32.43/arch/x86/kernel/machine_kexec_32.c 2011-04-17 15:56:46.000000000 -0400
15542@@ -26,7 +26,7 @@
15543 #include <asm/system.h>
15544 #include <asm/cacheflush.h>
15545
15546-static void set_idt(void *newidt, __u16 limit)
15547+static void set_idt(struct desc_struct *newidt, __u16 limit)
15548 {
15549 struct desc_ptr curidt;
15550
15551@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16
15552 }
15553
15554
15555-static void set_gdt(void *newgdt, __u16 limit)
15556+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
15557 {
15558 struct desc_ptr curgdt;
15559
15560@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image)
15561 }
15562
15563 control_page = page_address(image->control_code_page);
15564- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
15565+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
15566
15567 relocate_kernel_ptr = control_page;
15568 page_list[PA_CONTROL_PAGE] = __pa(control_page);
15569diff -urNp linux-2.6.32.43/arch/x86/kernel/microcode_amd.c linux-2.6.32.43/arch/x86/kernel/microcode_amd.c
15570--- linux-2.6.32.43/arch/x86/kernel/microcode_amd.c 2011-04-17 17:00:52.000000000 -0400
15571+++ linux-2.6.32.43/arch/x86/kernel/microcode_amd.c 2011-04-17 17:03:05.000000000 -0400
15572@@ -364,7 +364,7 @@ static void microcode_fini_cpu_amd(int c
15573 uci->mc = NULL;
15574 }
15575
15576-static struct microcode_ops microcode_amd_ops = {
15577+static const struct microcode_ops microcode_amd_ops = {
15578 .request_microcode_user = request_microcode_user,
15579 .request_microcode_fw = request_microcode_fw,
15580 .collect_cpu_info = collect_cpu_info_amd,
15581@@ -372,7 +372,7 @@ static struct microcode_ops microcode_am
15582 .microcode_fini_cpu = microcode_fini_cpu_amd,
15583 };
15584
15585-struct microcode_ops * __init init_amd_microcode(void)
15586+const struct microcode_ops * __init init_amd_microcode(void)
15587 {
15588 return &microcode_amd_ops;
15589 }
15590diff -urNp linux-2.6.32.43/arch/x86/kernel/microcode_core.c linux-2.6.32.43/arch/x86/kernel/microcode_core.c
15591--- linux-2.6.32.43/arch/x86/kernel/microcode_core.c 2011-03-27 14:31:47.000000000 -0400
15592+++ linux-2.6.32.43/arch/x86/kernel/microcode_core.c 2011-04-17 15:56:46.000000000 -0400
15593@@ -90,7 +90,7 @@ MODULE_LICENSE("GPL");
15594
15595 #define MICROCODE_VERSION "2.00"
15596
15597-static struct microcode_ops *microcode_ops;
15598+static const struct microcode_ops *microcode_ops;
15599
15600 /*
15601 * Synchronization.
15602diff -urNp linux-2.6.32.43/arch/x86/kernel/microcode_intel.c linux-2.6.32.43/arch/x86/kernel/microcode_intel.c
15603--- linux-2.6.32.43/arch/x86/kernel/microcode_intel.c 2011-03-27 14:31:47.000000000 -0400
15604+++ linux-2.6.32.43/arch/x86/kernel/microcode_intel.c 2011-04-17 15:56:46.000000000 -0400
15605@@ -443,13 +443,13 @@ static enum ucode_state request_microcod
15606
15607 static int get_ucode_user(void *to, const void *from, size_t n)
15608 {
15609- return copy_from_user(to, from, n);
15610+ return copy_from_user(to, (__force const void __user *)from, n);
15611 }
15612
15613 static enum ucode_state
15614 request_microcode_user(int cpu, const void __user *buf, size_t size)
15615 {
15616- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
15617+ return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
15618 }
15619
15620 static void microcode_fini_cpu(int cpu)
15621@@ -460,7 +460,7 @@ static void microcode_fini_cpu(int cpu)
15622 uci->mc = NULL;
15623 }
15624
15625-static struct microcode_ops microcode_intel_ops = {
15626+static const struct microcode_ops microcode_intel_ops = {
15627 .request_microcode_user = request_microcode_user,
15628 .request_microcode_fw = request_microcode_fw,
15629 .collect_cpu_info = collect_cpu_info,
15630@@ -468,7 +468,7 @@ static struct microcode_ops microcode_in
15631 .microcode_fini_cpu = microcode_fini_cpu,
15632 };
15633
15634-struct microcode_ops * __init init_intel_microcode(void)
15635+const struct microcode_ops * __init init_intel_microcode(void)
15636 {
15637 return &microcode_intel_ops;
15638 }
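A pattern that repeats through the microcode, kgdb and DMA hunks above (and continues with the __read_only pv_*_ops structures below): tables of function pointers such as microcode_ops and dma_map_ops are constified so they are emitted into .rodata rather than writable data, and their consumers take a const pointer. That removes an easy target for an attacker who has already gained an arbitrary kernel write. A minimal illustration of the pattern, not taken from the patch:

#include <stdio.h>

struct ops {
	int  (*request)(int cpu);
	void (*fini)(int cpu);
};

static int  do_request(int cpu) { printf("request on cpu %d\n", cpu); return 0; }
static void do_fini(int cpu)    { printf("fini on cpu %d\n", cpu); }

/* was: static struct ops the_ops = { ... };   (writable, lives in .data) */
static const struct ops the_ops = {	/* now read-only, lives in .rodata */
	.request = do_request,
	.fini    = do_fini,
};

static const struct ops *ops;		/* consumers only need a const view */

int main(void)
{
	ops = &the_ops;
	ops->request(0);
	ops->fini(0);
	/* ops->request = NULL;   would now be rejected at compile time */
	return 0;
}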
15639diff -urNp linux-2.6.32.43/arch/x86/kernel/module.c linux-2.6.32.43/arch/x86/kernel/module.c
15640--- linux-2.6.32.43/arch/x86/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
15641+++ linux-2.6.32.43/arch/x86/kernel/module.c 2011-04-17 15:56:46.000000000 -0400
15642@@ -34,7 +34,7 @@
15643 #define DEBUGP(fmt...)
15644 #endif
15645
15646-void *module_alloc(unsigned long size)
15647+static void *__module_alloc(unsigned long size, pgprot_t prot)
15648 {
15649 struct vm_struct *area;
15650
15651@@ -48,8 +48,18 @@ void *module_alloc(unsigned long size)
15652 if (!area)
15653 return NULL;
15654
15655- return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM,
15656- PAGE_KERNEL_EXEC);
15657+ return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot);
15658+}
15659+
15660+void *module_alloc(unsigned long size)
15661+{
15662+
15663+#ifdef CONFIG_PAX_KERNEXEC
15664+ return __module_alloc(size, PAGE_KERNEL);
15665+#else
15666+ return __module_alloc(size, PAGE_KERNEL_EXEC);
15667+#endif
15668+
15669 }
15670
15671 /* Free memory returned from module_alloc */
15672@@ -58,6 +68,40 @@ void module_free(struct module *mod, voi
15673 vfree(module_region);
15674 }
15675
15676+#ifdef CONFIG_PAX_KERNEXEC
15677+#ifdef CONFIG_X86_32
15678+void *module_alloc_exec(unsigned long size)
15679+{
15680+ struct vm_struct *area;
15681+
15682+ if (size == 0)
15683+ return NULL;
15684+
15685+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
15686+ return area ? area->addr : NULL;
15687+}
15688+EXPORT_SYMBOL(module_alloc_exec);
15689+
15690+void module_free_exec(struct module *mod, void *module_region)
15691+{
15692+ vunmap(module_region);
15693+}
15694+EXPORT_SYMBOL(module_free_exec);
15695+#else
15696+void module_free_exec(struct module *mod, void *module_region)
15697+{
15698+ module_free(mod, module_region);
15699+}
15700+EXPORT_SYMBOL(module_free_exec);
15701+
15702+void *module_alloc_exec(unsigned long size)
15703+{
15704+ return __module_alloc(size, PAGE_KERNEL_RX);
15705+}
15706+EXPORT_SYMBOL(module_alloc_exec);
15707+#endif
15708+#endif
15709+
15710 /* We don't need anything special. */
15711 int module_frob_arch_sections(Elf_Ehdr *hdr,
15712 Elf_Shdr *sechdrs,
15713@@ -77,14 +121,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15714 unsigned int i;
15715 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
15716 Elf32_Sym *sym;
15717- uint32_t *location;
15718+ uint32_t *plocation, location;
15719
15720 DEBUGP("Applying relocate section %u to %u\n", relsec,
15721 sechdrs[relsec].sh_info);
15722 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
15723 /* This is where to make the change */
15724- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
15725- + rel[i].r_offset;
15726+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
15727+ location = (uint32_t)plocation;
15728+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
15729+ plocation = ktla_ktva((void *)plocation);
15730 /* This is the symbol it is referring to. Note that all
15731 undefined symbols have been resolved. */
15732 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
15733@@ -93,11 +139,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
15734 switch (ELF32_R_TYPE(rel[i].r_info)) {
15735 case R_386_32:
15736 /* We add the value into the location given */
15737- *location += sym->st_value;
15738+ pax_open_kernel();
15739+ *plocation += sym->st_value;
15740+ pax_close_kernel();
15741 break;
15742 case R_386_PC32:
15743 /* Add the value, subtract its postition */
15744- *location += sym->st_value - (uint32_t)location;
15745+ pax_open_kernel();
15746+ *plocation += sym->st_value - location;
15747+ pax_close_kernel();
15748 break;
15749 default:
15750 printk(KERN_ERR "module %s: Unknown relocation: %u\n",
15751@@ -153,21 +203,30 @@ int apply_relocate_add(Elf64_Shdr *sechd
15752 case R_X86_64_NONE:
15753 break;
15754 case R_X86_64_64:
15755+ pax_open_kernel();
15756 *(u64 *)loc = val;
15757+ pax_close_kernel();
15758 break;
15759 case R_X86_64_32:
15760+ pax_open_kernel();
15761 *(u32 *)loc = val;
15762+ pax_close_kernel();
15763 if (val != *(u32 *)loc)
15764 goto overflow;
15765 break;
15766 case R_X86_64_32S:
15767+ pax_open_kernel();
15768 *(s32 *)loc = val;
15769+ pax_close_kernel();
15770 if ((s64)val != *(s32 *)loc)
15771 goto overflow;
15772 break;
15773 case R_X86_64_PC32:
15774 val -= (u64)loc;
15775+ pax_open_kernel();
15776 *(u32 *)loc = val;
15777+ pax_close_kernel();
15778+
15779 #if 0
15780 if ((s64)val != *(s32 *)loc)
15781 goto overflow;
15782diff -urNp linux-2.6.32.43/arch/x86/kernel/paravirt.c linux-2.6.32.43/arch/x86/kernel/paravirt.c
15783--- linux-2.6.32.43/arch/x86/kernel/paravirt.c 2011-03-27 14:31:47.000000000 -0400
15784+++ linux-2.6.32.43/arch/x86/kernel/paravirt.c 2011-08-05 20:33:55.000000000 -0400
15785@@ -53,6 +53,9 @@ u64 _paravirt_ident_64(u64 x)
15786 {
15787 return x;
15788 }
15789+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15790+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
15791+#endif
15792
15793 void __init default_banner(void)
15794 {
15795@@ -122,7 +125,7 @@ unsigned paravirt_patch_jmp(void *insnbu
15796 * corresponding structure. */
15797 static void *get_call_destination(u8 type)
15798 {
15799- struct paravirt_patch_template tmpl = {
15800+ const struct paravirt_patch_template tmpl = {
15801 .pv_init_ops = pv_init_ops,
15802 .pv_time_ops = pv_time_ops,
15803 .pv_cpu_ops = pv_cpu_ops,
15804@@ -133,6 +136,8 @@ static void *get_call_destination(u8 typ
15805 .pv_lock_ops = pv_lock_ops,
15806 #endif
15807 };
15808+
15809+ pax_track_stack();
15810 return *((void **)&tmpl + type);
15811 }
15812
15813@@ -145,15 +150,19 @@ unsigned paravirt_patch_default(u8 type,
15814 if (opfunc == NULL)
15815 /* If there's no function, patch it with a ud2a (BUG) */
15816 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
15817- else if (opfunc == _paravirt_nop)
15818+ else if (opfunc == (void *)_paravirt_nop)
15819 /* If the operation is a nop, then nop the callsite */
15820 ret = paravirt_patch_nop();
15821
15822 /* identity functions just return their single argument */
15823- else if (opfunc == _paravirt_ident_32)
15824+ else if (opfunc == (void *)_paravirt_ident_32)
15825 ret = paravirt_patch_ident_32(insnbuf, len);
15826- else if (opfunc == _paravirt_ident_64)
15827+ else if (opfunc == (void *)_paravirt_ident_64)
15828+ ret = paravirt_patch_ident_64(insnbuf, len);
15829+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
15830+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
15831 ret = paravirt_patch_ident_64(insnbuf, len);
15832+#endif
15833
15834 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
15835 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
15836@@ -178,7 +187,7 @@ unsigned paravirt_patch_insns(void *insn
15837 if (insn_len > len || start == NULL)
15838 insn_len = len;
15839 else
15840- memcpy(insnbuf, start, insn_len);
15841+ memcpy(insnbuf, ktla_ktva(start), insn_len);
15842
15843 return insn_len;
15844 }
15845@@ -294,22 +303,22 @@ void arch_flush_lazy_mmu_mode(void)
15846 preempt_enable();
15847 }
15848
15849-struct pv_info pv_info = {
15850+struct pv_info pv_info __read_only = {
15851 .name = "bare hardware",
15852 .paravirt_enabled = 0,
15853 .kernel_rpl = 0,
15854 .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */
15855 };
15856
15857-struct pv_init_ops pv_init_ops = {
15858+struct pv_init_ops pv_init_ops __read_only = {
15859 .patch = native_patch,
15860 };
15861
15862-struct pv_time_ops pv_time_ops = {
15863+struct pv_time_ops pv_time_ops __read_only = {
15864 .sched_clock = native_sched_clock,
15865 };
15866
15867-struct pv_irq_ops pv_irq_ops = {
15868+struct pv_irq_ops pv_irq_ops __read_only = {
15869 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
15870 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
15871 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
15872@@ -321,7 +330,7 @@ struct pv_irq_ops pv_irq_ops = {
15873 #endif
15874 };
15875
15876-struct pv_cpu_ops pv_cpu_ops = {
15877+struct pv_cpu_ops pv_cpu_ops __read_only = {
15878 .cpuid = native_cpuid,
15879 .get_debugreg = native_get_debugreg,
15880 .set_debugreg = native_set_debugreg,
15881@@ -382,21 +391,26 @@ struct pv_cpu_ops pv_cpu_ops = {
15882 .end_context_switch = paravirt_nop,
15883 };
15884
15885-struct pv_apic_ops pv_apic_ops = {
15886+struct pv_apic_ops pv_apic_ops __read_only = {
15887 #ifdef CONFIG_X86_LOCAL_APIC
15888 .startup_ipi_hook = paravirt_nop,
15889 #endif
15890 };
15891
15892-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
15893+#ifdef CONFIG_X86_32
15894+#ifdef CONFIG_X86_PAE
15895+/* 64-bit pagetable entries */
15896+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
15897+#else
15898 /* 32-bit pagetable entries */
15899 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
15900+#endif
15901 #else
15902 /* 64-bit pagetable entries */
15903 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
15904 #endif
15905
15906-struct pv_mmu_ops pv_mmu_ops = {
15907+struct pv_mmu_ops pv_mmu_ops __read_only = {
15908
15909 .read_cr2 = native_read_cr2,
15910 .write_cr2 = native_write_cr2,
15911@@ -467,6 +481,12 @@ struct pv_mmu_ops pv_mmu_ops = {
15912 },
15913
15914 .set_fixmap = native_set_fixmap,
15915+
15916+#ifdef CONFIG_PAX_KERNEXEC
15917+ .pax_open_kernel = native_pax_open_kernel,
15918+ .pax_close_kernel = native_pax_close_kernel,
15919+#endif
15920+
15921 };
15922
15923 EXPORT_SYMBOL_GPL(pv_time_ops);
15924diff -urNp linux-2.6.32.43/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.32.43/arch/x86/kernel/paravirt-spinlocks.c
15925--- linux-2.6.32.43/arch/x86/kernel/paravirt-spinlocks.c 2011-03-27 14:31:47.000000000 -0400
15926+++ linux-2.6.32.43/arch/x86/kernel/paravirt-spinlocks.c 2011-04-17 15:56:46.000000000 -0400
15927@@ -13,7 +13,7 @@ default_spin_lock_flags(raw_spinlock_t *
15928 __raw_spin_lock(lock);
15929 }
15930
15931-struct pv_lock_ops pv_lock_ops = {
15932+struct pv_lock_ops pv_lock_ops __read_only = {
15933 #ifdef CONFIG_SMP
15934 .spin_is_locked = __ticket_spin_is_locked,
15935 .spin_is_contended = __ticket_spin_is_contended,
15936diff -urNp linux-2.6.32.43/arch/x86/kernel/pci-calgary_64.c linux-2.6.32.43/arch/x86/kernel/pci-calgary_64.c
15937--- linux-2.6.32.43/arch/x86/kernel/pci-calgary_64.c 2011-03-27 14:31:47.000000000 -0400
15938+++ linux-2.6.32.43/arch/x86/kernel/pci-calgary_64.c 2011-04-17 15:56:46.000000000 -0400
15939@@ -477,7 +477,7 @@ static void calgary_free_coherent(struct
15940 free_pages((unsigned long)vaddr, get_order(size));
15941 }
15942
15943-static struct dma_map_ops calgary_dma_ops = {
15944+static const struct dma_map_ops calgary_dma_ops = {
15945 .alloc_coherent = calgary_alloc_coherent,
15946 .free_coherent = calgary_free_coherent,
15947 .map_sg = calgary_map_sg,
15948diff -urNp linux-2.6.32.43/arch/x86/kernel/pci-dma.c linux-2.6.32.43/arch/x86/kernel/pci-dma.c
15949--- linux-2.6.32.43/arch/x86/kernel/pci-dma.c 2011-03-27 14:31:47.000000000 -0400
15950+++ linux-2.6.32.43/arch/x86/kernel/pci-dma.c 2011-04-17 15:56:46.000000000 -0400
15951@@ -14,7 +14,7 @@
15952
15953 static int forbid_dac __read_mostly;
15954
15955-struct dma_map_ops *dma_ops;
15956+const struct dma_map_ops *dma_ops;
15957 EXPORT_SYMBOL(dma_ops);
15958
15959 static int iommu_sac_force __read_mostly;
15960@@ -243,7 +243,7 @@ early_param("iommu", iommu_setup);
15961
15962 int dma_supported(struct device *dev, u64 mask)
15963 {
15964- struct dma_map_ops *ops = get_dma_ops(dev);
15965+ const struct dma_map_ops *ops = get_dma_ops(dev);
15966
15967 #ifdef CONFIG_PCI
15968 if (mask > 0xffffffff && forbid_dac > 0) {
15969diff -urNp linux-2.6.32.43/arch/x86/kernel/pci-gart_64.c linux-2.6.32.43/arch/x86/kernel/pci-gart_64.c
15970--- linux-2.6.32.43/arch/x86/kernel/pci-gart_64.c 2011-03-27 14:31:47.000000000 -0400
15971+++ linux-2.6.32.43/arch/x86/kernel/pci-gart_64.c 2011-04-17 15:56:46.000000000 -0400
15972@@ -682,7 +682,7 @@ static __init int init_k8_gatt(struct ag
15973 return -1;
15974 }
15975
15976-static struct dma_map_ops gart_dma_ops = {
15977+static const struct dma_map_ops gart_dma_ops = {
15978 .map_sg = gart_map_sg,
15979 .unmap_sg = gart_unmap_sg,
15980 .map_page = gart_map_page,
15981diff -urNp linux-2.6.32.43/arch/x86/kernel/pci-nommu.c linux-2.6.32.43/arch/x86/kernel/pci-nommu.c
15982--- linux-2.6.32.43/arch/x86/kernel/pci-nommu.c 2011-03-27 14:31:47.000000000 -0400
15983+++ linux-2.6.32.43/arch/x86/kernel/pci-nommu.c 2011-04-17 15:56:46.000000000 -0400
15984@@ -94,7 +94,7 @@ static void nommu_sync_sg_for_device(str
15985 flush_write_buffers();
15986 }
15987
15988-struct dma_map_ops nommu_dma_ops = {
15989+const struct dma_map_ops nommu_dma_ops = {
15990 .alloc_coherent = dma_generic_alloc_coherent,
15991 .free_coherent = nommu_free_coherent,
15992 .map_sg = nommu_map_sg,
15993diff -urNp linux-2.6.32.43/arch/x86/kernel/pci-swiotlb.c linux-2.6.32.43/arch/x86/kernel/pci-swiotlb.c
15994--- linux-2.6.32.43/arch/x86/kernel/pci-swiotlb.c 2011-03-27 14:31:47.000000000 -0400
15995+++ linux-2.6.32.43/arch/x86/kernel/pci-swiotlb.c 2011-04-17 15:56:46.000000000 -0400
15996@@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent(
15997 return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags);
15998 }
15999
16000-static struct dma_map_ops swiotlb_dma_ops = {
16001+static const struct dma_map_ops swiotlb_dma_ops = {
16002 .mapping_error = swiotlb_dma_mapping_error,
16003 .alloc_coherent = x86_swiotlb_alloc_coherent,
16004 .free_coherent = swiotlb_free_coherent,
16005diff -urNp linux-2.6.32.43/arch/x86/kernel/process_32.c linux-2.6.32.43/arch/x86/kernel/process_32.c
16006--- linux-2.6.32.43/arch/x86/kernel/process_32.c 2011-06-25 12:55:34.000000000 -0400
16007+++ linux-2.6.32.43/arch/x86/kernel/process_32.c 2011-06-25 12:56:37.000000000 -0400
16008@@ -67,6 +67,7 @@ asmlinkage void ret_from_fork(void) __as
16009 unsigned long thread_saved_pc(struct task_struct *tsk)
16010 {
16011 return ((unsigned long *)tsk->thread.sp)[3];
16012+//XXX return tsk->thread.eip;
16013 }
16014
16015 #ifndef CONFIG_SMP
16016@@ -129,15 +130,14 @@ void __show_regs(struct pt_regs *regs, i
16017 unsigned short ss, gs;
16018 const char *board;
16019
16020- if (user_mode_vm(regs)) {
16021+ if (user_mode(regs)) {
16022 sp = regs->sp;
16023 ss = regs->ss & 0xffff;
16024- gs = get_user_gs(regs);
16025 } else {
16026 sp = (unsigned long) (&regs->sp);
16027 savesegment(ss, ss);
16028- savesegment(gs, gs);
16029 }
16030+ gs = get_user_gs(regs);
16031
16032 printk("\n");
16033
16034@@ -210,10 +210,10 @@ int kernel_thread(int (*fn)(void *), voi
16035 regs.bx = (unsigned long) fn;
16036 regs.dx = (unsigned long) arg;
16037
16038- regs.ds = __USER_DS;
16039- regs.es = __USER_DS;
16040+ regs.ds = __KERNEL_DS;
16041+ regs.es = __KERNEL_DS;
16042 regs.fs = __KERNEL_PERCPU;
16043- regs.gs = __KERNEL_STACK_CANARY;
16044+ savesegment(gs, regs.gs);
16045 regs.orig_ax = -1;
16046 regs.ip = (unsigned long) kernel_thread_helper;
16047 regs.cs = __KERNEL_CS | get_kernel_rpl();
16048@@ -247,13 +247,14 @@ int copy_thread(unsigned long clone_flag
16049 struct task_struct *tsk;
16050 int err;
16051
16052- childregs = task_pt_regs(p);
16053+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
16054 *childregs = *regs;
16055 childregs->ax = 0;
16056 childregs->sp = sp;
16057
16058 p->thread.sp = (unsigned long) childregs;
16059 p->thread.sp0 = (unsigned long) (childregs+1);
16060+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16061
16062 p->thread.ip = (unsigned long) ret_from_fork;
16063
16064@@ -345,7 +346,7 @@ __switch_to(struct task_struct *prev_p,
16065 struct thread_struct *prev = &prev_p->thread,
16066 *next = &next_p->thread;
16067 int cpu = smp_processor_id();
16068- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16069+ struct tss_struct *tss = init_tss + cpu;
16070 bool preload_fpu;
16071
16072 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
16073@@ -380,6 +381,10 @@ __switch_to(struct task_struct *prev_p,
16074 */
16075 lazy_save_gs(prev->gs);
16076
16077+#ifdef CONFIG_PAX_MEMORY_UDEREF
16078+ __set_fs(task_thread_info(next_p)->addr_limit);
16079+#endif
16080+
16081 /*
16082 * Load the per-thread Thread-Local Storage descriptor.
16083 */
16084@@ -415,6 +420,9 @@ __switch_to(struct task_struct *prev_p,
16085 */
16086 arch_end_context_switch(next_p);
16087
16088+ percpu_write(current_task, next_p);
16089+ percpu_write(current_tinfo, &next_p->tinfo);
16090+
16091 if (preload_fpu)
16092 __math_state_restore();
16093
16094@@ -424,8 +432,6 @@ __switch_to(struct task_struct *prev_p,
16095 if (prev->gs | next->gs)
16096 lazy_load_gs(next->gs);
16097
16098- percpu_write(current_task, next_p);
16099-
16100 return prev_p;
16101 }
16102
16103@@ -495,4 +501,3 @@ unsigned long get_wchan(struct task_stru
16104 } while (count++ < 16);
16105 return 0;
16106 }
16107-
16108diff -urNp linux-2.6.32.43/arch/x86/kernel/process_64.c linux-2.6.32.43/arch/x86/kernel/process_64.c
16109--- linux-2.6.32.43/arch/x86/kernel/process_64.c 2011-06-25 12:55:34.000000000 -0400
16110+++ linux-2.6.32.43/arch/x86/kernel/process_64.c 2011-06-25 12:56:37.000000000 -0400
16111@@ -91,7 +91,7 @@ static void __exit_idle(void)
16112 void exit_idle(void)
16113 {
16114 /* idle loop has pid 0 */
16115- if (current->pid)
16116+ if (task_pid_nr(current))
16117 return;
16118 __exit_idle();
16119 }
16120@@ -170,7 +170,7 @@ void __show_regs(struct pt_regs *regs, i
16121 if (!board)
16122 board = "";
16123 printk(KERN_INFO "Pid: %d, comm: %.20s %s %s %.*s %s\n",
16124- current->pid, current->comm, print_tainted(),
16125+ task_pid_nr(current), current->comm, print_tainted(),
16126 init_utsname()->release,
16127 (int)strcspn(init_utsname()->version, " "),
16128 init_utsname()->version, board);
16129@@ -280,8 +280,7 @@ int copy_thread(unsigned long clone_flag
16130 struct pt_regs *childregs;
16131 struct task_struct *me = current;
16132
16133- childregs = ((struct pt_regs *)
16134- (THREAD_SIZE + task_stack_page(p))) - 1;
16135+ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 16;
16136 *childregs = *regs;
16137
16138 childregs->ax = 0;
16139@@ -292,6 +291,7 @@ int copy_thread(unsigned long clone_flag
16140 p->thread.sp = (unsigned long) childregs;
16141 p->thread.sp0 = (unsigned long) (childregs+1);
16142 p->thread.usersp = me->thread.usersp;
16143+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
16144
16145 set_tsk_thread_flag(p, TIF_FORK);
16146
16147@@ -379,7 +379,7 @@ __switch_to(struct task_struct *prev_p,
16148 struct thread_struct *prev = &prev_p->thread;
16149 struct thread_struct *next = &next_p->thread;
16150 int cpu = smp_processor_id();
16151- struct tss_struct *tss = &per_cpu(init_tss, cpu);
16152+ struct tss_struct *tss = init_tss + cpu;
16153 unsigned fsindex, gsindex;
16154 bool preload_fpu;
16155
16156@@ -475,10 +475,9 @@ __switch_to(struct task_struct *prev_p,
16157 prev->usersp = percpu_read(old_rsp);
16158 percpu_write(old_rsp, next->usersp);
16159 percpu_write(current_task, next_p);
16160+ percpu_write(current_tinfo, &next_p->tinfo);
16161
16162- percpu_write(kernel_stack,
16163- (unsigned long)task_stack_page(next_p) +
16164- THREAD_SIZE - KERNEL_STACK_OFFSET);
16165+ percpu_write(kernel_stack, next->sp0);
16166
16167 /*
16168 * Now maybe reload the debug registers and handle I/O bitmaps
16169@@ -559,12 +558,11 @@ unsigned long get_wchan(struct task_stru
16170 if (!p || p == current || p->state == TASK_RUNNING)
16171 return 0;
16172 stack = (unsigned long)task_stack_page(p);
16173- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
16174+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
16175 return 0;
16176 fp = *(u64 *)(p->thread.sp);
16177 do {
16178- if (fp < (unsigned long)stack ||
16179- fp >= (unsigned long)stack+THREAD_SIZE)
16180+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
16181 return 0;
16182 ip = *(u64 *)(fp+8);
16183 if (!in_sched_functions(ip))
16184diff -urNp linux-2.6.32.43/arch/x86/kernel/process.c linux-2.6.32.43/arch/x86/kernel/process.c
16185--- linux-2.6.32.43/arch/x86/kernel/process.c 2011-04-22 19:16:29.000000000 -0400
16186+++ linux-2.6.32.43/arch/x86/kernel/process.c 2011-05-22 23:02:03.000000000 -0400
16187@@ -51,16 +51,33 @@ void free_thread_xstate(struct task_stru
16188
16189 void free_thread_info(struct thread_info *ti)
16190 {
16191- free_thread_xstate(ti->task);
16192 free_pages((unsigned long)ti, get_order(THREAD_SIZE));
16193 }
16194
16195+static struct kmem_cache *task_struct_cachep;
16196+
16197 void arch_task_cache_init(void)
16198 {
16199- task_xstate_cachep =
16200- kmem_cache_create("task_xstate", xstate_size,
16201+ /* create a slab on which task_structs can be allocated */
16202+ task_struct_cachep =
16203+ kmem_cache_create("task_struct", sizeof(struct task_struct),
16204+ ARCH_MIN_TASKALIGN, SLAB_PANIC | SLAB_NOTRACK, NULL);
16205+
16206+ task_xstate_cachep =
16207+ kmem_cache_create("task_xstate", xstate_size,
16208 __alignof__(union thread_xstate),
16209- SLAB_PANIC | SLAB_NOTRACK, NULL);
16210+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
16211+}
16212+
16213+struct task_struct *alloc_task_struct(void)
16214+{
16215+ return kmem_cache_alloc(task_struct_cachep, GFP_KERNEL);
16216+}
16217+
16218+void free_task_struct(struct task_struct *task)
16219+{
16220+ free_thread_xstate(task);
16221+ kmem_cache_free(task_struct_cachep, task);
16222 }
16223
16224 /*
16225@@ -73,7 +90,7 @@ void exit_thread(void)
16226 unsigned long *bp = t->io_bitmap_ptr;
16227
16228 if (bp) {
16229- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
16230+ struct tss_struct *tss = init_tss + get_cpu();
16231
16232 t->io_bitmap_ptr = NULL;
16233 clear_thread_flag(TIF_IO_BITMAP);
16234@@ -93,6 +110,9 @@ void flush_thread(void)
16235
16236 clear_tsk_thread_flag(tsk, TIF_DEBUG);
16237
16238+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
16239+ loadsegment(gs, 0);
16240+#endif
16241 tsk->thread.debugreg0 = 0;
16242 tsk->thread.debugreg1 = 0;
16243 tsk->thread.debugreg2 = 0;
16244@@ -307,7 +327,7 @@ void default_idle(void)
16245 EXPORT_SYMBOL(default_idle);
16246 #endif
16247
16248-void stop_this_cpu(void *dummy)
16249+__noreturn void stop_this_cpu(void *dummy)
16250 {
16251 local_irq_disable();
16252 /*
16253@@ -568,16 +588,35 @@ static int __init idle_setup(char *str)
16254 }
16255 early_param("idle", idle_setup);
16256
16257-unsigned long arch_align_stack(unsigned long sp)
16258+#ifdef CONFIG_PAX_RANDKSTACK
16259+asmlinkage void pax_randomize_kstack(void)
16260 {
16261- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
16262- sp -= get_random_int() % 8192;
16263- return sp & ~0xf;
16264-}
16265+ struct thread_struct *thread = &current->thread;
16266+ unsigned long time;
16267
16268-unsigned long arch_randomize_brk(struct mm_struct *mm)
16269-{
16270- unsigned long range_end = mm->brk + 0x02000000;
16271- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
16272+ if (!randomize_va_space)
16273+ return;
16274+
16275+ rdtscl(time);
16276+
16277+ /* P4 seems to return a 0 LSB, ignore it */
16278+#ifdef CONFIG_MPENTIUM4
16279+ time &= 0x3EUL;
16280+ time <<= 2;
16281+#elif defined(CONFIG_X86_64)
16282+ time &= 0xFUL;
16283+ time <<= 4;
16284+#else
16285+ time &= 0x1FUL;
16286+ time <<= 3;
16287+#endif
16288+
16289+ thread->sp0 ^= time;
16290+ load_sp0(init_tss + smp_processor_id(), thread);
16291+
16292+#ifdef CONFIG_X86_64
16293+ percpu_write(kernel_stack, thread->sp0);
16294+#endif
16295 }
16296+#endif
16297
16298diff -urNp linux-2.6.32.43/arch/x86/kernel/ptrace.c linux-2.6.32.43/arch/x86/kernel/ptrace.c
16299--- linux-2.6.32.43/arch/x86/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
16300+++ linux-2.6.32.43/arch/x86/kernel/ptrace.c 2011-04-17 15:56:46.000000000 -0400
16301@@ -925,7 +925,7 @@ static const struct user_regset_view use
16302 long arch_ptrace(struct task_struct *child, long request, long addr, long data)
16303 {
16304 int ret;
16305- unsigned long __user *datap = (unsigned long __user *)data;
16306+ unsigned long __user *datap = (__force unsigned long __user *)data;
16307
16308 switch (request) {
16309 /* read the word at location addr in the USER area. */
16310@@ -1012,14 +1012,14 @@ long arch_ptrace(struct task_struct *chi
16311 if (addr < 0)
16312 return -EIO;
16313 ret = do_get_thread_area(child, addr,
16314- (struct user_desc __user *) data);
16315+ (__force struct user_desc __user *) data);
16316 break;
16317
16318 case PTRACE_SET_THREAD_AREA:
16319 if (addr < 0)
16320 return -EIO;
16321 ret = do_set_thread_area(child, addr,
16322- (struct user_desc __user *) data, 0);
16323+ (__force struct user_desc __user *) data, 0);
16324 break;
16325 #endif
16326
16327@@ -1038,12 +1038,12 @@ long arch_ptrace(struct task_struct *chi
16328 #ifdef CONFIG_X86_PTRACE_BTS
16329 case PTRACE_BTS_CONFIG:
16330 ret = ptrace_bts_config
16331- (child, data, (struct ptrace_bts_config __user *)addr);
16332+ (child, data, (__force struct ptrace_bts_config __user *)addr);
16333 break;
16334
16335 case PTRACE_BTS_STATUS:
16336 ret = ptrace_bts_status
16337- (child, data, (struct ptrace_bts_config __user *)addr);
16338+ (child, data, (__force struct ptrace_bts_config __user *)addr);
16339 break;
16340
16341 case PTRACE_BTS_SIZE:
16342@@ -1052,7 +1052,7 @@ long arch_ptrace(struct task_struct *chi
16343
16344 case PTRACE_BTS_GET:
16345 ret = ptrace_bts_read_record
16346- (child, data, (struct bts_struct __user *) addr);
16347+ (child, data, (__force struct bts_struct __user *) addr);
16348 break;
16349
16350 case PTRACE_BTS_CLEAR:
16351@@ -1061,7 +1061,7 @@ long arch_ptrace(struct task_struct *chi
16352
16353 case PTRACE_BTS_DRAIN:
16354 ret = ptrace_bts_drain
16355- (child, data, (struct bts_struct __user *) addr);
16356+ (child, data, (__force struct bts_struct __user *) addr);
16357 break;
16358 #endif /* CONFIG_X86_PTRACE_BTS */
16359
16360@@ -1450,7 +1450,7 @@ void send_sigtrap(struct task_struct *ts
16361 info.si_code = si_code;
16362
16363 /* User-mode ip? */
16364- info.si_addr = user_mode_vm(regs) ? (void __user *) regs->ip : NULL;
16365+ info.si_addr = user_mode(regs) ? (__force void __user *) regs->ip : NULL;
16366
16367 /* Send us the fake SIGTRAP */
16368 force_sig_info(SIGTRAP, &info, tsk);
16369@@ -1469,7 +1469,7 @@ void send_sigtrap(struct task_struct *ts
16370 * We must return the syscall number to actually look up in the table.
16371 * This can be -1L to skip running any syscall at all.
16372 */
16373-asmregparm long syscall_trace_enter(struct pt_regs *regs)
16374+long syscall_trace_enter(struct pt_regs *regs)
16375 {
16376 long ret = 0;
16377
16378@@ -1514,7 +1514,7 @@ asmregparm long syscall_trace_enter(stru
16379 return ret ?: regs->orig_ax;
16380 }
16381
16382-asmregparm void syscall_trace_leave(struct pt_regs *regs)
16383+void syscall_trace_leave(struct pt_regs *regs)
16384 {
16385 if (unlikely(current->audit_context))
16386 audit_syscall_exit(AUDITSC_RESULT(regs->ax), regs->ax);
16387diff -urNp linux-2.6.32.43/arch/x86/kernel/reboot.c linux-2.6.32.43/arch/x86/kernel/reboot.c
16388--- linux-2.6.32.43/arch/x86/kernel/reboot.c 2011-03-27 14:31:47.000000000 -0400
16389+++ linux-2.6.32.43/arch/x86/kernel/reboot.c 2011-05-22 23:02:03.000000000 -0400
16390@@ -33,7 +33,7 @@ void (*pm_power_off)(void);
16391 EXPORT_SYMBOL(pm_power_off);
16392
16393 static const struct desc_ptr no_idt = {};
16394-static int reboot_mode;
16395+static unsigned short reboot_mode;
16396 enum reboot_type reboot_type = BOOT_KBD;
16397 int reboot_force;
16398
16399@@ -292,12 +292,12 @@ core_initcall(reboot_init);
16400 controller to pulse the CPU reset line, which is more thorough, but
16401 doesn't work with at least one type of 486 motherboard. It is easy
16402 to stop this code working; hence the copious comments. */
16403-static const unsigned long long
16404-real_mode_gdt_entries [3] =
16405+static struct desc_struct
16406+real_mode_gdt_entries [3] __read_only =
16407 {
16408- 0x0000000000000000ULL, /* Null descriptor */
16409- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
16410- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
16411+ GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */
16412+ GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */
16413+ GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */
16414 };
16415
16416 static const struct desc_ptr
16417@@ -346,7 +346,7 @@ static const unsigned char jump_to_bios
16418 * specified by the code and length parameters.
16419 * We assume that length will aways be less that 100!
16420 */
16421-void machine_real_restart(const unsigned char *code, int length)
16422+__noreturn void machine_real_restart(const unsigned char *code, unsigned int length)
16423 {
16424 local_irq_disable();
16425
16426@@ -366,8 +366,8 @@ void machine_real_restart(const unsigned
16427 /* Remap the kernel at virtual address zero, as well as offset zero
16428 from the kernel segment. This assumes the kernel segment starts at
16429 virtual address PAGE_OFFSET. */
16430- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16431- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS);
16432+ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16433+ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));
16434
16435 /*
16436 * Use `swapper_pg_dir' as our page directory.
16437@@ -379,16 +379,15 @@ void machine_real_restart(const unsigned
16438 boot)". This seems like a fairly standard thing that gets set by
16439 REBOOT.COM programs, and the previous reset routine did this
16440 too. */
16441- *((unsigned short *)0x472) = reboot_mode;
16442+ *(unsigned short *)(__va(0x472)) = reboot_mode;
16443
16444 /* For the switch to real mode, copy some code to low memory. It has
16445 to be in the first 64k because it is running in 16-bit mode, and it
16446 has to have the same physical and virtual address, because it turns
16447 off paging. Copy it near the end of the first page, out of the way
16448 of BIOS variables. */
16449- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100),
16450- real_mode_switch, sizeof (real_mode_switch));
16451- memcpy((void *)(0x1000 - 100), code, length);
16452+ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch));
16453+ memcpy(__va(0x1000 - 100), code, length);
16454
16455 /* Set up the IDT for real mode. */
16456 load_idt(&real_mode_idt);
16457@@ -416,6 +415,7 @@ void machine_real_restart(const unsigned
16458 __asm__ __volatile__ ("ljmp $0x0008,%0"
16459 :
16460 : "i" ((void *)(0x1000 - sizeof (real_mode_switch) - 100)));
16461+ do { } while (1);
16462 }
16463 #ifdef CONFIG_APM_MODULE
16464 EXPORT_SYMBOL(machine_real_restart);
16465@@ -536,7 +536,7 @@ void __attribute__((weak)) mach_reboot_f
16466 {
16467 }
16468
16469-static void native_machine_emergency_restart(void)
16470+__noreturn static void native_machine_emergency_restart(void)
16471 {
16472 int i;
16473
16474@@ -651,13 +651,13 @@ void native_machine_shutdown(void)
16475 #endif
16476 }
16477
16478-static void __machine_emergency_restart(int emergency)
16479+static __noreturn void __machine_emergency_restart(int emergency)
16480 {
16481 reboot_emergency = emergency;
16482 machine_ops.emergency_restart();
16483 }
16484
16485-static void native_machine_restart(char *__unused)
16486+static __noreturn void native_machine_restart(char *__unused)
16487 {
16488 printk("machine restart\n");
16489
16490@@ -666,7 +666,7 @@ static void native_machine_restart(char
16491 __machine_emergency_restart(0);
16492 }
16493
16494-static void native_machine_halt(void)
16495+static __noreturn void native_machine_halt(void)
16496 {
16497 /* stop other cpus and apics */
16498 machine_shutdown();
16499@@ -677,7 +677,7 @@ static void native_machine_halt(void)
16500 stop_this_cpu(NULL);
16501 }
16502
16503-static void native_machine_power_off(void)
16504+__noreturn static void native_machine_power_off(void)
16505 {
16506 if (pm_power_off) {
16507 if (!reboot_force)
16508@@ -686,6 +686,7 @@ static void native_machine_power_off(voi
16509 }
16510 /* a fallback in case there is no PM info available */
16511 tboot_shutdown(TB_SHUTDOWN_HALT);
16512+ do { } while (1);
16513 }
16514
16515 struct machine_ops machine_ops = {
16516diff -urNp linux-2.6.32.43/arch/x86/kernel/setup.c linux-2.6.32.43/arch/x86/kernel/setup.c
16517--- linux-2.6.32.43/arch/x86/kernel/setup.c 2011-04-17 17:00:52.000000000 -0400
16518+++ linux-2.6.32.43/arch/x86/kernel/setup.c 2011-04-17 17:03:05.000000000 -0400
16519@@ -783,14 +783,14 @@ void __init setup_arch(char **cmdline_p)
16520
16521 if (!boot_params.hdr.root_flags)
16522 root_mountflags &= ~MS_RDONLY;
16523- init_mm.start_code = (unsigned long) _text;
16524- init_mm.end_code = (unsigned long) _etext;
16525+ init_mm.start_code = ktla_ktva((unsigned long) _text);
16526+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
16527 init_mm.end_data = (unsigned long) _edata;
16528 init_mm.brk = _brk_end;
16529
16530- code_resource.start = virt_to_phys(_text);
16531- code_resource.end = virt_to_phys(_etext)-1;
16532- data_resource.start = virt_to_phys(_etext);
16533+ code_resource.start = virt_to_phys(ktla_ktva(_text));
16534+ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1;
16535+ data_resource.start = virt_to_phys(_sdata);
16536 data_resource.end = virt_to_phys(_edata)-1;
16537 bss_resource.start = virt_to_phys(&__bss_start);
16538 bss_resource.end = virt_to_phys(&__bss_stop)-1;
16539diff -urNp linux-2.6.32.43/arch/x86/kernel/setup_percpu.c linux-2.6.32.43/arch/x86/kernel/setup_percpu.c
16540--- linux-2.6.32.43/arch/x86/kernel/setup_percpu.c 2011-03-27 14:31:47.000000000 -0400
16541+++ linux-2.6.32.43/arch/x86/kernel/setup_percpu.c 2011-06-04 20:36:29.000000000 -0400
16542@@ -25,19 +25,17 @@
16543 # define DBG(x...)
16544 #endif
16545
16546-DEFINE_PER_CPU(int, cpu_number);
16547+#ifdef CONFIG_SMP
16548+DEFINE_PER_CPU(unsigned int, cpu_number);
16549 EXPORT_PER_CPU_SYMBOL(cpu_number);
16550+#endif
16551
16552-#ifdef CONFIG_X86_64
16553 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
16554-#else
16555-#define BOOT_PERCPU_OFFSET 0
16556-#endif
16557
16558 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
16559 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
16560
16561-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
16562+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
16563 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
16564 };
16565 EXPORT_SYMBOL(__per_cpu_offset);
16566@@ -159,10 +157,10 @@ static inline void setup_percpu_segment(
16567 {
16568 #ifdef CONFIG_X86_32
16569 struct desc_struct gdt;
16570+ unsigned long base = per_cpu_offset(cpu);
16571
16572- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
16573- 0x2 | DESCTYPE_S, 0x8);
16574- gdt.s = 1;
16575+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
16576+ 0x83 | DESCTYPE_S, 0xC);
16577 write_gdt_entry(get_cpu_gdt_table(cpu),
16578 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
16579 #endif
16580@@ -212,6 +210,11 @@ void __init setup_per_cpu_areas(void)
16581 /* alrighty, percpu areas up and running */
16582 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
16583 for_each_possible_cpu(cpu) {
16584+#ifdef CONFIG_CC_STACKPROTECTOR
16585+#ifdef CONFIG_X86_32
16586+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
16587+#endif
16588+#endif
16589 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
16590 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
16591 per_cpu(cpu_number, cpu) = cpu;
16592@@ -239,6 +242,12 @@ void __init setup_per_cpu_areas(void)
16593 early_per_cpu_map(x86_cpu_to_node_map, cpu);
16594 #endif
16595 #endif
16596+#ifdef CONFIG_CC_STACKPROTECTOR
16597+#ifdef CONFIG_X86_32
16598+ if (!cpu)
16599+ per_cpu(stack_canary.canary, cpu) = canary;
16600+#endif
16601+#endif
16602 /*
16603 * Up to this point, the boot CPU has been using .data.init
16604 * area. Reload any changed state for the boot CPU.
16605diff -urNp linux-2.6.32.43/arch/x86/kernel/signal.c linux-2.6.32.43/arch/x86/kernel/signal.c
16606--- linux-2.6.32.43/arch/x86/kernel/signal.c 2011-03-27 14:31:47.000000000 -0400
16607+++ linux-2.6.32.43/arch/x86/kernel/signal.c 2011-05-22 23:02:03.000000000 -0400
16608@@ -197,7 +197,7 @@ static unsigned long align_sigframe(unsi
16609 * Align the stack pointer according to the i386 ABI,
16610 * i.e. so that on function entry ((sp + 4) & 15) == 0.
16611 */
16612- sp = ((sp + 4) & -16ul) - 4;
16613+ sp = ((sp - 12) & -16ul) - 4;
16614 #else /* !CONFIG_X86_32 */
16615 sp = round_down(sp, 16) - 8;
16616 #endif
16617@@ -248,11 +248,11 @@ get_sigframe(struct k_sigaction *ka, str
16618 * Return an always-bogus address instead so we will die with SIGSEGV.
16619 */
16620 if (onsigstack && !likely(on_sig_stack(sp)))
16621- return (void __user *)-1L;
16622+ return (__force void __user *)-1L;
16623
16624 /* save i387 state */
16625 if (used_math() && save_i387_xstate(*fpstate) < 0)
16626- return (void __user *)-1L;
16627+ return (__force void __user *)-1L;
16628
16629 return (void __user *)sp;
16630 }
16631@@ -307,9 +307,9 @@ __setup_frame(int sig, struct k_sigactio
16632 }
16633
16634 if (current->mm->context.vdso)
16635- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16636+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
16637 else
16638- restorer = &frame->retcode;
16639+ restorer = (void __user *)&frame->retcode;
16640 if (ka->sa.sa_flags & SA_RESTORER)
16641 restorer = ka->sa.sa_restorer;
16642
16643@@ -323,7 +323,7 @@ __setup_frame(int sig, struct k_sigactio
16644 * reasons and because gdb uses it as a signature to notice
16645 * signal handler stack frames.
16646 */
16647- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
16648+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
16649
16650 if (err)
16651 return -EFAULT;
16652@@ -377,7 +377,10 @@ static int __setup_rt_frame(int sig, str
16653 err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
16654
16655 /* Set up to return from userspace. */
16656- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16657+ if (current->mm->context.vdso)
16658+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
16659+ else
16660+ restorer = (void __user *)&frame->retcode;
16661 if (ka->sa.sa_flags & SA_RESTORER)
16662 restorer = ka->sa.sa_restorer;
16663 put_user_ex(restorer, &frame->pretcode);
16664@@ -389,7 +392,7 @@ static int __setup_rt_frame(int sig, str
16665 * reasons and because gdb uses it as a signature to notice
16666 * signal handler stack frames.
16667 */
16668- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
16669+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
16670 } put_user_catch(err);
16671
16672 if (err)
16673@@ -782,6 +785,8 @@ static void do_signal(struct pt_regs *re
16674 int signr;
16675 sigset_t *oldset;
16676
16677+ pax_track_stack();
16678+
16679 /*
16680 * We want the common case to go fast, which is why we may in certain
16681 * cases get here from kernel mode. Just return without doing anything
16682@@ -789,7 +794,7 @@ static void do_signal(struct pt_regs *re
16683 * X86_32: vm86 regs switched out by assembly code before reaching
16684 * here, so testing against kernel CS suffices.
16685 */
16686- if (!user_mode(regs))
16687+ if (!user_mode_novm(regs))
16688 return;
16689
16690 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
16691diff -urNp linux-2.6.32.43/arch/x86/kernel/smpboot.c linux-2.6.32.43/arch/x86/kernel/smpboot.c
16692--- linux-2.6.32.43/arch/x86/kernel/smpboot.c 2011-03-27 14:31:47.000000000 -0400
16693+++ linux-2.6.32.43/arch/x86/kernel/smpboot.c 2011-07-01 19:10:03.000000000 -0400
16694@@ -94,14 +94,14 @@ static DEFINE_PER_CPU(struct task_struct
16695 */
16696 static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex);
16697
16698-void cpu_hotplug_driver_lock()
16699+void cpu_hotplug_driver_lock(void)
16700 {
16701- mutex_lock(&x86_cpu_hotplug_driver_mutex);
16702+ mutex_lock(&x86_cpu_hotplug_driver_mutex);
16703 }
16704
16705-void cpu_hotplug_driver_unlock()
16706+void cpu_hotplug_driver_unlock(void)
16707 {
16708- mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16709+ mutex_unlock(&x86_cpu_hotplug_driver_mutex);
16710 }
16711
16712 ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; }
16713@@ -625,7 +625,7 @@ wakeup_secondary_cpu_via_init(int phys_a
16714 * target processor state.
16715 */
16716 startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
16717- (unsigned long)stack_start.sp);
16718+ stack_start);
16719
16720 /*
16721 * Run STARTUP IPI loop.
16722@@ -743,6 +743,7 @@ static int __cpuinit do_boot_cpu(int api
16723 set_idle_for_cpu(cpu, c_idle.idle);
16724 do_rest:
16725 per_cpu(current_task, cpu) = c_idle.idle;
16726+ per_cpu(current_tinfo, cpu) = &c_idle.idle->tinfo;
16727 #ifdef CONFIG_X86_32
16728 /* Stack for startup_32 can be just as for start_secondary onwards */
16729 irq_ctx_init(cpu);
16730@@ -750,13 +751,15 @@ do_rest:
16731 #else
16732 clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
16733 initial_gs = per_cpu_offset(cpu);
16734- per_cpu(kernel_stack, cpu) =
16735- (unsigned long)task_stack_page(c_idle.idle) -
16736- KERNEL_STACK_OFFSET + THREAD_SIZE;
16737+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(c_idle.idle) - 16 + THREAD_SIZE;
16738 #endif
16739+
16740+ pax_open_kernel();
16741 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
16742+ pax_close_kernel();
16743+
16744 initial_code = (unsigned long)start_secondary;
16745- stack_start.sp = (void *) c_idle.idle->thread.sp;
16746+ stack_start = c_idle.idle->thread.sp;
16747
16748 /* start_ip had better be page-aligned! */
16749 start_ip = setup_trampoline();
16750@@ -891,6 +894,12 @@ int __cpuinit native_cpu_up(unsigned int
16751
16752 per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;
16753
16754+#ifdef CONFIG_PAX_PER_CPU_PGD
16755+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
16756+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
16757+ KERNEL_PGD_PTRS);
16758+#endif
16759+
16760 err = do_boot_cpu(apicid, cpu);
16761
16762 if (err) {
16763diff -urNp linux-2.6.32.43/arch/x86/kernel/step.c linux-2.6.32.43/arch/x86/kernel/step.c
16764--- linux-2.6.32.43/arch/x86/kernel/step.c 2011-03-27 14:31:47.000000000 -0400
16765+++ linux-2.6.32.43/arch/x86/kernel/step.c 2011-04-17 15:56:46.000000000 -0400
16766@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc
16767 struct desc_struct *desc;
16768 unsigned long base;
16769
16770- seg &= ~7UL;
16771+ seg >>= 3;
16772
16773 mutex_lock(&child->mm->context.lock);
16774- if (unlikely((seg >> 3) >= child->mm->context.size))
16775+ if (unlikely(seg >= child->mm->context.size))
16776 addr = -1L; /* bogus selector, access would fault */
16777 else {
16778 desc = child->mm->context.ldt + seg;
16779@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struc
16780 addr += base;
16781 }
16782 mutex_unlock(&child->mm->context.lock);
16783- }
16784+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
16785+ addr = ktla_ktva(addr);
16786
16787 return addr;
16788 }
16789@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct t
16790 unsigned char opcode[15];
16791 unsigned long addr = convert_ip_to_linear(child, regs);
16792
16793+ if (addr == -EINVAL)
16794+ return 0;
16795+
16796 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
16797 for (i = 0; i < copied; i++) {
16798 switch (opcode[i]) {
16799@@ -74,7 +78,7 @@ static int is_setting_trap_flag(struct t
16800
16801 #ifdef CONFIG_X86_64
16802 case 0x40 ... 0x4f:
16803- if (regs->cs != __USER_CS)
16804+ if ((regs->cs & 0xffff) != __USER_CS)
16805 /* 32-bit mode: register increment */
16806 return 0;
16807 /* 64-bit mode: REX prefix */
16808diff -urNp linux-2.6.32.43/arch/x86/kernel/syscall_table_32.S linux-2.6.32.43/arch/x86/kernel/syscall_table_32.S
16809--- linux-2.6.32.43/arch/x86/kernel/syscall_table_32.S 2011-03-27 14:31:47.000000000 -0400
16810+++ linux-2.6.32.43/arch/x86/kernel/syscall_table_32.S 2011-04-17 15:56:46.000000000 -0400
16811@@ -1,3 +1,4 @@
16812+.section .rodata,"a",@progbits
16813 ENTRY(sys_call_table)
16814 .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
16815 .long sys_exit
16816diff -urNp linux-2.6.32.43/arch/x86/kernel/sys_i386_32.c linux-2.6.32.43/arch/x86/kernel/sys_i386_32.c
16817--- linux-2.6.32.43/arch/x86/kernel/sys_i386_32.c 2011-03-27 14:31:47.000000000 -0400
16818+++ linux-2.6.32.43/arch/x86/kernel/sys_i386_32.c 2011-04-17 15:56:46.000000000 -0400
16819@@ -24,6 +24,21 @@
16820
16821 #include <asm/syscalls.h>
16822
16823+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
16824+{
16825+ unsigned long pax_task_size = TASK_SIZE;
16826+
16827+#ifdef CONFIG_PAX_SEGMEXEC
16828+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
16829+ pax_task_size = SEGMEXEC_TASK_SIZE;
16830+#endif
16831+
16832+ if (len > pax_task_size || addr > pax_task_size - len)
16833+ return -EINVAL;
16834+
16835+ return 0;
16836+}
16837+
16838 /*
16839 * Perform the select(nd, in, out, ex, tv) and mmap() system
16840 * calls. Linux/i386 didn't use to be able to handle more than
16841@@ -58,6 +73,212 @@ out:
16842 return err;
16843 }
16844
16845+unsigned long
16846+arch_get_unmapped_area(struct file *filp, unsigned long addr,
16847+ unsigned long len, unsigned long pgoff, unsigned long flags)
16848+{
16849+ struct mm_struct *mm = current->mm;
16850+ struct vm_area_struct *vma;
16851+ unsigned long start_addr, pax_task_size = TASK_SIZE;
16852+
16853+#ifdef CONFIG_PAX_SEGMEXEC
16854+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
16855+ pax_task_size = SEGMEXEC_TASK_SIZE;
16856+#endif
16857+
16858+ pax_task_size -= PAGE_SIZE;
16859+
16860+ if (len > pax_task_size)
16861+ return -ENOMEM;
16862+
16863+ if (flags & MAP_FIXED)
16864+ return addr;
16865+
16866+#ifdef CONFIG_PAX_RANDMMAP
16867+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16868+#endif
16869+
16870+ if (addr) {
16871+ addr = PAGE_ALIGN(addr);
16872+ if (pax_task_size - len >= addr) {
16873+ vma = find_vma(mm, addr);
16874+ if (check_heap_stack_gap(vma, addr, len))
16875+ return addr;
16876+ }
16877+ }
16878+ if (len > mm->cached_hole_size) {
16879+ start_addr = addr = mm->free_area_cache;
16880+ } else {
16881+ start_addr = addr = mm->mmap_base;
16882+ mm->cached_hole_size = 0;
16883+ }
16884+
16885+#ifdef CONFIG_PAX_PAGEEXEC
16886+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) {
16887+ start_addr = 0x00110000UL;
16888+
16889+#ifdef CONFIG_PAX_RANDMMAP
16890+ if (mm->pax_flags & MF_PAX_RANDMMAP)
16891+ start_addr += mm->delta_mmap & 0x03FFF000UL;
16892+#endif
16893+
16894+ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base)
16895+ start_addr = addr = mm->mmap_base;
16896+ else
16897+ addr = start_addr;
16898+ }
16899+#endif
16900+
16901+full_search:
16902+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
16903+ /* At this point: (!vma || addr < vma->vm_end). */
16904+ if (pax_task_size - len < addr) {
16905+ /*
16906+ * Start a new search - just in case we missed
16907+ * some holes.
16908+ */
16909+ if (start_addr != mm->mmap_base) {
16910+ start_addr = addr = mm->mmap_base;
16911+ mm->cached_hole_size = 0;
16912+ goto full_search;
16913+ }
16914+ return -ENOMEM;
16915+ }
16916+ if (check_heap_stack_gap(vma, addr, len))
16917+ break;
16918+ if (addr + mm->cached_hole_size < vma->vm_start)
16919+ mm->cached_hole_size = vma->vm_start - addr;
16920+ addr = vma->vm_end;
16921+ if (mm->start_brk <= addr && addr < mm->mmap_base) {
16922+ start_addr = addr = mm->mmap_base;
16923+ mm->cached_hole_size = 0;
16924+ goto full_search;
16925+ }
16926+ }
16927+
16928+ /*
16929+ * Remember the place where we stopped the search:
16930+ */
16931+ mm->free_area_cache = addr + len;
16932+ return addr;
16933+}
16934+
16935+unsigned long
16936+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
16937+ const unsigned long len, const unsigned long pgoff,
16938+ const unsigned long flags)
16939+{
16940+ struct vm_area_struct *vma;
16941+ struct mm_struct *mm = current->mm;
16942+ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE;
16943+
16944+#ifdef CONFIG_PAX_SEGMEXEC
16945+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
16946+ pax_task_size = SEGMEXEC_TASK_SIZE;
16947+#endif
16948+
16949+ pax_task_size -= PAGE_SIZE;
16950+
16951+ /* requested length too big for entire address space */
16952+ if (len > pax_task_size)
16953+ return -ENOMEM;
16954+
16955+ if (flags & MAP_FIXED)
16956+ return addr;
16957+
16958+#ifdef CONFIG_PAX_PAGEEXEC
16959+ if (!nx_enabled && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
16960+ goto bottomup;
16961+#endif
16962+
16963+#ifdef CONFIG_PAX_RANDMMAP
16964+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
16965+#endif
16966+
16967+ /* requesting a specific address */
16968+ if (addr) {
16969+ addr = PAGE_ALIGN(addr);
16970+ if (pax_task_size - len >= addr) {
16971+ vma = find_vma(mm, addr);
16972+ if (check_heap_stack_gap(vma, addr, len))
16973+ return addr;
16974+ }
16975+ }
16976+
16977+ /* check if free_area_cache is useful for us */
16978+ if (len <= mm->cached_hole_size) {
16979+ mm->cached_hole_size = 0;
16980+ mm->free_area_cache = mm->mmap_base;
16981+ }
16982+
16983+ /* either no address requested or can't fit in requested address hole */
16984+ addr = mm->free_area_cache;
16985+
16986+ /* make sure it can fit in the remaining address space */
16987+ if (addr > len) {
16988+ vma = find_vma(mm, addr-len);
16989+ if (check_heap_stack_gap(vma, addr - len, len))
16990+ /* remember the address as a hint for next time */
16991+ return (mm->free_area_cache = addr-len);
16992+ }
16993+
16994+ if (mm->mmap_base < len)
16995+ goto bottomup;
16996+
16997+ addr = mm->mmap_base-len;
16998+
16999+ do {
17000+ /*
17001+ * Lookup failure means no vma is above this address,
17002+ * else if new region fits below vma->vm_start,
17003+ * return with success:
17004+ */
17005+ vma = find_vma(mm, addr);
17006+ if (check_heap_stack_gap(vma, addr, len))
17007+ /* remember the address as a hint for next time */
17008+ return (mm->free_area_cache = addr);
17009+
17010+ /* remember the largest hole we saw so far */
17011+ if (addr + mm->cached_hole_size < vma->vm_start)
17012+ mm->cached_hole_size = vma->vm_start - addr;
17013+
17014+ /* try just below the current vma->vm_start */
17015+ addr = skip_heap_stack_gap(vma, len);
17016+ } while (!IS_ERR_VALUE(addr));
17017+
17018+bottomup:
17019+ /*
17020+ * A failed mmap() very likely causes application failure,
17021+ * so fall back to the bottom-up function here. This scenario
17022+ * can happen with large stack limits and large mmap()
17023+ * allocations.
17024+ */
17025+
17026+#ifdef CONFIG_PAX_SEGMEXEC
17027+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
17028+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
17029+ else
17030+#endif
17031+
17032+ mm->mmap_base = TASK_UNMAPPED_BASE;
17033+
17034+#ifdef CONFIG_PAX_RANDMMAP
17035+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17036+ mm->mmap_base += mm->delta_mmap;
17037+#endif
17038+
17039+ mm->free_area_cache = mm->mmap_base;
17040+ mm->cached_hole_size = ~0UL;
17041+ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17042+ /*
17043+ * Restore the topdown base:
17044+ */
17045+ mm->mmap_base = base;
17046+ mm->free_area_cache = base;
17047+ mm->cached_hole_size = ~0UL;
17048+
17049+ return addr;
17050+}
17051
17052 struct sel_arg_struct {
17053 unsigned long n;
17054@@ -93,7 +314,7 @@ asmlinkage int sys_ipc(uint call, int fi
17055 return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL);
17056 case SEMTIMEDOP:
17057 return sys_semtimedop(first, (struct sembuf __user *)ptr, second,
17058- (const struct timespec __user *)fifth);
17059+ (__force const struct timespec __user *)fifth);
17060
17061 case SEMGET:
17062 return sys_semget(first, second, third);
17063@@ -140,7 +361,7 @@ asmlinkage int sys_ipc(uint call, int fi
17064 ret = do_shmat(first, (char __user *) ptr, second, &raddr);
17065 if (ret)
17066 return ret;
17067- return put_user(raddr, (ulong __user *) third);
17068+ return put_user(raddr, (__force ulong __user *) third);
17069 }
17070 case 1: /* iBCS2 emulator entry point */
17071 if (!segment_eq(get_fs(), get_ds()))
17072@@ -207,17 +428,3 @@ asmlinkage int sys_olduname(struct oldol
17073
17074 return error;
17075 }
17076-
17077-
17078-/*
17079- * Do a system call from kernel instead of calling sys_execve so we
17080- * end up with proper pt_regs.
17081- */
17082-int kernel_execve(const char *filename, char *const argv[], char *const envp[])
17083-{
17084- long __res;
17085- asm volatile ("push %%ebx ; movl %2,%%ebx ; int $0x80 ; pop %%ebx"
17086- : "=a" (__res)
17087- : "0" (__NR_execve), "ri" (filename), "c" (argv), "d" (envp) : "memory");
17088- return __res;
17089-}
17090diff -urNp linux-2.6.32.43/arch/x86/kernel/sys_x86_64.c linux-2.6.32.43/arch/x86/kernel/sys_x86_64.c
17091--- linux-2.6.32.43/arch/x86/kernel/sys_x86_64.c 2011-03-27 14:31:47.000000000 -0400
17092+++ linux-2.6.32.43/arch/x86/kernel/sys_x86_64.c 2011-04-17 15:56:46.000000000 -0400
17093@@ -32,8 +32,8 @@ out:
17094 return error;
17095 }
17096
17097-static void find_start_end(unsigned long flags, unsigned long *begin,
17098- unsigned long *end)
17099+static void find_start_end(struct mm_struct *mm, unsigned long flags,
17100+ unsigned long *begin, unsigned long *end)
17101 {
17102 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) {
17103 unsigned long new_begin;
17104@@ -52,7 +52,7 @@ static void find_start_end(unsigned long
17105 *begin = new_begin;
17106 }
17107 } else {
17108- *begin = TASK_UNMAPPED_BASE;
17109+ *begin = mm->mmap_base;
17110 *end = TASK_SIZE;
17111 }
17112 }
17113@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp
17114 if (flags & MAP_FIXED)
17115 return addr;
17116
17117- find_start_end(flags, &begin, &end);
17118+ find_start_end(mm, flags, &begin, &end);
17119
17120 if (len > end)
17121 return -ENOMEM;
17122
17123+#ifdef CONFIG_PAX_RANDMMAP
17124+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17125+#endif
17126+
17127 if (addr) {
17128 addr = PAGE_ALIGN(addr);
17129 vma = find_vma(mm, addr);
17130- if (end - len >= addr &&
17131- (!vma || addr + len <= vma->vm_start))
17132+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len))
17133 return addr;
17134 }
17135 if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32))
17136@@ -106,7 +109,7 @@ full_search:
17137 }
17138 return -ENOMEM;
17139 }
17140- if (!vma || addr + len <= vma->vm_start) {
17141+ if (check_heap_stack_gap(vma, addr, len)) {
17142 /*
17143 * Remember the place where we stopped the search:
17144 */
17145@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi
17146 {
17147 struct vm_area_struct *vma;
17148 struct mm_struct *mm = current->mm;
17149- unsigned long addr = addr0;
17150+ unsigned long base = mm->mmap_base, addr = addr0;
17151
17152 /* requested length too big for entire address space */
17153 if (len > TASK_SIZE)
17154@@ -141,13 +144,18 @@ arch_get_unmapped_area_topdown(struct fi
17155 if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT))
17156 goto bottomup;
17157
17158+#ifdef CONFIG_PAX_RANDMMAP
17159+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
17160+#endif
17161+
17162 /* requesting a specific address */
17163 if (addr) {
17164 addr = PAGE_ALIGN(addr);
17165- vma = find_vma(mm, addr);
17166- if (TASK_SIZE - len >= addr &&
17167- (!vma || addr + len <= vma->vm_start))
17168- return addr;
17169+ if (TASK_SIZE - len >= addr) {
17170+ vma = find_vma(mm, addr);
17171+ if (check_heap_stack_gap(vma, addr, len))
17172+ return addr;
17173+ }
17174 }
17175
17176 /* check if free_area_cache is useful for us */
17177@@ -162,7 +170,7 @@ arch_get_unmapped_area_topdown(struct fi
17178 /* make sure it can fit in the remaining address space */
17179 if (addr > len) {
17180 vma = find_vma(mm, addr-len);
17181- if (!vma || addr <= vma->vm_start)
17182+ if (check_heap_stack_gap(vma, addr - len, len))
17183 /* remember the address as a hint for next time */
17184 return mm->free_area_cache = addr-len;
17185 }
17186@@ -179,7 +187,7 @@ arch_get_unmapped_area_topdown(struct fi
17187 * return with success:
17188 */
17189 vma = find_vma(mm, addr);
17190- if (!vma || addr+len <= vma->vm_start)
17191+ if (check_heap_stack_gap(vma, addr, len))
17192 /* remember the address as a hint for next time */
17193 return mm->free_area_cache = addr;
17194
17195@@ -188,8 +196,8 @@ arch_get_unmapped_area_topdown(struct fi
17196 mm->cached_hole_size = vma->vm_start - addr;
17197
17198 /* try just below the current vma->vm_start */
17199- addr = vma->vm_start-len;
17200- } while (len < vma->vm_start);
17201+ addr = skip_heap_stack_gap(vma, len);
17202+ } while (!IS_ERR_VALUE(addr));
17203
17204 bottomup:
17205 /*
17206@@ -198,13 +206,21 @@ bottomup:
17207 * can happen with large stack limits and large mmap()
17208 * allocations.
17209 */
17210+ mm->mmap_base = TASK_UNMAPPED_BASE;
17211+
17212+#ifdef CONFIG_PAX_RANDMMAP
17213+ if (mm->pax_flags & MF_PAX_RANDMMAP)
17214+ mm->mmap_base += mm->delta_mmap;
17215+#endif
17216+
17217+ mm->free_area_cache = mm->mmap_base;
17218 mm->cached_hole_size = ~0UL;
17219- mm->free_area_cache = TASK_UNMAPPED_BASE;
17220 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
17221 /*
17222 * Restore the topdown base:
17223 */
17224- mm->free_area_cache = mm->mmap_base;
17225+ mm->mmap_base = base;
17226+ mm->free_area_cache = base;
17227 mm->cached_hole_size = ~0UL;
17228
17229 return addr;
17230diff -urNp linux-2.6.32.43/arch/x86/kernel/tboot.c linux-2.6.32.43/arch/x86/kernel/tboot.c
17231--- linux-2.6.32.43/arch/x86/kernel/tboot.c 2011-03-27 14:31:47.000000000 -0400
17232+++ linux-2.6.32.43/arch/x86/kernel/tboot.c 2011-05-22 23:02:03.000000000 -0400
17233@@ -216,7 +216,7 @@ static int tboot_setup_sleep(void)
17234
17235 void tboot_shutdown(u32 shutdown_type)
17236 {
17237- void (*shutdown)(void);
17238+ void (* __noreturn shutdown)(void);
17239
17240 if (!tboot_enabled())
17241 return;
17242@@ -238,7 +238,7 @@ void tboot_shutdown(u32 shutdown_type)
17243
17244 switch_to_tboot_pt();
17245
17246- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
17247+ shutdown = (void *)tboot->shutdown_entry;
17248 shutdown();
17249
17250 /* should not reach here */
17251@@ -295,7 +295,7 @@ void tboot_sleep(u8 sleep_state, u32 pm1
17252 tboot_shutdown(acpi_shutdown_map[sleep_state]);
17253 }
17254
17255-static atomic_t ap_wfs_count;
17256+static atomic_unchecked_t ap_wfs_count;
17257
17258 static int tboot_wait_for_aps(int num_aps)
17259 {
17260@@ -319,9 +319,9 @@ static int __cpuinit tboot_cpu_callback(
17261 {
17262 switch (action) {
17263 case CPU_DYING:
17264- atomic_inc(&ap_wfs_count);
17265+ atomic_inc_unchecked(&ap_wfs_count);
17266 if (num_online_cpus() == 1)
17267- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
17268+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
17269 return NOTIFY_BAD;
17270 break;
17271 }
17272@@ -340,7 +340,7 @@ static __init int tboot_late_init(void)
17273
17274 tboot_create_trampoline();
17275
17276- atomic_set(&ap_wfs_count, 0);
17277+ atomic_set_unchecked(&ap_wfs_count, 0);
17278 register_hotcpu_notifier(&tboot_cpu_notifier);
17279 return 0;
17280 }
17281diff -urNp linux-2.6.32.43/arch/x86/kernel/time.c linux-2.6.32.43/arch/x86/kernel/time.c
17282--- linux-2.6.32.43/arch/x86/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
17283+++ linux-2.6.32.43/arch/x86/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
17284@@ -26,17 +26,13 @@
17285 int timer_ack;
17286 #endif
17287
17288-#ifdef CONFIG_X86_64
17289-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES;
17290-#endif
17291-
17292 unsigned long profile_pc(struct pt_regs *regs)
17293 {
17294 unsigned long pc = instruction_pointer(regs);
17295
17296- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
17297+ if (!user_mode(regs) && in_lock_functions(pc)) {
17298 #ifdef CONFIG_FRAME_POINTER
17299- return *(unsigned long *)(regs->bp + sizeof(long));
17300+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
17301 #else
17302 unsigned long *sp =
17303 (unsigned long *)kernel_stack_pointer(regs);
17304@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs
17305 * or above a saved flags. Eflags has bits 22-31 zero,
17306 * kernel addresses don't.
17307 */
17308+
17309+#ifdef CONFIG_PAX_KERNEXEC
17310+ return ktla_ktva(sp[0]);
17311+#else
17312 if (sp[0] >> 22)
17313 return sp[0];
17314 if (sp[1] >> 22)
17315 return sp[1];
17316 #endif
17317+
17318+#endif
17319 }
17320 return pc;
17321 }
17322diff -urNp linux-2.6.32.43/arch/x86/kernel/tls.c linux-2.6.32.43/arch/x86/kernel/tls.c
17323--- linux-2.6.32.43/arch/x86/kernel/tls.c 2011-03-27 14:31:47.000000000 -0400
17324+++ linux-2.6.32.43/arch/x86/kernel/tls.c 2011-04-17 15:56:46.000000000 -0400
17325@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc
17326 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
17327 return -EINVAL;
17328
17329+#ifdef CONFIG_PAX_SEGMEXEC
17330+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
17331+ return -EINVAL;
17332+#endif
17333+
17334 set_tls_desc(p, idx, &info, 1);
17335
17336 return 0;
17337diff -urNp linux-2.6.32.43/arch/x86/kernel/trampoline_32.S linux-2.6.32.43/arch/x86/kernel/trampoline_32.S
17338--- linux-2.6.32.43/arch/x86/kernel/trampoline_32.S 2011-03-27 14:31:47.000000000 -0400
17339+++ linux-2.6.32.43/arch/x86/kernel/trampoline_32.S 2011-04-17 15:56:46.000000000 -0400
17340@@ -32,6 +32,12 @@
17341 #include <asm/segment.h>
17342 #include <asm/page_types.h>
17343
17344+#ifdef CONFIG_PAX_KERNEXEC
17345+#define ta(X) (X)
17346+#else
17347+#define ta(X) ((X) - __PAGE_OFFSET)
17348+#endif
17349+
17350 /* We can free up trampoline after bootup if cpu hotplug is not supported. */
17351 __CPUINITRODATA
17352 .code16
17353@@ -60,7 +66,7 @@ r_base = .
17354 inc %ax # protected mode (PE) bit
17355 lmsw %ax # into protected mode
17356 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
17357- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
17358+ ljmpl $__BOOT_CS, $ta(startup_32_smp)
17359
17360 # These need to be in the same 64K segment as the above;
17361 # hence we don't use the boot_gdt_descr defined in head.S
17362diff -urNp linux-2.6.32.43/arch/x86/kernel/trampoline_64.S linux-2.6.32.43/arch/x86/kernel/trampoline_64.S
17363--- linux-2.6.32.43/arch/x86/kernel/trampoline_64.S 2011-03-27 14:31:47.000000000 -0400
17364+++ linux-2.6.32.43/arch/x86/kernel/trampoline_64.S 2011-07-01 18:53:26.000000000 -0400
17365@@ -91,7 +91,7 @@ startup_32:
17366 movl $__KERNEL_DS, %eax # Initialize the %ds segment register
17367 movl %eax, %ds
17368
17369- movl $X86_CR4_PAE, %eax
17370+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax
17371 movl %eax, %cr4 # Enable PAE mode
17372
17373 # Setup trampoline 4 level pagetables
17374@@ -127,7 +127,7 @@ startup_64:
17375 no_longmode:
17376 hlt
17377 jmp no_longmode
17378-#include "verify_cpu_64.S"
17379+#include "verify_cpu.S"
17380
17381 # Careful these need to be in the same 64K segment as the above;
17382 tidt:
17383@@ -138,7 +138,7 @@ tidt:
17384 # so the kernel can live anywhere
17385 .balign 4
17386 tgdt:
17387- .short tgdt_end - tgdt # gdt limit
17388+ .short tgdt_end - tgdt - 1 # gdt limit
17389 .long tgdt - r_base
17390 .short 0
17391 .quad 0x00cf9b000000ffff # __KERNEL32_CS
17392diff -urNp linux-2.6.32.43/arch/x86/kernel/traps.c linux-2.6.32.43/arch/x86/kernel/traps.c
17393--- linux-2.6.32.43/arch/x86/kernel/traps.c 2011-03-27 14:31:47.000000000 -0400
17394+++ linux-2.6.32.43/arch/x86/kernel/traps.c 2011-07-06 19:53:33.000000000 -0400
17395@@ -69,12 +69,6 @@ asmlinkage int system_call(void);
17396
17397 /* Do we ignore FPU interrupts ? */
17398 char ignore_fpu_irq;
17399-
17400-/*
17401- * The IDT has to be page-aligned to simplify the Pentium
17402- * F0 0F bug workaround.
17403- */
17404-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
17405 #endif
17406
17407 DECLARE_BITMAP(used_vectors, NR_VECTORS);
17408@@ -112,19 +106,19 @@ static inline void preempt_conditional_c
17409 static inline void
17410 die_if_kernel(const char *str, struct pt_regs *regs, long err)
17411 {
17412- if (!user_mode_vm(regs))
17413+ if (!user_mode(regs))
17414 die(str, regs, err);
17415 }
17416 #endif
17417
17418 static void __kprobes
17419-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
17420+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
17421 long error_code, siginfo_t *info)
17422 {
17423 struct task_struct *tsk = current;
17424
17425 #ifdef CONFIG_X86_32
17426- if (regs->flags & X86_VM_MASK) {
17427+ if (v8086_mode(regs)) {
17428 /*
17429 * traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
17430 * On nmi (interrupt 2), do_trap should not be called.
17431@@ -135,7 +129,7 @@ do_trap(int trapnr, int signr, char *str
17432 }
17433 #endif
17434
17435- if (!user_mode(regs))
17436+ if (!user_mode_novm(regs))
17437 goto kernel_trap;
17438
17439 #ifdef CONFIG_X86_32
17440@@ -158,7 +152,7 @@ trap_signal:
17441 printk_ratelimit()) {
17442 printk(KERN_INFO
17443 "%s[%d] trap %s ip:%lx sp:%lx error:%lx",
17444- tsk->comm, tsk->pid, str,
17445+ tsk->comm, task_pid_nr(tsk), str,
17446 regs->ip, regs->sp, error_code);
17447 print_vma_addr(" in ", regs->ip);
17448 printk("\n");
17449@@ -175,8 +169,20 @@ kernel_trap:
17450 if (!fixup_exception(regs)) {
17451 tsk->thread.error_code = error_code;
17452 tsk->thread.trap_no = trapnr;
17453+
17454+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17455+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
17456+ str = "PAX: suspicious stack segment fault";
17457+#endif
17458+
17459 die(str, regs, error_code);
17460 }
17461+
17462+#ifdef CONFIG_PAX_REFCOUNT
17463+ if (trapnr == 4)
17464+ pax_report_refcount_overflow(regs);
17465+#endif
17466+
17467 return;
17468
17469 #ifdef CONFIG_X86_32
17470@@ -265,14 +271,30 @@ do_general_protection(struct pt_regs *re
17471 conditional_sti(regs);
17472
17473 #ifdef CONFIG_X86_32
17474- if (regs->flags & X86_VM_MASK)
17475+ if (v8086_mode(regs))
17476 goto gp_in_vm86;
17477 #endif
17478
17479 tsk = current;
17480- if (!user_mode(regs))
17481+ if (!user_mode_novm(regs))
17482 goto gp_in_kernel;
17483
17484+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
17485+ if (!nx_enabled && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
17486+ struct mm_struct *mm = tsk->mm;
17487+ unsigned long limit;
17488+
17489+ down_write(&mm->mmap_sem);
17490+ limit = mm->context.user_cs_limit;
17491+ if (limit < TASK_SIZE) {
17492+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
17493+ up_write(&mm->mmap_sem);
17494+ return;
17495+ }
17496+ up_write(&mm->mmap_sem);
17497+ }
17498+#endif
17499+
17500 tsk->thread.error_code = error_code;
17501 tsk->thread.trap_no = 13;
17502
17503@@ -305,6 +327,13 @@ gp_in_kernel:
17504 if (notify_die(DIE_GPF, "general protection fault", regs,
17505 error_code, 13, SIGSEGV) == NOTIFY_STOP)
17506 return;
17507+
17508+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17509+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
17510+ die("PAX: suspicious general protection fault", regs, error_code);
17511+ else
17512+#endif
17513+
17514 die("general protection fault", regs, error_code);
17515 }
17516
17517@@ -435,6 +464,17 @@ static notrace __kprobes void default_do
17518 dotraplinkage notrace __kprobes void
17519 do_nmi(struct pt_regs *regs, long error_code)
17520 {
17521+
17522+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
17523+ if (!user_mode(regs)) {
17524+ unsigned long cs = regs->cs & 0xFFFF;
17525+ unsigned long ip = ktva_ktla(regs->ip);
17526+
17527+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
17528+ regs->ip = ip;
17529+ }
17530+#endif
17531+
17532 nmi_enter();
17533
17534 inc_irq_stat(__nmi_count);
17535@@ -558,7 +598,7 @@ dotraplinkage void __kprobes do_debug(st
17536 }
17537
17538 #ifdef CONFIG_X86_32
17539- if (regs->flags & X86_VM_MASK)
17540+ if (v8086_mode(regs))
17541 goto debug_vm86;
17542 #endif
17543
17544@@ -570,7 +610,7 @@ dotraplinkage void __kprobes do_debug(st
17545 * kernel space (but re-enable TF when returning to user mode).
17546 */
17547 if (condition & DR_STEP) {
17548- if (!user_mode(regs))
17549+ if (!user_mode_novm(regs))
17550 goto clear_TF_reenable;
17551 }
17552
17553@@ -757,7 +797,7 @@ do_simd_coprocessor_error(struct pt_regs
17554 * Handle strange cache flush from user space exception
17555 * in all other cases. This is undocumented behaviour.
17556 */
17557- if (regs->flags & X86_VM_MASK) {
17558+ if (v8086_mode(regs)) {
17559 handle_vm86_fault((struct kernel_vm86_regs *)regs, error_code);
17560 return;
17561 }
17562@@ -798,7 +838,7 @@ asmlinkage void __attribute__((weak)) sm
17563 void __math_state_restore(void)
17564 {
17565 struct thread_info *thread = current_thread_info();
17566- struct task_struct *tsk = thread->task;
17567+ struct task_struct *tsk = current;
17568
17569 /*
17570 * Paranoid restore. send a SIGSEGV if we fail to restore the state.
17571@@ -825,8 +865,7 @@ void __math_state_restore(void)
17572 */
17573 asmlinkage void math_state_restore(void)
17574 {
17575- struct thread_info *thread = current_thread_info();
17576- struct task_struct *tsk = thread->task;
17577+ struct task_struct *tsk = current;
17578
17579 if (!tsk_used_math(tsk)) {
17580 local_irq_enable();
17581diff -urNp linux-2.6.32.43/arch/x86/kernel/verify_cpu_64.S linux-2.6.32.43/arch/x86/kernel/verify_cpu_64.S
17582--- linux-2.6.32.43/arch/x86/kernel/verify_cpu_64.S 2011-03-27 14:31:47.000000000 -0400
17583+++ linux-2.6.32.43/arch/x86/kernel/verify_cpu_64.S 1969-12-31 19:00:00.000000000 -0500
17584@@ -1,105 +0,0 @@
17585-/*
17586- *
17587- * verify_cpu.S - Code for cpu long mode and SSE verification. This
17588- * code has been borrowed from boot/setup.S and was introduced by
17589- * Andi Kleen.
17590- *
17591- * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17592- * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17593- * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17594- *
17595- * This source code is licensed under the GNU General Public License,
17596- * Version 2. See the file COPYING for more details.
17597- *
17598- * This is a common code for verification whether CPU supports
17599- * long mode and SSE or not. It is not called directly instead this
17600- * file is included at various places and compiled in that context.
17601- * Following are the current usage.
17602- *
17603- * This file is included by both 16bit and 32bit code.
17604- *
17605- * arch/x86_64/boot/setup.S : Boot cpu verification (16bit)
17606- * arch/x86_64/boot/compressed/head.S: Boot cpu verification (32bit)
17607- * arch/x86_64/kernel/trampoline.S: secondary processor verfication (16bit)
17608- * arch/x86_64/kernel/acpi/wakeup.S:Verfication at resume (16bit)
17609- *
17610- * verify_cpu, returns the status of cpu check in register %eax.
17611- * 0: Success 1: Failure
17612- *
17613- * The caller needs to check for the error code and take the action
17614- * appropriately. Either display a message or halt.
17615- */
17616-
17617-#include <asm/cpufeature.h>
17618-
17619-verify_cpu:
17620- pushfl # Save caller passed flags
17621- pushl $0 # Kill any dangerous flags
17622- popfl
17623-
17624- pushfl # standard way to check for cpuid
17625- popl %eax
17626- movl %eax,%ebx
17627- xorl $0x200000,%eax
17628- pushl %eax
17629- popfl
17630- pushfl
17631- popl %eax
17632- cmpl %eax,%ebx
17633- jz verify_cpu_no_longmode # cpu has no cpuid
17634-
17635- movl $0x0,%eax # See if cpuid 1 is implemented
17636- cpuid
17637- cmpl $0x1,%eax
17638- jb verify_cpu_no_longmode # no cpuid 1
17639-
17640- xor %di,%di
17641- cmpl $0x68747541,%ebx # AuthenticAMD
17642- jnz verify_cpu_noamd
17643- cmpl $0x69746e65,%edx
17644- jnz verify_cpu_noamd
17645- cmpl $0x444d4163,%ecx
17646- jnz verify_cpu_noamd
17647- mov $1,%di # cpu is from AMD
17648-
17649-verify_cpu_noamd:
17650- movl $0x1,%eax # Does the cpu have what it takes
17651- cpuid
17652- andl $REQUIRED_MASK0,%edx
17653- xorl $REQUIRED_MASK0,%edx
17654- jnz verify_cpu_no_longmode
17655-
17656- movl $0x80000000,%eax # See if extended cpuid is implemented
17657- cpuid
17658- cmpl $0x80000001,%eax
17659- jb verify_cpu_no_longmode # no extended cpuid
17660-
17661- movl $0x80000001,%eax # Does the cpu have what it takes
17662- cpuid
17663- andl $REQUIRED_MASK1,%edx
17664- xorl $REQUIRED_MASK1,%edx
17665- jnz verify_cpu_no_longmode
17666-
17667-verify_cpu_sse_test:
17668- movl $1,%eax
17669- cpuid
17670- andl $SSE_MASK,%edx
17671- cmpl $SSE_MASK,%edx
17672- je verify_cpu_sse_ok
17673- test %di,%di
17674- jz verify_cpu_no_longmode # only try to force SSE on AMD
17675- movl $0xc0010015,%ecx # HWCR
17676- rdmsr
17677- btr $15,%eax # enable SSE
17678- wrmsr
17679- xor %di,%di # don't loop
17680- jmp verify_cpu_sse_test # try again
17681-
17682-verify_cpu_no_longmode:
17683- popfl # Restore caller passed flags
17684- movl $1,%eax
17685- ret
17686-verify_cpu_sse_ok:
17687- popfl # Restore caller passed flags
17688- xorl %eax, %eax
17689- ret
17690diff -urNp linux-2.6.32.43/arch/x86/kernel/verify_cpu.S linux-2.6.32.43/arch/x86/kernel/verify_cpu.S
17691--- linux-2.6.32.43/arch/x86/kernel/verify_cpu.S 1969-12-31 19:00:00.000000000 -0500
17692+++ linux-2.6.32.43/arch/x86/kernel/verify_cpu.S 2011-07-01 18:28:42.000000000 -0400
17693@@ -0,0 +1,140 @@
17694+/*
17695+ *
17696+ * verify_cpu.S - Code for cpu long mode and SSE verification. This
17697+ * code has been borrowed from boot/setup.S and was introduced by
17698+ * Andi Kleen.
17699+ *
17700+ * Copyright (c) 2007 Andi Kleen (ak@suse.de)
17701+ * Copyright (c) 2007 Eric Biederman (ebiederm@xmission.com)
17702+ * Copyright (c) 2007 Vivek Goyal (vgoyal@in.ibm.com)
17703+ * Copyright (c) 2010 Kees Cook (kees.cook@canonical.com)
17704+ *
17705+ * This source code is licensed under the GNU General Public License,
17706+ * Version 2. See the file COPYING for more details.
17707+ *
17708+ * This is a common code for verification whether CPU supports
17709+ * long mode and SSE or not. It is not called directly instead this
17710+ * file is included at various places and compiled in that context.
17711+ * This file is expected to run in 32bit code. Currently:
17712+ *
17713+ * arch/x86/boot/compressed/head_64.S: Boot cpu verification
17714+ * arch/x86/kernel/trampoline_64.S: secondary processor verification
17715+ * arch/x86/kernel/head_32.S: processor startup
17716+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
17717+ *
17718+ * verify_cpu, returns the status of longmode and SSE in register %eax.
17719+ * 0: Success 1: Failure
17720+ *
17721+ * On Intel, the XD_DISABLE flag will be cleared as a side-effect.
17722+ *
17723+ * The caller needs to check for the error code and take the action
17724+ * appropriately. Either display a message or halt.
17725+ */
17726+
17727+#include <asm/cpufeature.h>
17728+#include <asm/msr-index.h>
17729+
17730+verify_cpu:
17731+ pushfl # Save caller passed flags
17732+ pushl $0 # Kill any dangerous flags
17733+ popfl
17734+
17735+ pushfl # standard way to check for cpuid
17736+ popl %eax
17737+ movl %eax,%ebx
17738+ xorl $0x200000,%eax
17739+ pushl %eax
17740+ popfl
17741+ pushfl
17742+ popl %eax
17743+ cmpl %eax,%ebx
17744+ jz verify_cpu_no_longmode # cpu has no cpuid
17745+
17746+ movl $0x0,%eax # See if cpuid 1 is implemented
17747+ cpuid
17748+ cmpl $0x1,%eax
17749+ jb verify_cpu_no_longmode # no cpuid 1
17750+
17751+ xor %di,%di
17752+ cmpl $0x68747541,%ebx # AuthenticAMD
17753+ jnz verify_cpu_noamd
17754+ cmpl $0x69746e65,%edx
17755+ jnz verify_cpu_noamd
17756+ cmpl $0x444d4163,%ecx
17757+ jnz verify_cpu_noamd
17758+ mov $1,%di # cpu is from AMD
17759+ jmp verify_cpu_check
17760+
17761+verify_cpu_noamd:
17762+ cmpl $0x756e6547,%ebx # GenuineIntel?
17763+ jnz verify_cpu_check
17764+ cmpl $0x49656e69,%edx
17765+ jnz verify_cpu_check
17766+ cmpl $0x6c65746e,%ecx
17767+ jnz verify_cpu_check
17768+
17769+ # only call IA32_MISC_ENABLE when:
17770+ # family > 6 || (family == 6 && model >= 0xd)
17771+ movl $0x1, %eax # check CPU family and model
17772+ cpuid
17773+ movl %eax, %ecx
17774+
17775+ andl $0x0ff00f00, %eax # mask family and extended family
17776+ shrl $8, %eax
17777+ cmpl $6, %eax
17778+ ja verify_cpu_clear_xd # family > 6, ok
17779+ jb verify_cpu_check # family < 6, skip
17780+
17781+ andl $0x000f00f0, %ecx # mask model and extended model
17782+ shrl $4, %ecx
17783+ cmpl $0xd, %ecx
17784+ jb verify_cpu_check # family == 6, model < 0xd, skip
17785+
17786+verify_cpu_clear_xd:
17787+ movl $MSR_IA32_MISC_ENABLE, %ecx
17788+ rdmsr
17789+ btrl $2, %edx # clear MSR_IA32_MISC_ENABLE_XD_DISABLE
17790+ jnc verify_cpu_check # only write MSR if bit was changed
17791+ wrmsr
17792+
17793+verify_cpu_check:
17794+ movl $0x1,%eax # Does the cpu have what it takes
17795+ cpuid
17796+ andl $REQUIRED_MASK0,%edx
17797+ xorl $REQUIRED_MASK0,%edx
17798+ jnz verify_cpu_no_longmode
17799+
17800+ movl $0x80000000,%eax # See if extended cpuid is implemented
17801+ cpuid
17802+ cmpl $0x80000001,%eax
17803+ jb verify_cpu_no_longmode # no extended cpuid
17804+
17805+ movl $0x80000001,%eax # Does the cpu have what it takes
17806+ cpuid
17807+ andl $REQUIRED_MASK1,%edx
17808+ xorl $REQUIRED_MASK1,%edx
17809+ jnz verify_cpu_no_longmode
17810+
17811+verify_cpu_sse_test:
17812+ movl $1,%eax
17813+ cpuid
17814+ andl $SSE_MASK,%edx
17815+ cmpl $SSE_MASK,%edx
17816+ je verify_cpu_sse_ok
17817+ test %di,%di
17818+ jz verify_cpu_no_longmode # only try to force SSE on AMD
17819+ movl $MSR_K7_HWCR,%ecx
17820+ rdmsr
17821+ btr $15,%eax # enable SSE
17822+ wrmsr
17823+ xor %di,%di # don't loop
17824+ jmp verify_cpu_sse_test # try again
17825+
17826+verify_cpu_no_longmode:
17827+ popfl # Restore caller passed flags
17828+ movl $1,%eax
17829+ ret
17830+verify_cpu_sse_ok:
17831+ popfl # Restore caller passed flags
17832+ xorl %eax, %eax
17833+ ret
17834diff -urNp linux-2.6.32.43/arch/x86/kernel/vm86_32.c linux-2.6.32.43/arch/x86/kernel/vm86_32.c
17835--- linux-2.6.32.43/arch/x86/kernel/vm86_32.c 2011-03-27 14:31:47.000000000 -0400
17836+++ linux-2.6.32.43/arch/x86/kernel/vm86_32.c 2011-04-17 15:56:46.000000000 -0400
17837@@ -41,6 +41,7 @@
17838 #include <linux/ptrace.h>
17839 #include <linux/audit.h>
17840 #include <linux/stddef.h>
17841+#include <linux/grsecurity.h>
17842
17843 #include <asm/uaccess.h>
17844 #include <asm/io.h>
17845@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke
17846 do_exit(SIGSEGV);
17847 }
17848
17849- tss = &per_cpu(init_tss, get_cpu());
17850+ tss = init_tss + get_cpu();
17851 current->thread.sp0 = current->thread.saved_sp0;
17852 current->thread.sysenter_cs = __KERNEL_CS;
17853 load_sp0(tss, &current->thread);
17854@@ -208,6 +209,13 @@ int sys_vm86old(struct pt_regs *regs)
17855 struct task_struct *tsk;
17856 int tmp, ret = -EPERM;
17857
17858+#ifdef CONFIG_GRKERNSEC_VM86
17859+ if (!capable(CAP_SYS_RAWIO)) {
17860+ gr_handle_vm86();
17861+ goto out;
17862+ }
17863+#endif
17864+
17865 tsk = current;
17866 if (tsk->thread.saved_sp0)
17867 goto out;
17868@@ -238,6 +246,14 @@ int sys_vm86(struct pt_regs *regs)
17869 int tmp, ret;
17870 struct vm86plus_struct __user *v86;
17871
17872+#ifdef CONFIG_GRKERNSEC_VM86
17873+ if (!capable(CAP_SYS_RAWIO)) {
17874+ gr_handle_vm86();
17875+ ret = -EPERM;
17876+ goto out;
17877+ }
17878+#endif
17879+
17880 tsk = current;
17881 switch (regs->bx) {
17882 case VM86_REQUEST_IRQ:
17883@@ -324,7 +340,7 @@ static void do_sys_vm86(struct kernel_vm
17884 tsk->thread.saved_fs = info->regs32->fs;
17885 tsk->thread.saved_gs = get_user_gs(info->regs32);
17886
17887- tss = &per_cpu(init_tss, get_cpu());
17888+ tss = init_tss + get_cpu();
17889 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
17890 if (cpu_has_sep)
17891 tsk->thread.sysenter_cs = 0;
17892@@ -529,7 +545,7 @@ static void do_int(struct kernel_vm86_re
17893 goto cannot_handle;
17894 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
17895 goto cannot_handle;
17896- intr_ptr = (unsigned long __user *) (i << 2);
17897+ intr_ptr = (__force unsigned long __user *) (i << 2);
17898 if (get_user(segoffs, intr_ptr))
17899 goto cannot_handle;
17900 if ((segoffs >> 16) == BIOSSEG)
17901diff -urNp linux-2.6.32.43/arch/x86/kernel/vmi_32.c linux-2.6.32.43/arch/x86/kernel/vmi_32.c
17902--- linux-2.6.32.43/arch/x86/kernel/vmi_32.c 2011-03-27 14:31:47.000000000 -0400
17903+++ linux-2.6.32.43/arch/x86/kernel/vmi_32.c 2011-08-05 20:33:55.000000000 -0400
17904@@ -44,12 +44,17 @@ typedef u32 __attribute__((regparm(1)))
17905 typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int);
17906
17907 #define call_vrom_func(rom,func) \
17908- (((VROMFUNC *)(rom->func))())
17909+ (((VROMFUNC *)(ktva_ktla(rom.func)))())
17910
17911 #define call_vrom_long_func(rom,func,arg) \
17912- (((VROMLONGFUNC *)(rom->func)) (arg))
17913+({\
17914+ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\
17915+ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\
17916+ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\
17917+ __reloc;\
17918+})
17919
17920-static struct vrom_header *vmi_rom;
17921+static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE)));
17922 static int disable_pge;
17923 static int disable_pse;
17924 static int disable_sep;
17925@@ -76,10 +81,10 @@ static struct {
17926 void (*set_initial_ap_state)(int, int);
17927 void (*halt)(void);
17928 void (*set_lazy_mode)(int mode);
17929-} vmi_ops;
17930+} __no_const vmi_ops __read_only;
17931
17932 /* Cached VMI operations */
17933-struct vmi_timer_ops vmi_timer_ops;
17934+struct vmi_timer_ops vmi_timer_ops __read_only;
17935
17936 /*
17937 * VMI patching routines.
17938@@ -94,7 +99,7 @@ struct vmi_timer_ops vmi_timer_ops;
17939 static inline void patch_offset(void *insnbuf,
17940 unsigned long ip, unsigned long dest)
17941 {
17942- *(unsigned long *)(insnbuf+1) = dest-ip-5;
17943+ *(unsigned long *)(insnbuf+1) = dest-ip-5;
17944 }
17945
17946 static unsigned patch_internal(int call, unsigned len, void *insnbuf,
17947@@ -102,6 +107,7 @@ static unsigned patch_internal(int call,
17948 {
17949 u64 reloc;
17950 struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc;
17951+
17952 reloc = call_vrom_long_func(vmi_rom, get_reloc, call);
17953 switch(rel->type) {
17954 case VMI_RELOCATION_CALL_REL:
17955@@ -404,13 +410,13 @@ static void vmi_set_pud(pud_t *pudp, pud
17956
17957 static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
17958 {
17959- const pte_t pte = { .pte = 0 };
17960+ const pte_t pte = __pte(0ULL);
17961 vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0));
17962 }
17963
17964 static void vmi_pmd_clear(pmd_t *pmd)
17965 {
17966- const pte_t pte = { .pte = 0 };
17967+ const pte_t pte = __pte(0ULL);
17968 vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD);
17969 }
17970 #endif
17971@@ -438,10 +444,10 @@ vmi_startup_ipi_hook(int phys_apicid, un
17972 ap.ss = __KERNEL_DS;
17973 ap.esp = (unsigned long) start_esp;
17974
17975- ap.ds = __USER_DS;
17976- ap.es = __USER_DS;
17977+ ap.ds = __KERNEL_DS;
17978+ ap.es = __KERNEL_DS;
17979 ap.fs = __KERNEL_PERCPU;
17980- ap.gs = __KERNEL_STACK_CANARY;
17981+ savesegment(gs, ap.gs);
17982
17983 ap.eflags = 0;
17984
17985@@ -486,6 +492,18 @@ static void vmi_leave_lazy_mmu(void)
17986 paravirt_leave_lazy_mmu();
17987 }
17988
17989+#ifdef CONFIG_PAX_KERNEXEC
17990+static unsigned long vmi_pax_open_kernel(void)
17991+{
17992+ return 0;
17993+}
17994+
17995+static unsigned long vmi_pax_close_kernel(void)
17996+{
17997+ return 0;
17998+}
17999+#endif
18000+
18001 static inline int __init check_vmi_rom(struct vrom_header *rom)
18002 {
18003 struct pci_header *pci;
18004@@ -498,6 +516,10 @@ static inline int __init check_vmi_rom(s
18005 return 0;
18006 if (rom->vrom_signature != VMI_SIGNATURE)
18007 return 0;
18008+ if (rom->rom_length * 512 > sizeof(*rom)) {
18009+ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512);
18010+ return 0;
18011+ }
18012 if (rom->api_version_maj != VMI_API_REV_MAJOR ||
18013 rom->api_version_min+1 < VMI_API_REV_MINOR+1) {
18014 printk(KERN_WARNING "VMI: Found mismatched rom version %d.%d\n",
18015@@ -562,7 +584,7 @@ static inline int __init probe_vmi_rom(v
18016 struct vrom_header *romstart;
18017 romstart = (struct vrom_header *)isa_bus_to_virt(base);
18018 if (check_vmi_rom(romstart)) {
18019- vmi_rom = romstart;
18020+ vmi_rom = *romstart;
18021 return 1;
18022 }
18023 }
18024@@ -836,6 +858,11 @@ static inline int __init activate_vmi(vo
18025
18026 para_fill(pv_irq_ops.safe_halt, Halt);
18027
18028+#ifdef CONFIG_PAX_KERNEXEC
18029+ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel;
18030+ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel;
18031+#endif
18032+
18033 /*
18034 * Alternative instruction rewriting doesn't happen soon enough
18035 * to convert VMI_IRET to a call instead of a jump; so we have
18036@@ -853,16 +880,16 @@ static inline int __init activate_vmi(vo
18037
18038 void __init vmi_init(void)
18039 {
18040- if (!vmi_rom)
18041+ if (!vmi_rom.rom_signature)
18042 probe_vmi_rom();
18043 else
18044- check_vmi_rom(vmi_rom);
18045+ check_vmi_rom(&vmi_rom);
18046
18047 /* In case probing for or validating the ROM failed, basil */
18048- if (!vmi_rom)
18049+ if (!vmi_rom.rom_signature)
18050 return;
18051
18052- reserve_top_address(-vmi_rom->virtual_top);
18053+ reserve_top_address(-vmi_rom.virtual_top);
18054
18055 #ifdef CONFIG_X86_IO_APIC
18056 /* This is virtual hardware; timer routing is wired correctly */
18057@@ -874,7 +901,7 @@ void __init vmi_activate(void)
18058 {
18059 unsigned long flags;
18060
18061- if (!vmi_rom)
18062+ if (!vmi_rom.rom_signature)
18063 return;
18064
18065 local_irq_save(flags);
18066diff -urNp linux-2.6.32.43/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.43/arch/x86/kernel/vmlinux.lds.S
18067--- linux-2.6.32.43/arch/x86/kernel/vmlinux.lds.S 2011-03-27 14:31:47.000000000 -0400
18068+++ linux-2.6.32.43/arch/x86/kernel/vmlinux.lds.S 2011-04-17 15:56:46.000000000 -0400
18069@@ -26,6 +26,13 @@
18070 #include <asm/page_types.h>
18071 #include <asm/cache.h>
18072 #include <asm/boot.h>
18073+#include <asm/segment.h>
18074+
18075+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18076+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
18077+#else
18078+#define __KERNEL_TEXT_OFFSET 0
18079+#endif
18080
18081 #undef i386 /* in case the preprocessor is a 32bit one */
18082
18083@@ -34,40 +41,53 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF
18084 #ifdef CONFIG_X86_32
18085 OUTPUT_ARCH(i386)
18086 ENTRY(phys_startup_32)
18087-jiffies = jiffies_64;
18088 #else
18089 OUTPUT_ARCH(i386:x86-64)
18090 ENTRY(phys_startup_64)
18091-jiffies_64 = jiffies;
18092 #endif
18093
18094 PHDRS {
18095 text PT_LOAD FLAGS(5); /* R_E */
18096- data PT_LOAD FLAGS(7); /* RWE */
18097+#ifdef CONFIG_X86_32
18098+ module PT_LOAD FLAGS(5); /* R_E */
18099+#endif
18100+#ifdef CONFIG_XEN
18101+ rodata PT_LOAD FLAGS(5); /* R_E */
18102+#else
18103+ rodata PT_LOAD FLAGS(4); /* R__ */
18104+#endif
18105+ data PT_LOAD FLAGS(6); /* RW_ */
18106 #ifdef CONFIG_X86_64
18107 user PT_LOAD FLAGS(5); /* R_E */
18108+#endif
18109+ init.begin PT_LOAD FLAGS(6); /* RW_ */
18110 #ifdef CONFIG_SMP
18111 percpu PT_LOAD FLAGS(6); /* RW_ */
18112 #endif
18113+ text.init PT_LOAD FLAGS(5); /* R_E */
18114+ text.exit PT_LOAD FLAGS(5); /* R_E */
18115 init PT_LOAD FLAGS(7); /* RWE */
18116-#endif
18117 note PT_NOTE FLAGS(0); /* ___ */
18118 }
18119
18120 SECTIONS
18121 {
18122 #ifdef CONFIG_X86_32
18123- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
18124- phys_startup_32 = startup_32 - LOAD_OFFSET;
18125+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
18126 #else
18127- . = __START_KERNEL;
18128- phys_startup_64 = startup_64 - LOAD_OFFSET;
18129+ . = __START_KERNEL;
18130 #endif
18131
18132 /* Text and read-only data */
18133- .text : AT(ADDR(.text) - LOAD_OFFSET) {
18134- _text = .;
18135+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18136 /* bootstrapping code */
18137+#ifdef CONFIG_X86_32
18138+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18139+#else
18140+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18141+#endif
18142+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
18143+ _text = .;
18144 HEAD_TEXT
18145 #ifdef CONFIG_X86_32
18146 . = ALIGN(PAGE_SIZE);
18147@@ -82,28 +102,71 @@ SECTIONS
18148 IRQENTRY_TEXT
18149 *(.fixup)
18150 *(.gnu.warning)
18151- /* End of text section */
18152- _etext = .;
18153 } :text = 0x9090
18154
18155- NOTES :text :note
18156+ . += __KERNEL_TEXT_OFFSET;
18157+
18158+#ifdef CONFIG_X86_32
18159+ . = ALIGN(PAGE_SIZE);
18160+ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) {
18161+ *(.vmi.rom)
18162+ } :module
18163+
18164+ . = ALIGN(PAGE_SIZE);
18165+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
18166+
18167+#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES)
18168+ MODULES_EXEC_VADDR = .;
18169+ BYTE(0)
18170+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
18171+ . = ALIGN(HPAGE_SIZE);
18172+ MODULES_EXEC_END = . - 1;
18173+#endif
18174+
18175+ } :module
18176+#endif
18177
18178- EXCEPTION_TABLE(16) :text = 0x9090
18179+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
18180+ /* End of text section */
18181+ _etext = . - __KERNEL_TEXT_OFFSET;
18182+ }
18183+
18184+#ifdef CONFIG_X86_32
18185+ . = ALIGN(PAGE_SIZE);
18186+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
18187+ *(.idt)
18188+ . = ALIGN(PAGE_SIZE);
18189+ *(.empty_zero_page)
18190+ *(.swapper_pg_fixmap)
18191+ *(.swapper_pg_pmd)
18192+ *(.swapper_pg_dir)
18193+ *(.trampoline_pg_dir)
18194+ } :rodata
18195+#endif
18196+
18197+ . = ALIGN(PAGE_SIZE);
18198+ NOTES :rodata :note
18199+
18200+ EXCEPTION_TABLE(16) :rodata
18201
18202 RO_DATA(PAGE_SIZE)
18203
18204 /* Data */
18205 .data : AT(ADDR(.data) - LOAD_OFFSET) {
18206+
18207+#ifdef CONFIG_PAX_KERNEXEC
18208+ . = ALIGN(HPAGE_SIZE);
18209+#else
18210+ . = ALIGN(PAGE_SIZE);
18211+#endif
18212+
18213 /* Start of data section */
18214 _sdata = .;
18215
18216 /* init_task */
18217 INIT_TASK_DATA(THREAD_SIZE)
18218
18219-#ifdef CONFIG_X86_32
18220- /* 32 bit has nosave before _edata */
18221 NOSAVE_DATA
18222-#endif
18223
18224 PAGE_ALIGNED_DATA(PAGE_SIZE)
18225
18226@@ -112,6 +175,8 @@ SECTIONS
18227 DATA_DATA
18228 CONSTRUCTORS
18229
18230+ jiffies = jiffies_64;
18231+
18232 /* rarely changed data like cpu maps */
18233 READ_MOSTLY_DATA(CONFIG_X86_INTERNODE_CACHE_BYTES)
18234
18235@@ -166,12 +231,6 @@ SECTIONS
18236 }
18237 vgetcpu_mode = VVIRT(.vgetcpu_mode);
18238
18239- . = ALIGN(CONFIG_X86_L1_CACHE_BYTES);
18240- .jiffies : AT(VLOAD(.jiffies)) {
18241- *(.jiffies)
18242- }
18243- jiffies = VVIRT(.jiffies);
18244-
18245 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) {
18246 *(.vsyscall_3)
18247 }
18248@@ -187,12 +246,19 @@ SECTIONS
18249 #endif /* CONFIG_X86_64 */
18250
18251 /* Init code and data - will be freed after init */
18252- . = ALIGN(PAGE_SIZE);
18253 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
18254+ BYTE(0)
18255+
18256+#ifdef CONFIG_PAX_KERNEXEC
18257+ . = ALIGN(HPAGE_SIZE);
18258+#else
18259+ . = ALIGN(PAGE_SIZE);
18260+#endif
18261+
18262 __init_begin = .; /* paired with __init_end */
18263- }
18264+ } :init.begin
18265
18266-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
18267+#ifdef CONFIG_SMP
18268 /*
18269 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
18270 * output PHDR, so the next output section - .init.text - should
18271@@ -201,12 +267,27 @@ SECTIONS
18272 PERCPU_VADDR(0, :percpu)
18273 #endif
18274
18275- INIT_TEXT_SECTION(PAGE_SIZE)
18276-#ifdef CONFIG_X86_64
18277- :init
18278-#endif
18279+ . = ALIGN(PAGE_SIZE);
18280+ init_begin = .;
18281+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
18282+ VMLINUX_SYMBOL(_sinittext) = .;
18283+ INIT_TEXT
18284+ VMLINUX_SYMBOL(_einittext) = .;
18285+ . = ALIGN(PAGE_SIZE);
18286+ } :text.init
18287
18288- INIT_DATA_SECTION(16)
18289+ /*
18290+ * .exit.text is discard at runtime, not link time, to deal with
18291+ * references from .altinstructions and .eh_frame
18292+ */
18293+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
18294+ EXIT_TEXT
18295+ . = ALIGN(16);
18296+ } :text.exit
18297+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
18298+
18299+ . = ALIGN(PAGE_SIZE);
18300+ INIT_DATA_SECTION(16) :init
18301
18302 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
18303 __x86_cpu_dev_start = .;
18304@@ -232,19 +313,11 @@ SECTIONS
18305 *(.altinstr_replacement)
18306 }
18307
18308- /*
18309- * .exit.text is discard at runtime, not link time, to deal with
18310- * references from .altinstructions and .eh_frame
18311- */
18312- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
18313- EXIT_TEXT
18314- }
18315-
18316 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
18317 EXIT_DATA
18318 }
18319
18320-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
18321+#ifndef CONFIG_SMP
18322 PERCPU(PAGE_SIZE)
18323 #endif
18324
18325@@ -267,12 +340,6 @@ SECTIONS
18326 . = ALIGN(PAGE_SIZE);
18327 }
18328
18329-#ifdef CONFIG_X86_64
18330- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
18331- NOSAVE_DATA
18332- }
18333-#endif
18334-
18335 /* BSS */
18336 . = ALIGN(PAGE_SIZE);
18337 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
18338@@ -288,6 +355,7 @@ SECTIONS
18339 __brk_base = .;
18340 . += 64 * 1024; /* 64k alignment slop space */
18341 *(.brk_reservation) /* areas brk users have reserved */
18342+ . = ALIGN(HPAGE_SIZE);
18343 __brk_limit = .;
18344 }
18345
18346@@ -316,13 +384,12 @@ SECTIONS
18347 * for the boot processor.
18348 */
18349 #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load
18350-INIT_PER_CPU(gdt_page);
18351 INIT_PER_CPU(irq_stack_union);
18352
18353 /*
18354 * Build-time check on the image size:
18355 */
18356-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
18357+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
18358 "kernel image bigger than KERNEL_IMAGE_SIZE");
18359
18360 #ifdef CONFIG_SMP
18361diff -urNp linux-2.6.32.43/arch/x86/kernel/vsyscall_64.c linux-2.6.32.43/arch/x86/kernel/vsyscall_64.c
18362--- linux-2.6.32.43/arch/x86/kernel/vsyscall_64.c 2011-03-27 14:31:47.000000000 -0400
18363+++ linux-2.6.32.43/arch/x86/kernel/vsyscall_64.c 2011-04-23 12:56:10.000000000 -0400
18364@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa
18365
18366 write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags);
18367 /* copy vsyscall data */
18368+ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name);
18369 vsyscall_gtod_data.clock.vread = clock->vread;
18370 vsyscall_gtod_data.clock.cycle_last = clock->cycle_last;
18371 vsyscall_gtod_data.clock.mask = clock->mask;
18372@@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s
18373 We do this here because otherwise user space would do it on
18374 its own in a likely inferior way (no access to jiffies).
18375 If you don't like it pass NULL. */
18376- if (tcache && tcache->blob[0] == (j = __jiffies)) {
18377+ if (tcache && tcache->blob[0] == (j = jiffies)) {
18378 p = tcache->blob[1];
18379 } else if (__vgetcpu_mode == VGETCPU_RDTSCP) {
18380 /* Load per CPU data from RDTSCP */
18381diff -urNp linux-2.6.32.43/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.32.43/arch/x86/kernel/x8664_ksyms_64.c
18382--- linux-2.6.32.43/arch/x86/kernel/x8664_ksyms_64.c 2011-03-27 14:31:47.000000000 -0400
18383+++ linux-2.6.32.43/arch/x86/kernel/x8664_ksyms_64.c 2011-04-17 15:56:46.000000000 -0400
18384@@ -30,8 +30,6 @@ EXPORT_SYMBOL(__put_user_8);
18385
18386 EXPORT_SYMBOL(copy_user_generic);
18387 EXPORT_SYMBOL(__copy_user_nocache);
18388-EXPORT_SYMBOL(copy_from_user);
18389-EXPORT_SYMBOL(copy_to_user);
18390 EXPORT_SYMBOL(__copy_from_user_inatomic);
18391
18392 EXPORT_SYMBOL(copy_page);
18393diff -urNp linux-2.6.32.43/arch/x86/kernel/xsave.c linux-2.6.32.43/arch/x86/kernel/xsave.c
18394--- linux-2.6.32.43/arch/x86/kernel/xsave.c 2011-03-27 14:31:47.000000000 -0400
18395+++ linux-2.6.32.43/arch/x86/kernel/xsave.c 2011-04-17 15:56:46.000000000 -0400
18396@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_
18397 fx_sw_user->xstate_size > fx_sw_user->extended_size)
18398 return -1;
18399
18400- err = __get_user(magic2, (__u32 *) (((void *)fpstate) +
18401+ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) +
18402 fx_sw_user->extended_size -
18403 FP_XSTATE_MAGIC2_SIZE));
18404 /*
18405@@ -196,7 +196,7 @@ fx_only:
18406 * the other extended state.
18407 */
18408 xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE);
18409- return fxrstor_checking((__force struct i387_fxsave_struct *)buf);
18410+ return fxrstor_checking((struct i387_fxsave_struct __user *)buf);
18411 }
18412
18413 /*
18414@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf
18415 if (task_thread_info(tsk)->status & TS_XSAVE)
18416 err = restore_user_xstate(buf);
18417 else
18418- err = fxrstor_checking((__force struct i387_fxsave_struct *)
18419+ err = fxrstor_checking((struct i387_fxsave_struct __user *)
18420 buf);
18421 if (unlikely(err)) {
18422 /*
18423diff -urNp linux-2.6.32.43/arch/x86/kvm/emulate.c linux-2.6.32.43/arch/x86/kvm/emulate.c
18424--- linux-2.6.32.43/arch/x86/kvm/emulate.c 2011-03-27 14:31:47.000000000 -0400
18425+++ linux-2.6.32.43/arch/x86/kvm/emulate.c 2011-04-17 15:56:46.000000000 -0400
18426@@ -81,8 +81,8 @@
18427 #define Src2CL (1<<29)
18428 #define Src2ImmByte (2<<29)
18429 #define Src2One (3<<29)
18430-#define Src2Imm16 (4<<29)
18431-#define Src2Mask (7<<29)
18432+#define Src2Imm16 (4U<<29)
18433+#define Src2Mask (7U<<29)
18434
18435 enum {
18436 Group1_80, Group1_81, Group1_82, Group1_83,
18437@@ -411,6 +411,7 @@ static u32 group2_table[] = {
18438
18439 #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \
18440 do { \
18441+ unsigned long _tmp; \
18442 __asm__ __volatile__ ( \
18443 _PRE_EFLAGS("0", "4", "2") \
18444 _op _suffix " %"_x"3,%1; " \
18445@@ -424,8 +425,6 @@ static u32 group2_table[] = {
18446 /* Raw emulation: instruction has two explicit operands. */
18447 #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \
18448 do { \
18449- unsigned long _tmp; \
18450- \
18451 switch ((_dst).bytes) { \
18452 case 2: \
18453 ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \
18454@@ -441,7 +440,6 @@ static u32 group2_table[] = {
18455
18456 #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
18457 do { \
18458- unsigned long _tmp; \
18459 switch ((_dst).bytes) { \
18460 case 1: \
18461 ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \
18462diff -urNp linux-2.6.32.43/arch/x86/kvm/lapic.c linux-2.6.32.43/arch/x86/kvm/lapic.c
18463--- linux-2.6.32.43/arch/x86/kvm/lapic.c 2011-03-27 14:31:47.000000000 -0400
18464+++ linux-2.6.32.43/arch/x86/kvm/lapic.c 2011-04-17 15:56:46.000000000 -0400
18465@@ -52,7 +52,7 @@
18466 #define APIC_BUS_CYCLE_NS 1
18467
18468 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
18469-#define apic_debug(fmt, arg...)
18470+#define apic_debug(fmt, arg...) do {} while (0)
18471
18472 #define APIC_LVT_NUM 6
18473 /* 14 is the version for Xeon and Pentium 8.4.8*/
18474diff -urNp linux-2.6.32.43/arch/x86/kvm/paging_tmpl.h linux-2.6.32.43/arch/x86/kvm/paging_tmpl.h
18475--- linux-2.6.32.43/arch/x86/kvm/paging_tmpl.h 2011-03-27 14:31:47.000000000 -0400
18476+++ linux-2.6.32.43/arch/x86/kvm/paging_tmpl.h 2011-05-16 21:46:57.000000000 -0400
18477@@ -416,6 +416,8 @@ static int FNAME(page_fault)(struct kvm_
18478 int level = PT_PAGE_TABLE_LEVEL;
18479 unsigned long mmu_seq;
18480
18481+ pax_track_stack();
18482+
18483 pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
18484 kvm_mmu_audit(vcpu, "pre page fault");
18485
18486diff -urNp linux-2.6.32.43/arch/x86/kvm/svm.c linux-2.6.32.43/arch/x86/kvm/svm.c
18487--- linux-2.6.32.43/arch/x86/kvm/svm.c 2011-03-27 14:31:47.000000000 -0400
18488+++ linux-2.6.32.43/arch/x86/kvm/svm.c 2011-08-05 20:33:55.000000000 -0400
18489@@ -2485,7 +2485,11 @@ static void reload_tss(struct kvm_vcpu *
18490 int cpu = raw_smp_processor_id();
18491
18492 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
18493+
18494+ pax_open_kernel();
18495 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
18496+ pax_close_kernel();
18497+
18498 load_TR_desc();
18499 }
18500
18501@@ -2946,7 +2950,7 @@ static bool svm_gb_page_enable(void)
18502 return true;
18503 }
18504
18505-static struct kvm_x86_ops svm_x86_ops = {
18506+static const struct kvm_x86_ops svm_x86_ops = {
18507 .cpu_has_kvm_support = has_svm,
18508 .disabled_by_bios = is_disabled,
18509 .hardware_setup = svm_hardware_setup,
18510diff -urNp linux-2.6.32.43/arch/x86/kvm/vmx.c linux-2.6.32.43/arch/x86/kvm/vmx.c
18511--- linux-2.6.32.43/arch/x86/kvm/vmx.c 2011-03-27 14:31:47.000000000 -0400
18512+++ linux-2.6.32.43/arch/x86/kvm/vmx.c 2011-05-04 17:56:20.000000000 -0400
18513@@ -570,7 +570,11 @@ static void reload_tss(void)
18514
18515 kvm_get_gdt(&gdt);
18516 descs = (void *)gdt.base;
18517+
18518+ pax_open_kernel();
18519 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
18520+ pax_close_kernel();
18521+
18522 load_TR_desc();
18523 }
18524
18525@@ -1409,8 +1413,11 @@ static __init int hardware_setup(void)
18526 if (!cpu_has_vmx_flexpriority())
18527 flexpriority_enabled = 0;
18528
18529- if (!cpu_has_vmx_tpr_shadow())
18530- kvm_x86_ops->update_cr8_intercept = NULL;
18531+ if (!cpu_has_vmx_tpr_shadow()) {
18532+ pax_open_kernel();
18533+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
18534+ pax_close_kernel();
18535+ }
18536
18537 if (enable_ept && !cpu_has_vmx_ept_2m_page())
18538 kvm_disable_largepages();
18539@@ -2361,7 +2368,7 @@ static int vmx_vcpu_setup(struct vcpu_vm
18540 vmcs_writel(HOST_IDTR_BASE, dt.base); /* 22.2.4 */
18541
18542 asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return));
18543- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */
18544+ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */
18545 vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0);
18546 vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0);
18547 vmcs_write32(VM_ENTRY_MSR_LOAD_COUNT, 0);
18548@@ -3717,6 +3724,12 @@ static void vmx_vcpu_run(struct kvm_vcpu
18549 "jmp .Lkvm_vmx_return \n\t"
18550 ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t"
18551 ".Lkvm_vmx_return: "
18552+
18553+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18554+ "ljmp %[cs],$.Lkvm_vmx_return2\n\t"
18555+ ".Lkvm_vmx_return2: "
18556+#endif
18557+
18558 /* Save guest registers, load host registers, keep flags */
18559 "xchg %0, (%%"R"sp) \n\t"
18560 "mov %%"R"ax, %c[rax](%0) \n\t"
18561@@ -3763,8 +3776,13 @@ static void vmx_vcpu_run(struct kvm_vcpu
18562 [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])),
18563 #endif
18564 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2))
18565+
18566+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18567+ ,[cs]"i"(__KERNEL_CS)
18568+#endif
18569+
18570 : "cc", "memory"
18571- , R"bx", R"di", R"si"
18572+ , R"ax", R"bx", R"di", R"si"
18573 #ifdef CONFIG_X86_64
18574 , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
18575 #endif
18576@@ -3781,7 +3799,16 @@ static void vmx_vcpu_run(struct kvm_vcpu
18577 if (vmx->rmode.irq.pending)
18578 fixup_rmode_irq(vmx);
18579
18580- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
18581+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r"(__KERNEL_DS));
18582+
18583+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
18584+ loadsegment(fs, __KERNEL_PERCPU);
18585+#endif
18586+
18587+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
18588+ __set_fs(current_thread_info()->addr_limit);
18589+#endif
18590+
18591 vmx->launched = 1;
18592
18593 vmx_complete_interrupts(vmx);
18594@@ -3956,7 +3983,7 @@ static bool vmx_gb_page_enable(void)
18595 return false;
18596 }
18597
18598-static struct kvm_x86_ops vmx_x86_ops = {
18599+static const struct kvm_x86_ops vmx_x86_ops = {
18600 .cpu_has_kvm_support = cpu_has_kvm_support,
18601 .disabled_by_bios = vmx_disabled_by_bios,
18602 .hardware_setup = hardware_setup,
18603diff -urNp linux-2.6.32.43/arch/x86/kvm/x86.c linux-2.6.32.43/arch/x86/kvm/x86.c
18604--- linux-2.6.32.43/arch/x86/kvm/x86.c 2011-05-10 22:12:01.000000000 -0400
18605+++ linux-2.6.32.43/arch/x86/kvm/x86.c 2011-05-10 22:12:26.000000000 -0400
18606@@ -82,7 +82,7 @@ static void update_cr8_intercept(struct
18607 static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid,
18608 struct kvm_cpuid_entry2 __user *entries);
18609
18610-struct kvm_x86_ops *kvm_x86_ops;
18611+const struct kvm_x86_ops *kvm_x86_ops;
18612 EXPORT_SYMBOL_GPL(kvm_x86_ops);
18613
18614 int ignore_msrs = 0;
18615@@ -1430,15 +1430,20 @@ static int kvm_vcpu_ioctl_set_cpuid2(str
18616 struct kvm_cpuid2 *cpuid,
18617 struct kvm_cpuid_entry2 __user *entries)
18618 {
18619- int r;
18620+ int r, i;
18621
18622 r = -E2BIG;
18623 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
18624 goto out;
18625 r = -EFAULT;
18626- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
18627- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18628+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
18629 goto out;
18630+ for (i = 0; i < cpuid->nent; ++i) {
18631+ struct kvm_cpuid_entry2 cpuid_entry;
18632+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
18633+ goto out;
18634+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
18635+ }
18636 vcpu->arch.cpuid_nent = cpuid->nent;
18637 kvm_apic_set_version(vcpu);
18638 return 0;
18639@@ -1451,16 +1456,20 @@ static int kvm_vcpu_ioctl_get_cpuid2(str
18640 struct kvm_cpuid2 *cpuid,
18641 struct kvm_cpuid_entry2 __user *entries)
18642 {
18643- int r;
18644+ int r, i;
18645
18646 vcpu_load(vcpu);
18647 r = -E2BIG;
18648 if (cpuid->nent < vcpu->arch.cpuid_nent)
18649 goto out;
18650 r = -EFAULT;
18651- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
18652- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18653+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
18654 goto out;
18655+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
18656+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
18657+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
18658+ goto out;
18659+ }
18660 return 0;
18661
18662 out:
18663@@ -1678,7 +1687,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru
18664 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
18665 struct kvm_interrupt *irq)
18666 {
18667- if (irq->irq < 0 || irq->irq >= 256)
18668+ if (irq->irq >= 256)
18669 return -EINVAL;
18670 if (irqchip_in_kernel(vcpu->kvm))
18671 return -ENXIO;
18672@@ -3260,10 +3269,10 @@ static struct notifier_block kvmclock_cp
18673 .notifier_call = kvmclock_cpufreq_notifier
18674 };
18675
18676-int kvm_arch_init(void *opaque)
18677+int kvm_arch_init(const void *opaque)
18678 {
18679 int r, cpu;
18680- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
18681+ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque;
18682
18683 if (kvm_x86_ops) {
18684 printk(KERN_ERR "kvm: already loaded the other module\n");
18685diff -urNp linux-2.6.32.43/arch/x86/lguest/boot.c linux-2.6.32.43/arch/x86/lguest/boot.c
18686--- linux-2.6.32.43/arch/x86/lguest/boot.c 2011-03-27 14:31:47.000000000 -0400
18687+++ linux-2.6.32.43/arch/x86/lguest/boot.c 2011-08-05 20:33:55.000000000 -0400
18688@@ -1172,9 +1172,10 @@ static __init int early_put_chars(u32 vt
18689 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
18690 * Launcher to reboot us.
18691 */
18692-static void lguest_restart(char *reason)
18693+static __noreturn void lguest_restart(char *reason)
18694 {
18695 kvm_hypercall2(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART);
18696+ BUG();
18697 }
18698
18699 /*G:050
18700diff -urNp linux-2.6.32.43/arch/x86/lib/atomic64_32.c linux-2.6.32.43/arch/x86/lib/atomic64_32.c
18701--- linux-2.6.32.43/arch/x86/lib/atomic64_32.c 2011-03-27 14:31:47.000000000 -0400
18702+++ linux-2.6.32.43/arch/x86/lib/atomic64_32.c 2011-05-04 17:56:28.000000000 -0400
18703@@ -25,6 +25,12 @@ u64 atomic64_cmpxchg(atomic64_t *ptr, u6
18704 }
18705 EXPORT_SYMBOL(atomic64_cmpxchg);
18706
18707+u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old_val, u64 new_val)
18708+{
18709+ return cmpxchg8b(&ptr->counter, old_val, new_val);
18710+}
18711+EXPORT_SYMBOL(atomic64_cmpxchg_unchecked);
18712+
18713 /**
18714 * atomic64_xchg - xchg atomic64 variable
18715 * @ptr: pointer to type atomic64_t
18716@@ -56,6 +62,36 @@ u64 atomic64_xchg(atomic64_t *ptr, u64 n
18717 EXPORT_SYMBOL(atomic64_xchg);
18718
18719 /**
18720+ * atomic64_xchg_unchecked - xchg atomic64 variable
18721+ * @ptr: pointer to type atomic64_unchecked_t
18722+ * @new_val: value to assign
18723+ *
18724+ * Atomically xchgs the value of @ptr to @new_val and returns
18725+ * the old value.
18726+ */
18727+u64 atomic64_xchg_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18728+{
18729+ /*
18730+ * Try first with a (possibly incorrect) assumption about
18731+ * what we have there. We'll do two loops most likely,
18732+ * but we'll get an ownership MESI transaction straight away
18733+ * instead of a read transaction followed by a
18734+ * flush-for-ownership transaction:
18735+ */
18736+ u64 old_val, real_val = 0;
18737+
18738+ do {
18739+ old_val = real_val;
18740+
18741+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18742+
18743+ } while (real_val != old_val);
18744+
18745+ return old_val;
18746+}
18747+EXPORT_SYMBOL(atomic64_xchg_unchecked);
18748+
18749+/**
18750 * atomic64_set - set atomic64 variable
18751 * @ptr: pointer to type atomic64_t
18752 * @new_val: value to assign
18753@@ -69,7 +105,19 @@ void atomic64_set(atomic64_t *ptr, u64 n
18754 EXPORT_SYMBOL(atomic64_set);
18755
18756 /**
18757-EXPORT_SYMBOL(atomic64_read);
18758+ * atomic64_unchecked_set - set atomic64 variable
18759+ * @ptr: pointer to type atomic64_unchecked_t
18760+ * @new_val: value to assign
18761+ *
18762+ * Atomically sets the value of @ptr to @new_val.
18763+ */
18764+void atomic64_set_unchecked(atomic64_unchecked_t *ptr, u64 new_val)
18765+{
18766+ atomic64_xchg_unchecked(ptr, new_val);
18767+}
18768+EXPORT_SYMBOL(atomic64_set_unchecked);
18769+
18770+/**
18771 * atomic64_add_return - add and return
18772 * @delta: integer value to add
18773 * @ptr: pointer to type atomic64_t
18774@@ -99,24 +147,72 @@ noinline u64 atomic64_add_return(u64 del
18775 }
18776 EXPORT_SYMBOL(atomic64_add_return);
18777
18778+/**
18779+ * atomic64_add_return_unchecked - add and return
18780+ * @delta: integer value to add
18781+ * @ptr: pointer to type atomic64_unchecked_t
18782+ *
18783+ * Atomically adds @delta to @ptr and returns @delta + *@ptr
18784+ */
18785+noinline u64 atomic64_add_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18786+{
18787+ /*
18788+ * Try first with a (possibly incorrect) assumption about
18789+ * what we have there. We'll do two loops most likely,
18790+ * but we'll get an ownership MESI transaction straight away
18791+ * instead of a read transaction followed by a
18792+ * flush-for-ownership transaction:
18793+ */
18794+ u64 old_val, new_val, real_val = 0;
18795+
18796+ do {
18797+ old_val = real_val;
18798+ new_val = old_val + delta;
18799+
18800+ real_val = atomic64_cmpxchg_unchecked(ptr, old_val, new_val);
18801+
18802+ } while (real_val != old_val);
18803+
18804+ return new_val;
18805+}
18806+EXPORT_SYMBOL(atomic64_add_return_unchecked);
18807+
18808 u64 atomic64_sub_return(u64 delta, atomic64_t *ptr)
18809 {
18810 return atomic64_add_return(-delta, ptr);
18811 }
18812 EXPORT_SYMBOL(atomic64_sub_return);
18813
18814+u64 atomic64_sub_return_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18815+{
18816+ return atomic64_add_return_unchecked(-delta, ptr);
18817+}
18818+EXPORT_SYMBOL(atomic64_sub_return_unchecked);
18819+
18820 u64 atomic64_inc_return(atomic64_t *ptr)
18821 {
18822 return atomic64_add_return(1, ptr);
18823 }
18824 EXPORT_SYMBOL(atomic64_inc_return);
18825
18826+u64 atomic64_inc_return_unchecked(atomic64_unchecked_t *ptr)
18827+{
18828+ return atomic64_add_return_unchecked(1, ptr);
18829+}
18830+EXPORT_SYMBOL(atomic64_inc_return_unchecked);
18831+
18832 u64 atomic64_dec_return(atomic64_t *ptr)
18833 {
18834 return atomic64_sub_return(1, ptr);
18835 }
18836 EXPORT_SYMBOL(atomic64_dec_return);
18837
18838+u64 atomic64_dec_return_unchecked(atomic64_unchecked_t *ptr)
18839+{
18840+ return atomic64_sub_return_unchecked(1, ptr);
18841+}
18842+EXPORT_SYMBOL(atomic64_dec_return_unchecked);
18843+
18844 /**
18845 * atomic64_add - add integer to atomic64 variable
18846 * @delta: integer value to add
18847@@ -131,6 +227,19 @@ void atomic64_add(u64 delta, atomic64_t
18848 EXPORT_SYMBOL(atomic64_add);
18849
18850 /**
18851+ * atomic64_add_unchecked - add integer to atomic64 variable
18852+ * @delta: integer value to add
18853+ * @ptr: pointer to type atomic64_unchecked_t
18854+ *
18855+ * Atomically adds @delta to @ptr.
18856+ */
18857+void atomic64_add_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18858+{
18859+ atomic64_add_return_unchecked(delta, ptr);
18860+}
18861+EXPORT_SYMBOL(atomic64_add_unchecked);
18862+
18863+/**
18864 * atomic64_sub - subtract the atomic64 variable
18865 * @delta: integer value to subtract
18866 * @ptr: pointer to type atomic64_t
18867@@ -144,6 +253,19 @@ void atomic64_sub(u64 delta, atomic64_t
18868 EXPORT_SYMBOL(atomic64_sub);
18869
18870 /**
18871+ * atomic64_sub_unchecked - subtract the atomic64 variable
18872+ * @delta: integer value to subtract
18873+ * @ptr: pointer to type atomic64_unchecked_t
18874+ *
18875+ * Atomically subtracts @delta from @ptr.
18876+ */
18877+void atomic64_sub_unchecked(u64 delta, atomic64_unchecked_t *ptr)
18878+{
18879+ atomic64_add_unchecked(-delta, ptr);
18880+}
18881+EXPORT_SYMBOL(atomic64_sub_unchecked);
18882+
18883+/**
18884 * atomic64_sub_and_test - subtract value from variable and test result
18885 * @delta: integer value to subtract
18886 * @ptr: pointer to type atomic64_t
18887@@ -173,6 +295,18 @@ void atomic64_inc(atomic64_t *ptr)
18888 EXPORT_SYMBOL(atomic64_inc);
18889
18890 /**
18891+ * atomic64_inc_unchecked - increment atomic64 variable
18892+ * @ptr: pointer to type atomic64_unchecked_t
18893+ *
18894+ * Atomically increments @ptr by 1.
18895+ */
18896+void atomic64_inc_unchecked(atomic64_unchecked_t *ptr)
18897+{
18898+ atomic64_add_unchecked(1, ptr);
18899+}
18900+EXPORT_SYMBOL(atomic64_inc_unchecked);
18901+
18902+/**
18903 * atomic64_dec - decrement atomic64 variable
18904 * @ptr: pointer to type atomic64_t
18905 *
18906@@ -185,6 +319,18 @@ void atomic64_dec(atomic64_t *ptr)
18907 EXPORT_SYMBOL(atomic64_dec);
18908
18909 /**
18910+ * atomic64_dec_unchecked - decrement atomic64 variable
18911+ * @ptr: pointer to type atomic64_unchecked_t
18912+ *
18913+ * Atomically decrements @ptr by 1.
18914+ */
18915+void atomic64_dec_unchecked(atomic64_unchecked_t *ptr)
18916+{
18917+ atomic64_sub_unchecked(1, ptr);
18918+}
18919+EXPORT_SYMBOL(atomic64_dec_unchecked);
18920+
18921+/**
18922 * atomic64_dec_and_test - decrement and test
18923 * @ptr: pointer to type atomic64_t
18924 *
18925diff -urNp linux-2.6.32.43/arch/x86/lib/checksum_32.S linux-2.6.32.43/arch/x86/lib/checksum_32.S
18926--- linux-2.6.32.43/arch/x86/lib/checksum_32.S 2011-03-27 14:31:47.000000000 -0400
18927+++ linux-2.6.32.43/arch/x86/lib/checksum_32.S 2011-04-17 15:56:46.000000000 -0400
18928@@ -28,7 +28,8 @@
18929 #include <linux/linkage.h>
18930 #include <asm/dwarf2.h>
18931 #include <asm/errno.h>
18932-
18933+#include <asm/segment.h>
18934+
18935 /*
18936 * computes a partial checksum, e.g. for TCP/UDP fragments
18937 */
18938@@ -304,9 +305,28 @@ unsigned int csum_partial_copy_generic (
18939
18940 #define ARGBASE 16
18941 #define FP 12
18942-
18943-ENTRY(csum_partial_copy_generic)
18944+
18945+ENTRY(csum_partial_copy_generic_to_user)
18946 CFI_STARTPROC
18947+
18948+#ifdef CONFIG_PAX_MEMORY_UDEREF
18949+ pushl %gs
18950+ CFI_ADJUST_CFA_OFFSET 4
18951+ popl %es
18952+ CFI_ADJUST_CFA_OFFSET -4
18953+ jmp csum_partial_copy_generic
18954+#endif
18955+
18956+ENTRY(csum_partial_copy_generic_from_user)
18957+
18958+#ifdef CONFIG_PAX_MEMORY_UDEREF
18959+ pushl %gs
18960+ CFI_ADJUST_CFA_OFFSET 4
18961+ popl %ds
18962+ CFI_ADJUST_CFA_OFFSET -4
18963+#endif
18964+
18965+ENTRY(csum_partial_copy_generic)
18966 subl $4,%esp
18967 CFI_ADJUST_CFA_OFFSET 4
18968 pushl %edi
18969@@ -331,7 +351,7 @@ ENTRY(csum_partial_copy_generic)
18970 jmp 4f
18971 SRC(1: movw (%esi), %bx )
18972 addl $2, %esi
18973-DST( movw %bx, (%edi) )
18974+DST( movw %bx, %es:(%edi) )
18975 addl $2, %edi
18976 addw %bx, %ax
18977 adcl $0, %eax
18978@@ -343,30 +363,30 @@ DST( movw %bx, (%edi) )
18979 SRC(1: movl (%esi), %ebx )
18980 SRC( movl 4(%esi), %edx )
18981 adcl %ebx, %eax
18982-DST( movl %ebx, (%edi) )
18983+DST( movl %ebx, %es:(%edi) )
18984 adcl %edx, %eax
18985-DST( movl %edx, 4(%edi) )
18986+DST( movl %edx, %es:4(%edi) )
18987
18988 SRC( movl 8(%esi), %ebx )
18989 SRC( movl 12(%esi), %edx )
18990 adcl %ebx, %eax
18991-DST( movl %ebx, 8(%edi) )
18992+DST( movl %ebx, %es:8(%edi) )
18993 adcl %edx, %eax
18994-DST( movl %edx, 12(%edi) )
18995+DST( movl %edx, %es:12(%edi) )
18996
18997 SRC( movl 16(%esi), %ebx )
18998 SRC( movl 20(%esi), %edx )
18999 adcl %ebx, %eax
19000-DST( movl %ebx, 16(%edi) )
19001+DST( movl %ebx, %es:16(%edi) )
19002 adcl %edx, %eax
19003-DST( movl %edx, 20(%edi) )
19004+DST( movl %edx, %es:20(%edi) )
19005
19006 SRC( movl 24(%esi), %ebx )
19007 SRC( movl 28(%esi), %edx )
19008 adcl %ebx, %eax
19009-DST( movl %ebx, 24(%edi) )
19010+DST( movl %ebx, %es:24(%edi) )
19011 adcl %edx, %eax
19012-DST( movl %edx, 28(%edi) )
19013+DST( movl %edx, %es:28(%edi) )
19014
19015 lea 32(%esi), %esi
19016 lea 32(%edi), %edi
19017@@ -380,7 +400,7 @@ DST( movl %edx, 28(%edi) )
19018 shrl $2, %edx # This clears CF
19019 SRC(3: movl (%esi), %ebx )
19020 adcl %ebx, %eax
19021-DST( movl %ebx, (%edi) )
19022+DST( movl %ebx, %es:(%edi) )
19023 lea 4(%esi), %esi
19024 lea 4(%edi), %edi
19025 dec %edx
19026@@ -392,12 +412,12 @@ DST( movl %ebx, (%edi) )
19027 jb 5f
19028 SRC( movw (%esi), %cx )
19029 leal 2(%esi), %esi
19030-DST( movw %cx, (%edi) )
19031+DST( movw %cx, %es:(%edi) )
19032 leal 2(%edi), %edi
19033 je 6f
19034 shll $16,%ecx
19035 SRC(5: movb (%esi), %cl )
19036-DST( movb %cl, (%edi) )
19037+DST( movb %cl, %es:(%edi) )
19038 6: addl %ecx, %eax
19039 adcl $0, %eax
19040 7:
19041@@ -408,7 +428,7 @@ DST( movb %cl, (%edi) )
19042
19043 6001:
19044 movl ARGBASE+20(%esp), %ebx # src_err_ptr
19045- movl $-EFAULT, (%ebx)
19046+ movl $-EFAULT, %ss:(%ebx)
19047
19048 # zero the complete destination - computing the rest
19049 # is too much work
19050@@ -421,11 +441,19 @@ DST( movb %cl, (%edi) )
19051
19052 6002:
19053 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19054- movl $-EFAULT,(%ebx)
19055+ movl $-EFAULT,%ss:(%ebx)
19056 jmp 5000b
19057
19058 .previous
19059
19060+ pushl %ss
19061+ CFI_ADJUST_CFA_OFFSET 4
19062+ popl %ds
19063+ CFI_ADJUST_CFA_OFFSET -4
19064+ pushl %ss
19065+ CFI_ADJUST_CFA_OFFSET 4
19066+ popl %es
19067+ CFI_ADJUST_CFA_OFFSET -4
19068 popl %ebx
19069 CFI_ADJUST_CFA_OFFSET -4
19070 CFI_RESTORE ebx
19071@@ -439,26 +467,47 @@ DST( movb %cl, (%edi) )
19072 CFI_ADJUST_CFA_OFFSET -4
19073 ret
19074 CFI_ENDPROC
19075-ENDPROC(csum_partial_copy_generic)
19076+ENDPROC(csum_partial_copy_generic_to_user)
19077
19078 #else
19079
19080 /* Version for PentiumII/PPro */
19081
19082 #define ROUND1(x) \
19083+ nop; nop; nop; \
19084 SRC(movl x(%esi), %ebx ) ; \
19085 addl %ebx, %eax ; \
19086- DST(movl %ebx, x(%edi) ) ;
19087+ DST(movl %ebx, %es:x(%edi)) ;
19088
19089 #define ROUND(x) \
19090+ nop; nop; nop; \
19091 SRC(movl x(%esi), %ebx ) ; \
19092 adcl %ebx, %eax ; \
19093- DST(movl %ebx, x(%edi) ) ;
19094+ DST(movl %ebx, %es:x(%edi)) ;
19095
19096 #define ARGBASE 12
19097-
19098-ENTRY(csum_partial_copy_generic)
19099+
19100+ENTRY(csum_partial_copy_generic_to_user)
19101 CFI_STARTPROC
19102+
19103+#ifdef CONFIG_PAX_MEMORY_UDEREF
19104+ pushl %gs
19105+ CFI_ADJUST_CFA_OFFSET 4
19106+ popl %es
19107+ CFI_ADJUST_CFA_OFFSET -4
19108+ jmp csum_partial_copy_generic
19109+#endif
19110+
19111+ENTRY(csum_partial_copy_generic_from_user)
19112+
19113+#ifdef CONFIG_PAX_MEMORY_UDEREF
19114+ pushl %gs
19115+ CFI_ADJUST_CFA_OFFSET 4
19116+ popl %ds
19117+ CFI_ADJUST_CFA_OFFSET -4
19118+#endif
19119+
19120+ENTRY(csum_partial_copy_generic)
19121 pushl %ebx
19122 CFI_ADJUST_CFA_OFFSET 4
19123 CFI_REL_OFFSET ebx, 0
19124@@ -482,7 +531,7 @@ ENTRY(csum_partial_copy_generic)
19125 subl %ebx, %edi
19126 lea -1(%esi),%edx
19127 andl $-32,%edx
19128- lea 3f(%ebx,%ebx), %ebx
19129+ lea 3f(%ebx,%ebx,2), %ebx
19130 testl %esi, %esi
19131 jmp *%ebx
19132 1: addl $64,%esi
19133@@ -503,19 +552,19 @@ ENTRY(csum_partial_copy_generic)
19134 jb 5f
19135 SRC( movw (%esi), %dx )
19136 leal 2(%esi), %esi
19137-DST( movw %dx, (%edi) )
19138+DST( movw %dx, %es:(%edi) )
19139 leal 2(%edi), %edi
19140 je 6f
19141 shll $16,%edx
19142 5:
19143 SRC( movb (%esi), %dl )
19144-DST( movb %dl, (%edi) )
19145+DST( movb %dl, %es:(%edi) )
19146 6: addl %edx, %eax
19147 adcl $0, %eax
19148 7:
19149 .section .fixup, "ax"
19150 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
19151- movl $-EFAULT, (%ebx)
19152+ movl $-EFAULT, %ss:(%ebx)
19153 # zero the complete destination (computing the rest is too much work)
19154 movl ARGBASE+8(%esp),%edi # dst
19155 movl ARGBASE+12(%esp),%ecx # len
19156@@ -523,10 +572,21 @@ DST( movb %dl, (%edi) )
19157 rep; stosb
19158 jmp 7b
19159 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
19160- movl $-EFAULT, (%ebx)
19161+ movl $-EFAULT, %ss:(%ebx)
19162 jmp 7b
19163 .previous
19164
19165+#ifdef CONFIG_PAX_MEMORY_UDEREF
19166+ pushl %ss
19167+ CFI_ADJUST_CFA_OFFSET 4
19168+ popl %ds
19169+ CFI_ADJUST_CFA_OFFSET -4
19170+ pushl %ss
19171+ CFI_ADJUST_CFA_OFFSET 4
19172+ popl %es
19173+ CFI_ADJUST_CFA_OFFSET -4
19174+#endif
19175+
19176 popl %esi
19177 CFI_ADJUST_CFA_OFFSET -4
19178 CFI_RESTORE esi
19179@@ -538,7 +598,7 @@ DST( movb %dl, (%edi) )
19180 CFI_RESTORE ebx
19181 ret
19182 CFI_ENDPROC
19183-ENDPROC(csum_partial_copy_generic)
19184+ENDPROC(csum_partial_copy_generic_to_user)
19185
19186 #undef ROUND
19187 #undef ROUND1
19188diff -urNp linux-2.6.32.43/arch/x86/lib/clear_page_64.S linux-2.6.32.43/arch/x86/lib/clear_page_64.S
19189--- linux-2.6.32.43/arch/x86/lib/clear_page_64.S 2011-03-27 14:31:47.000000000 -0400
19190+++ linux-2.6.32.43/arch/x86/lib/clear_page_64.S 2011-04-17 15:56:46.000000000 -0400
19191@@ -43,7 +43,7 @@ ENDPROC(clear_page)
19192
19193 #include <asm/cpufeature.h>
19194
19195- .section .altinstr_replacement,"ax"
19196+ .section .altinstr_replacement,"a"
19197 1: .byte 0xeb /* jmp <disp8> */
19198 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
19199 2:
19200diff -urNp linux-2.6.32.43/arch/x86/lib/copy_page_64.S linux-2.6.32.43/arch/x86/lib/copy_page_64.S
19201--- linux-2.6.32.43/arch/x86/lib/copy_page_64.S 2011-03-27 14:31:47.000000000 -0400
19202+++ linux-2.6.32.43/arch/x86/lib/copy_page_64.S 2011-04-17 15:56:46.000000000 -0400
19203@@ -104,7 +104,7 @@ ENDPROC(copy_page)
19204
19205 #include <asm/cpufeature.h>
19206
19207- .section .altinstr_replacement,"ax"
19208+ .section .altinstr_replacement,"a"
19209 1: .byte 0xeb /* jmp <disp8> */
19210 .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */
19211 2:
19212diff -urNp linux-2.6.32.43/arch/x86/lib/copy_user_64.S linux-2.6.32.43/arch/x86/lib/copy_user_64.S
19213--- linux-2.6.32.43/arch/x86/lib/copy_user_64.S 2011-06-25 12:55:34.000000000 -0400
19214+++ linux-2.6.32.43/arch/x86/lib/copy_user_64.S 2011-06-25 12:56:37.000000000 -0400
19215@@ -15,13 +15,14 @@
19216 #include <asm/asm-offsets.h>
19217 #include <asm/thread_info.h>
19218 #include <asm/cpufeature.h>
19219+#include <asm/pgtable.h>
19220
19221 .macro ALTERNATIVE_JUMP feature,orig,alt
19222 0:
19223 .byte 0xe9 /* 32bit jump */
19224 .long \orig-1f /* by default jump to orig */
19225 1:
19226- .section .altinstr_replacement,"ax"
19227+ .section .altinstr_replacement,"a"
19228 2: .byte 0xe9 /* near jump with 32bit immediate */
19229 .long \alt-1b /* offset */ /* or alternatively to alt */
19230 .previous
19231@@ -64,49 +65,19 @@
19232 #endif
19233 .endm
19234
19235-/* Standard copy_to_user with segment limit checking */
19236-ENTRY(copy_to_user)
19237- CFI_STARTPROC
19238- GET_THREAD_INFO(%rax)
19239- movq %rdi,%rcx
19240- addq %rdx,%rcx
19241- jc bad_to_user
19242- cmpq TI_addr_limit(%rax),%rcx
19243- ja bad_to_user
19244- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19245- CFI_ENDPROC
19246-ENDPROC(copy_to_user)
19247-
19248-/* Standard copy_from_user with segment limit checking */
19249-ENTRY(copy_from_user)
19250- CFI_STARTPROC
19251- GET_THREAD_INFO(%rax)
19252- movq %rsi,%rcx
19253- addq %rdx,%rcx
19254- jc bad_from_user
19255- cmpq TI_addr_limit(%rax),%rcx
19256- ja bad_from_user
19257- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19258- CFI_ENDPROC
19259-ENDPROC(copy_from_user)
19260-
19261 ENTRY(copy_user_generic)
19262 CFI_STARTPROC
19263 ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19264 CFI_ENDPROC
19265 ENDPROC(copy_user_generic)
19266
19267-ENTRY(__copy_from_user_inatomic)
19268- CFI_STARTPROC
19269- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string
19270- CFI_ENDPROC
19271-ENDPROC(__copy_from_user_inatomic)
19272-
19273 .section .fixup,"ax"
19274 /* must zero dest */
19275 ENTRY(bad_from_user)
19276 bad_from_user:
19277 CFI_STARTPROC
19278+ testl %edx,%edx
19279+ js bad_to_user
19280 movl %edx,%ecx
19281 xorl %eax,%eax
19282 rep
19283diff -urNp linux-2.6.32.43/arch/x86/lib/copy_user_nocache_64.S linux-2.6.32.43/arch/x86/lib/copy_user_nocache_64.S
19284--- linux-2.6.32.43/arch/x86/lib/copy_user_nocache_64.S 2011-03-27 14:31:47.000000000 -0400
19285+++ linux-2.6.32.43/arch/x86/lib/copy_user_nocache_64.S 2011-04-17 15:56:46.000000000 -0400
19286@@ -14,6 +14,7 @@
19287 #include <asm/current.h>
19288 #include <asm/asm-offsets.h>
19289 #include <asm/thread_info.h>
19290+#include <asm/pgtable.h>
19291
19292 .macro ALIGN_DESTINATION
19293 #ifdef FIX_ALIGNMENT
19294@@ -50,6 +51,15 @@
19295 */
19296 ENTRY(__copy_user_nocache)
19297 CFI_STARTPROC
19298+
19299+#ifdef CONFIG_PAX_MEMORY_UDEREF
19300+ mov $PAX_USER_SHADOW_BASE,%rcx
19301+ cmp %rcx,%rsi
19302+ jae 1f
19303+ add %rcx,%rsi
19304+1:
19305+#endif
19306+
19307 cmpl $8,%edx
19308 jb 20f /* less then 8 bytes, go to byte copy loop */
19309 ALIGN_DESTINATION
19310diff -urNp linux-2.6.32.43/arch/x86/lib/csum-wrappers_64.c linux-2.6.32.43/arch/x86/lib/csum-wrappers_64.c
19311--- linux-2.6.32.43/arch/x86/lib/csum-wrappers_64.c 2011-03-27 14:31:47.000000000 -0400
19312+++ linux-2.6.32.43/arch/x86/lib/csum-wrappers_64.c 2011-05-04 17:56:20.000000000 -0400
19313@@ -52,6 +52,12 @@ csum_partial_copy_from_user(const void _
19314 len -= 2;
19315 }
19316 }
19317+
19318+#ifdef CONFIG_PAX_MEMORY_UDEREF
19319+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
19320+ src += PAX_USER_SHADOW_BASE;
19321+#endif
19322+
19323 isum = csum_partial_copy_generic((__force const void *)src,
19324 dst, len, isum, errp, NULL);
19325 if (unlikely(*errp))
19326@@ -105,6 +111,12 @@ csum_partial_copy_to_user(const void *sr
19327 }
19328
19329 *errp = 0;
19330+
19331+#ifdef CONFIG_PAX_MEMORY_UDEREF
19332+ if ((unsigned long)dst < PAX_USER_SHADOW_BASE)
19333+ dst += PAX_USER_SHADOW_BASE;
19334+#endif
19335+
19336 return csum_partial_copy_generic(src, (void __force *)dst,
19337 len, isum, NULL, errp);
19338 }
19339diff -urNp linux-2.6.32.43/arch/x86/lib/getuser.S linux-2.6.32.43/arch/x86/lib/getuser.S
19340--- linux-2.6.32.43/arch/x86/lib/getuser.S 2011-03-27 14:31:47.000000000 -0400
19341+++ linux-2.6.32.43/arch/x86/lib/getuser.S 2011-04-17 15:56:46.000000000 -0400
19342@@ -33,14 +33,35 @@
19343 #include <asm/asm-offsets.h>
19344 #include <asm/thread_info.h>
19345 #include <asm/asm.h>
19346+#include <asm/segment.h>
19347+#include <asm/pgtable.h>
19348+
19349+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19350+#define __copyuser_seg gs;
19351+#else
19352+#define __copyuser_seg
19353+#endif
19354
19355 .text
19356 ENTRY(__get_user_1)
19357 CFI_STARTPROC
19358+
19359+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19360 GET_THREAD_INFO(%_ASM_DX)
19361 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19362 jae bad_get_user
19363-1: movzb (%_ASM_AX),%edx
19364+
19365+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19366+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19367+ cmp %_ASM_DX,%_ASM_AX
19368+ jae 1234f
19369+ add %_ASM_DX,%_ASM_AX
19370+1234:
19371+#endif
19372+
19373+#endif
19374+
19375+1: __copyuser_seg movzb (%_ASM_AX),%edx
19376 xor %eax,%eax
19377 ret
19378 CFI_ENDPROC
19379@@ -49,11 +70,24 @@ ENDPROC(__get_user_1)
19380 ENTRY(__get_user_2)
19381 CFI_STARTPROC
19382 add $1,%_ASM_AX
19383+
19384+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19385 jc bad_get_user
19386 GET_THREAD_INFO(%_ASM_DX)
19387 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19388 jae bad_get_user
19389-2: movzwl -1(%_ASM_AX),%edx
19390+
19391+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19392+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19393+ cmp %_ASM_DX,%_ASM_AX
19394+ jae 1234f
19395+ add %_ASM_DX,%_ASM_AX
19396+1234:
19397+#endif
19398+
19399+#endif
19400+
19401+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
19402 xor %eax,%eax
19403 ret
19404 CFI_ENDPROC
19405@@ -62,11 +96,24 @@ ENDPROC(__get_user_2)
19406 ENTRY(__get_user_4)
19407 CFI_STARTPROC
19408 add $3,%_ASM_AX
19409+
19410+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19411 jc bad_get_user
19412 GET_THREAD_INFO(%_ASM_DX)
19413 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19414 jae bad_get_user
19415-3: mov -3(%_ASM_AX),%edx
19416+
19417+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19418+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19419+ cmp %_ASM_DX,%_ASM_AX
19420+ jae 1234f
19421+ add %_ASM_DX,%_ASM_AX
19422+1234:
19423+#endif
19424+
19425+#endif
19426+
19427+3: __copyuser_seg mov -3(%_ASM_AX),%edx
19428 xor %eax,%eax
19429 ret
19430 CFI_ENDPROC
19431@@ -80,6 +127,15 @@ ENTRY(__get_user_8)
19432 GET_THREAD_INFO(%_ASM_DX)
19433 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
19434 jae bad_get_user
19435+
19436+#ifdef CONFIG_PAX_MEMORY_UDEREF
19437+ mov $PAX_USER_SHADOW_BASE,%_ASM_DX
19438+ cmp %_ASM_DX,%_ASM_AX
19439+ jae 1234f
19440+ add %_ASM_DX,%_ASM_AX
19441+1234:
19442+#endif
19443+
19444 4: movq -7(%_ASM_AX),%_ASM_DX
19445 xor %eax,%eax
19446 ret
19447diff -urNp linux-2.6.32.43/arch/x86/lib/memcpy_64.S linux-2.6.32.43/arch/x86/lib/memcpy_64.S
19448--- linux-2.6.32.43/arch/x86/lib/memcpy_64.S 2011-03-27 14:31:47.000000000 -0400
19449+++ linux-2.6.32.43/arch/x86/lib/memcpy_64.S 2011-04-17 15:56:46.000000000 -0400
19450@@ -128,7 +128,7 @@ ENDPROC(__memcpy)
19451 * It is also a lot simpler. Use this when possible:
19452 */
19453
19454- .section .altinstr_replacement, "ax"
19455+ .section .altinstr_replacement, "a"
19456 1: .byte 0xeb /* jmp <disp8> */
19457 .byte (memcpy_c - memcpy) - (2f - 1b) /* offset */
19458 2:
19459diff -urNp linux-2.6.32.43/arch/x86/lib/memset_64.S linux-2.6.32.43/arch/x86/lib/memset_64.S
19460--- linux-2.6.32.43/arch/x86/lib/memset_64.S 2011-03-27 14:31:47.000000000 -0400
19461+++ linux-2.6.32.43/arch/x86/lib/memset_64.S 2011-04-17 15:56:46.000000000 -0400
19462@@ -118,7 +118,7 @@ ENDPROC(__memset)
19463
19464 #include <asm/cpufeature.h>
19465
19466- .section .altinstr_replacement,"ax"
19467+ .section .altinstr_replacement,"a"
19468 1: .byte 0xeb /* jmp <disp8> */
19469 .byte (memset_c - memset) - (2f - 1b) /* offset */
19470 2:
19471diff -urNp linux-2.6.32.43/arch/x86/lib/mmx_32.c linux-2.6.32.43/arch/x86/lib/mmx_32.c
19472--- linux-2.6.32.43/arch/x86/lib/mmx_32.c 2011-03-27 14:31:47.000000000 -0400
19473+++ linux-2.6.32.43/arch/x86/lib/mmx_32.c 2011-04-17 15:56:46.000000000 -0400
19474@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *
19475 {
19476 void *p;
19477 int i;
19478+ unsigned long cr0;
19479
19480 if (unlikely(in_interrupt()))
19481 return __memcpy(to, from, len);
19482@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *
19483 kernel_fpu_begin();
19484
19485 __asm__ __volatile__ (
19486- "1: prefetch (%0)\n" /* This set is 28 bytes */
19487- " prefetch 64(%0)\n"
19488- " prefetch 128(%0)\n"
19489- " prefetch 192(%0)\n"
19490- " prefetch 256(%0)\n"
19491+ "1: prefetch (%1)\n" /* This set is 28 bytes */
19492+ " prefetch 64(%1)\n"
19493+ " prefetch 128(%1)\n"
19494+ " prefetch 192(%1)\n"
19495+ " prefetch 256(%1)\n"
19496 "2: \n"
19497 ".section .fixup, \"ax\"\n"
19498- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19499+ "3: \n"
19500+
19501+#ifdef CONFIG_PAX_KERNEXEC
19502+ " movl %%cr0, %0\n"
19503+ " movl %0, %%eax\n"
19504+ " andl $0xFFFEFFFF, %%eax\n"
19505+ " movl %%eax, %%cr0\n"
19506+#endif
19507+
19508+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19509+
19510+#ifdef CONFIG_PAX_KERNEXEC
19511+ " movl %0, %%cr0\n"
19512+#endif
19513+
19514 " jmp 2b\n"
19515 ".previous\n"
19516 _ASM_EXTABLE(1b, 3b)
19517- : : "r" (from));
19518+ : "=&r" (cr0) : "r" (from) : "ax");
19519
19520 for ( ; i > 5; i--) {
19521 __asm__ __volatile__ (
19522- "1: prefetch 320(%0)\n"
19523- "2: movq (%0), %%mm0\n"
19524- " movq 8(%0), %%mm1\n"
19525- " movq 16(%0), %%mm2\n"
19526- " movq 24(%0), %%mm3\n"
19527- " movq %%mm0, (%1)\n"
19528- " movq %%mm1, 8(%1)\n"
19529- " movq %%mm2, 16(%1)\n"
19530- " movq %%mm3, 24(%1)\n"
19531- " movq 32(%0), %%mm0\n"
19532- " movq 40(%0), %%mm1\n"
19533- " movq 48(%0), %%mm2\n"
19534- " movq 56(%0), %%mm3\n"
19535- " movq %%mm0, 32(%1)\n"
19536- " movq %%mm1, 40(%1)\n"
19537- " movq %%mm2, 48(%1)\n"
19538- " movq %%mm3, 56(%1)\n"
19539+ "1: prefetch 320(%1)\n"
19540+ "2: movq (%1), %%mm0\n"
19541+ " movq 8(%1), %%mm1\n"
19542+ " movq 16(%1), %%mm2\n"
19543+ " movq 24(%1), %%mm3\n"
19544+ " movq %%mm0, (%2)\n"
19545+ " movq %%mm1, 8(%2)\n"
19546+ " movq %%mm2, 16(%2)\n"
19547+ " movq %%mm3, 24(%2)\n"
19548+ " movq 32(%1), %%mm0\n"
19549+ " movq 40(%1), %%mm1\n"
19550+ " movq 48(%1), %%mm2\n"
19551+ " movq 56(%1), %%mm3\n"
19552+ " movq %%mm0, 32(%2)\n"
19553+ " movq %%mm1, 40(%2)\n"
19554+ " movq %%mm2, 48(%2)\n"
19555+ " movq %%mm3, 56(%2)\n"
19556 ".section .fixup, \"ax\"\n"
19557- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19558+ "3:\n"
19559+
19560+#ifdef CONFIG_PAX_KERNEXEC
19561+ " movl %%cr0, %0\n"
19562+ " movl %0, %%eax\n"
19563+ " andl $0xFFFEFFFF, %%eax\n"
19564+ " movl %%eax, %%cr0\n"
19565+#endif
19566+
19567+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19568+
19569+#ifdef CONFIG_PAX_KERNEXEC
19570+ " movl %0, %%cr0\n"
19571+#endif
19572+
19573 " jmp 2b\n"
19574 ".previous\n"
19575 _ASM_EXTABLE(1b, 3b)
19576- : : "r" (from), "r" (to) : "memory");
19577+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19578
19579 from += 64;
19580 to += 64;
19581@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
19582 static void fast_copy_page(void *to, void *from)
19583 {
19584 int i;
19585+ unsigned long cr0;
19586
19587 kernel_fpu_begin();
19588
19589@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi
19590 * but that is for later. -AV
19591 */
19592 __asm__ __volatile__(
19593- "1: prefetch (%0)\n"
19594- " prefetch 64(%0)\n"
19595- " prefetch 128(%0)\n"
19596- " prefetch 192(%0)\n"
19597- " prefetch 256(%0)\n"
19598+ "1: prefetch (%1)\n"
19599+ " prefetch 64(%1)\n"
19600+ " prefetch 128(%1)\n"
19601+ " prefetch 192(%1)\n"
19602+ " prefetch 256(%1)\n"
19603 "2: \n"
19604 ".section .fixup, \"ax\"\n"
19605- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19606+ "3: \n"
19607+
19608+#ifdef CONFIG_PAX_KERNEXEC
19609+ " movl %%cr0, %0\n"
19610+ " movl %0, %%eax\n"
19611+ " andl $0xFFFEFFFF, %%eax\n"
19612+ " movl %%eax, %%cr0\n"
19613+#endif
19614+
19615+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19616+
19617+#ifdef CONFIG_PAX_KERNEXEC
19618+ " movl %0, %%cr0\n"
19619+#endif
19620+
19621 " jmp 2b\n"
19622 ".previous\n"
19623- _ASM_EXTABLE(1b, 3b) : : "r" (from));
19624+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19625
19626 for (i = 0; i < (4096-320)/64; i++) {
19627 __asm__ __volatile__ (
19628- "1: prefetch 320(%0)\n"
19629- "2: movq (%0), %%mm0\n"
19630- " movntq %%mm0, (%1)\n"
19631- " movq 8(%0), %%mm1\n"
19632- " movntq %%mm1, 8(%1)\n"
19633- " movq 16(%0), %%mm2\n"
19634- " movntq %%mm2, 16(%1)\n"
19635- " movq 24(%0), %%mm3\n"
19636- " movntq %%mm3, 24(%1)\n"
19637- " movq 32(%0), %%mm4\n"
19638- " movntq %%mm4, 32(%1)\n"
19639- " movq 40(%0), %%mm5\n"
19640- " movntq %%mm5, 40(%1)\n"
19641- " movq 48(%0), %%mm6\n"
19642- " movntq %%mm6, 48(%1)\n"
19643- " movq 56(%0), %%mm7\n"
19644- " movntq %%mm7, 56(%1)\n"
19645+ "1: prefetch 320(%1)\n"
19646+ "2: movq (%1), %%mm0\n"
19647+ " movntq %%mm0, (%2)\n"
19648+ " movq 8(%1), %%mm1\n"
19649+ " movntq %%mm1, 8(%2)\n"
19650+ " movq 16(%1), %%mm2\n"
19651+ " movntq %%mm2, 16(%2)\n"
19652+ " movq 24(%1), %%mm3\n"
19653+ " movntq %%mm3, 24(%2)\n"
19654+ " movq 32(%1), %%mm4\n"
19655+ " movntq %%mm4, 32(%2)\n"
19656+ " movq 40(%1), %%mm5\n"
19657+ " movntq %%mm5, 40(%2)\n"
19658+ " movq 48(%1), %%mm6\n"
19659+ " movntq %%mm6, 48(%2)\n"
19660+ " movq 56(%1), %%mm7\n"
19661+ " movntq %%mm7, 56(%2)\n"
19662 ".section .fixup, \"ax\"\n"
19663- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19664+ "3:\n"
19665+
19666+#ifdef CONFIG_PAX_KERNEXEC
19667+ " movl %%cr0, %0\n"
19668+ " movl %0, %%eax\n"
19669+ " andl $0xFFFEFFFF, %%eax\n"
19670+ " movl %%eax, %%cr0\n"
19671+#endif
19672+
19673+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19674+
19675+#ifdef CONFIG_PAX_KERNEXEC
19676+ " movl %0, %%cr0\n"
19677+#endif
19678+
19679 " jmp 2b\n"
19680 ".previous\n"
19681- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
19682+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19683
19684 from += 64;
19685 to += 64;
19686@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
19687 static void fast_copy_page(void *to, void *from)
19688 {
19689 int i;
19690+ unsigned long cr0;
19691
19692 kernel_fpu_begin();
19693
19694 __asm__ __volatile__ (
19695- "1: prefetch (%0)\n"
19696- " prefetch 64(%0)\n"
19697- " prefetch 128(%0)\n"
19698- " prefetch 192(%0)\n"
19699- " prefetch 256(%0)\n"
19700+ "1: prefetch (%1)\n"
19701+ " prefetch 64(%1)\n"
19702+ " prefetch 128(%1)\n"
19703+ " prefetch 192(%1)\n"
19704+ " prefetch 256(%1)\n"
19705 "2: \n"
19706 ".section .fixup, \"ax\"\n"
19707- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19708+ "3: \n"
19709+
19710+#ifdef CONFIG_PAX_KERNEXEC
19711+ " movl %%cr0, %0\n"
19712+ " movl %0, %%eax\n"
19713+ " andl $0xFFFEFFFF, %%eax\n"
19714+ " movl %%eax, %%cr0\n"
19715+#endif
19716+
19717+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
19718+
19719+#ifdef CONFIG_PAX_KERNEXEC
19720+ " movl %0, %%cr0\n"
19721+#endif
19722+
19723 " jmp 2b\n"
19724 ".previous\n"
19725- _ASM_EXTABLE(1b, 3b) : : "r" (from));
19726+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
19727
19728 for (i = 0; i < 4096/64; i++) {
19729 __asm__ __volatile__ (
19730- "1: prefetch 320(%0)\n"
19731- "2: movq (%0), %%mm0\n"
19732- " movq 8(%0), %%mm1\n"
19733- " movq 16(%0), %%mm2\n"
19734- " movq 24(%0), %%mm3\n"
19735- " movq %%mm0, (%1)\n"
19736- " movq %%mm1, 8(%1)\n"
19737- " movq %%mm2, 16(%1)\n"
19738- " movq %%mm3, 24(%1)\n"
19739- " movq 32(%0), %%mm0\n"
19740- " movq 40(%0), %%mm1\n"
19741- " movq 48(%0), %%mm2\n"
19742- " movq 56(%0), %%mm3\n"
19743- " movq %%mm0, 32(%1)\n"
19744- " movq %%mm1, 40(%1)\n"
19745- " movq %%mm2, 48(%1)\n"
19746- " movq %%mm3, 56(%1)\n"
19747+ "1: prefetch 320(%1)\n"
19748+ "2: movq (%1), %%mm0\n"
19749+ " movq 8(%1), %%mm1\n"
19750+ " movq 16(%1), %%mm2\n"
19751+ " movq 24(%1), %%mm3\n"
19752+ " movq %%mm0, (%2)\n"
19753+ " movq %%mm1, 8(%2)\n"
19754+ " movq %%mm2, 16(%2)\n"
19755+ " movq %%mm3, 24(%2)\n"
19756+ " movq 32(%1), %%mm0\n"
19757+ " movq 40(%1), %%mm1\n"
19758+ " movq 48(%1), %%mm2\n"
19759+ " movq 56(%1), %%mm3\n"
19760+ " movq %%mm0, 32(%2)\n"
19761+ " movq %%mm1, 40(%2)\n"
19762+ " movq %%mm2, 48(%2)\n"
19763+ " movq %%mm3, 56(%2)\n"
19764 ".section .fixup, \"ax\"\n"
19765- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19766+ "3:\n"
19767+
19768+#ifdef CONFIG_PAX_KERNEXEC
19769+ " movl %%cr0, %0\n"
19770+ " movl %0, %%eax\n"
19771+ " andl $0xFFFEFFFF, %%eax\n"
19772+ " movl %%eax, %%cr0\n"
19773+#endif
19774+
19775+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
19776+
19777+#ifdef CONFIG_PAX_KERNEXEC
19778+ " movl %0, %%cr0\n"
19779+#endif
19780+
19781 " jmp 2b\n"
19782 ".previous\n"
19783 _ASM_EXTABLE(1b, 3b)
19784- : : "r" (from), "r" (to) : "memory");
19785+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
19786
19787 from += 64;
19788 to += 64;
19789diff -urNp linux-2.6.32.43/arch/x86/lib/putuser.S linux-2.6.32.43/arch/x86/lib/putuser.S
19790--- linux-2.6.32.43/arch/x86/lib/putuser.S 2011-03-27 14:31:47.000000000 -0400
19791+++ linux-2.6.32.43/arch/x86/lib/putuser.S 2011-04-17 15:56:46.000000000 -0400
19792@@ -15,7 +15,8 @@
19793 #include <asm/thread_info.h>
19794 #include <asm/errno.h>
19795 #include <asm/asm.h>
19796-
19797+#include <asm/segment.h>
19798+#include <asm/pgtable.h>
19799
19800 /*
19801 * __put_user_X
19802@@ -29,52 +30,119 @@
19803 * as they get called from within inline assembly.
19804 */
19805
19806-#define ENTER CFI_STARTPROC ; \
19807- GET_THREAD_INFO(%_ASM_BX)
19808+#define ENTER CFI_STARTPROC
19809 #define EXIT ret ; \
19810 CFI_ENDPROC
19811
19812+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19813+#define _DEST %_ASM_CX,%_ASM_BX
19814+#else
19815+#define _DEST %_ASM_CX
19816+#endif
19817+
19818+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
19819+#define __copyuser_seg gs;
19820+#else
19821+#define __copyuser_seg
19822+#endif
19823+
19824 .text
19825 ENTRY(__put_user_1)
19826 ENTER
19827+
19828+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19829+ GET_THREAD_INFO(%_ASM_BX)
19830 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
19831 jae bad_put_user
19832-1: movb %al,(%_ASM_CX)
19833+
19834+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19835+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19836+ cmp %_ASM_BX,%_ASM_CX
19837+ jb 1234f
19838+ xor %ebx,%ebx
19839+1234:
19840+#endif
19841+
19842+#endif
19843+
19844+1: __copyuser_seg movb %al,(_DEST)
19845 xor %eax,%eax
19846 EXIT
19847 ENDPROC(__put_user_1)
19848
19849 ENTRY(__put_user_2)
19850 ENTER
19851+
19852+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19853+ GET_THREAD_INFO(%_ASM_BX)
19854 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19855 sub $1,%_ASM_BX
19856 cmp %_ASM_BX,%_ASM_CX
19857 jae bad_put_user
19858-2: movw %ax,(%_ASM_CX)
19859+
19860+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19861+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19862+ cmp %_ASM_BX,%_ASM_CX
19863+ jb 1234f
19864+ xor %ebx,%ebx
19865+1234:
19866+#endif
19867+
19868+#endif
19869+
19870+2: __copyuser_seg movw %ax,(_DEST)
19871 xor %eax,%eax
19872 EXIT
19873 ENDPROC(__put_user_2)
19874
19875 ENTRY(__put_user_4)
19876 ENTER
19877+
19878+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19879+ GET_THREAD_INFO(%_ASM_BX)
19880 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19881 sub $3,%_ASM_BX
19882 cmp %_ASM_BX,%_ASM_CX
19883 jae bad_put_user
19884-3: movl %eax,(%_ASM_CX)
19885+
19886+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19887+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19888+ cmp %_ASM_BX,%_ASM_CX
19889+ jb 1234f
19890+ xor %ebx,%ebx
19891+1234:
19892+#endif
19893+
19894+#endif
19895+
19896+3: __copyuser_seg movl %eax,(_DEST)
19897 xor %eax,%eax
19898 EXIT
19899 ENDPROC(__put_user_4)
19900
19901 ENTRY(__put_user_8)
19902 ENTER
19903+
19904+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
19905+ GET_THREAD_INFO(%_ASM_BX)
19906 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
19907 sub $7,%_ASM_BX
19908 cmp %_ASM_BX,%_ASM_CX
19909 jae bad_put_user
19910-4: mov %_ASM_AX,(%_ASM_CX)
19911+
19912+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
19913+ mov $PAX_USER_SHADOW_BASE,%_ASM_BX
19914+ cmp %_ASM_BX,%_ASM_CX
19915+ jb 1234f
19916+ xor %ebx,%ebx
19917+1234:
19918+#endif
19919+
19920+#endif
19921+
19922+4: __copyuser_seg mov %_ASM_AX,(_DEST)
19923 #ifdef CONFIG_X86_32
19924-5: movl %edx,4(%_ASM_CX)
19925+5: __copyuser_seg movl %edx,4(_DEST)
19926 #endif
19927 xor %eax,%eax
19928 EXIT
19929diff -urNp linux-2.6.32.43/arch/x86/lib/usercopy_32.c linux-2.6.32.43/arch/x86/lib/usercopy_32.c
19930--- linux-2.6.32.43/arch/x86/lib/usercopy_32.c 2011-03-27 14:31:47.000000000 -0400
19931+++ linux-2.6.32.43/arch/x86/lib/usercopy_32.c 2011-04-23 21:12:28.000000000 -0400
19932@@ -43,7 +43,7 @@ do { \
19933 __asm__ __volatile__( \
19934 " testl %1,%1\n" \
19935 " jz 2f\n" \
19936- "0: lodsb\n" \
19937+ "0: "__copyuser_seg"lodsb\n" \
19938 " stosb\n" \
19939 " testb %%al,%%al\n" \
19940 " jz 1f\n" \
19941@@ -128,10 +128,12 @@ do { \
19942 int __d0; \
19943 might_fault(); \
19944 __asm__ __volatile__( \
19945+ __COPYUSER_SET_ES \
19946 "0: rep; stosl\n" \
19947 " movl %2,%0\n" \
19948 "1: rep; stosb\n" \
19949 "2:\n" \
19950+ __COPYUSER_RESTORE_ES \
19951 ".section .fixup,\"ax\"\n" \
19952 "3: lea 0(%2,%0,4),%0\n" \
19953 " jmp 2b\n" \
19954@@ -200,6 +202,7 @@ long strnlen_user(const char __user *s,
19955 might_fault();
19956
19957 __asm__ __volatile__(
19958+ __COPYUSER_SET_ES
19959 " testl %0, %0\n"
19960 " jz 3f\n"
19961 " andl %0,%%ecx\n"
19962@@ -208,6 +211,7 @@ long strnlen_user(const char __user *s,
19963 " subl %%ecx,%0\n"
19964 " addl %0,%%eax\n"
19965 "1:\n"
19966+ __COPYUSER_RESTORE_ES
19967 ".section .fixup,\"ax\"\n"
19968 "2: xorl %%eax,%%eax\n"
19969 " jmp 1b\n"
19970@@ -227,7 +231,7 @@ EXPORT_SYMBOL(strnlen_user);
19971
19972 #ifdef CONFIG_X86_INTEL_USERCOPY
19973 static unsigned long
19974-__copy_user_intel(void __user *to, const void *from, unsigned long size)
19975+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
19976 {
19977 int d0, d1;
19978 __asm__ __volatile__(
19979@@ -239,36 +243,36 @@ __copy_user_intel(void __user *to, const
19980 " .align 2,0x90\n"
19981 "3: movl 0(%4), %%eax\n"
19982 "4: movl 4(%4), %%edx\n"
19983- "5: movl %%eax, 0(%3)\n"
19984- "6: movl %%edx, 4(%3)\n"
19985+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
19986+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
19987 "7: movl 8(%4), %%eax\n"
19988 "8: movl 12(%4),%%edx\n"
19989- "9: movl %%eax, 8(%3)\n"
19990- "10: movl %%edx, 12(%3)\n"
19991+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
19992+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
19993 "11: movl 16(%4), %%eax\n"
19994 "12: movl 20(%4), %%edx\n"
19995- "13: movl %%eax, 16(%3)\n"
19996- "14: movl %%edx, 20(%3)\n"
19997+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
19998+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
19999 "15: movl 24(%4), %%eax\n"
20000 "16: movl 28(%4), %%edx\n"
20001- "17: movl %%eax, 24(%3)\n"
20002- "18: movl %%edx, 28(%3)\n"
20003+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
20004+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
20005 "19: movl 32(%4), %%eax\n"
20006 "20: movl 36(%4), %%edx\n"
20007- "21: movl %%eax, 32(%3)\n"
20008- "22: movl %%edx, 36(%3)\n"
20009+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
20010+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
20011 "23: movl 40(%4), %%eax\n"
20012 "24: movl 44(%4), %%edx\n"
20013- "25: movl %%eax, 40(%3)\n"
20014- "26: movl %%edx, 44(%3)\n"
20015+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
20016+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
20017 "27: movl 48(%4), %%eax\n"
20018 "28: movl 52(%4), %%edx\n"
20019- "29: movl %%eax, 48(%3)\n"
20020- "30: movl %%edx, 52(%3)\n"
20021+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
20022+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
20023 "31: movl 56(%4), %%eax\n"
20024 "32: movl 60(%4), %%edx\n"
20025- "33: movl %%eax, 56(%3)\n"
20026- "34: movl %%edx, 60(%3)\n"
20027+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
20028+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
20029 " addl $-64, %0\n"
20030 " addl $64, %4\n"
20031 " addl $64, %3\n"
20032@@ -278,10 +282,119 @@ __copy_user_intel(void __user *to, const
20033 " shrl $2, %0\n"
20034 " andl $3, %%eax\n"
20035 " cld\n"
20036+ __COPYUSER_SET_ES
20037 "99: rep; movsl\n"
20038 "36: movl %%eax, %0\n"
20039 "37: rep; movsb\n"
20040 "100:\n"
20041+ __COPYUSER_RESTORE_ES
20042+ ".section .fixup,\"ax\"\n"
20043+ "101: lea 0(%%eax,%0,4),%0\n"
20044+ " jmp 100b\n"
20045+ ".previous\n"
20046+ ".section __ex_table,\"a\"\n"
20047+ " .align 4\n"
20048+ " .long 1b,100b\n"
20049+ " .long 2b,100b\n"
20050+ " .long 3b,100b\n"
20051+ " .long 4b,100b\n"
20052+ " .long 5b,100b\n"
20053+ " .long 6b,100b\n"
20054+ " .long 7b,100b\n"
20055+ " .long 8b,100b\n"
20056+ " .long 9b,100b\n"
20057+ " .long 10b,100b\n"
20058+ " .long 11b,100b\n"
20059+ " .long 12b,100b\n"
20060+ " .long 13b,100b\n"
20061+ " .long 14b,100b\n"
20062+ " .long 15b,100b\n"
20063+ " .long 16b,100b\n"
20064+ " .long 17b,100b\n"
20065+ " .long 18b,100b\n"
20066+ " .long 19b,100b\n"
20067+ " .long 20b,100b\n"
20068+ " .long 21b,100b\n"
20069+ " .long 22b,100b\n"
20070+ " .long 23b,100b\n"
20071+ " .long 24b,100b\n"
20072+ " .long 25b,100b\n"
20073+ " .long 26b,100b\n"
20074+ " .long 27b,100b\n"
20075+ " .long 28b,100b\n"
20076+ " .long 29b,100b\n"
20077+ " .long 30b,100b\n"
20078+ " .long 31b,100b\n"
20079+ " .long 32b,100b\n"
20080+ " .long 33b,100b\n"
20081+ " .long 34b,100b\n"
20082+ " .long 35b,100b\n"
20083+ " .long 36b,100b\n"
20084+ " .long 37b,100b\n"
20085+ " .long 99b,101b\n"
20086+ ".previous"
20087+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
20088+ : "1"(to), "2"(from), "0"(size)
20089+ : "eax", "edx", "memory");
20090+ return size;
20091+}
20092+
20093+static unsigned long
20094+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
20095+{
20096+ int d0, d1;
20097+ __asm__ __volatile__(
20098+ " .align 2,0x90\n"
20099+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
20100+ " cmpl $67, %0\n"
20101+ " jbe 3f\n"
20102+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
20103+ " .align 2,0x90\n"
20104+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
20105+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
20106+ "5: movl %%eax, 0(%3)\n"
20107+ "6: movl %%edx, 4(%3)\n"
20108+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
20109+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
20110+ "9: movl %%eax, 8(%3)\n"
20111+ "10: movl %%edx, 12(%3)\n"
20112+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
20113+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
20114+ "13: movl %%eax, 16(%3)\n"
20115+ "14: movl %%edx, 20(%3)\n"
20116+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
20117+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
20118+ "17: movl %%eax, 24(%3)\n"
20119+ "18: movl %%edx, 28(%3)\n"
20120+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
20121+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
20122+ "21: movl %%eax, 32(%3)\n"
20123+ "22: movl %%edx, 36(%3)\n"
20124+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
20125+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
20126+ "25: movl %%eax, 40(%3)\n"
20127+ "26: movl %%edx, 44(%3)\n"
20128+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
20129+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
20130+ "29: movl %%eax, 48(%3)\n"
20131+ "30: movl %%edx, 52(%3)\n"
20132+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
20133+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
20134+ "33: movl %%eax, 56(%3)\n"
20135+ "34: movl %%edx, 60(%3)\n"
20136+ " addl $-64, %0\n"
20137+ " addl $64, %4\n"
20138+ " addl $64, %3\n"
20139+ " cmpl $63, %0\n"
20140+ " ja 1b\n"
20141+ "35: movl %0, %%eax\n"
20142+ " shrl $2, %0\n"
20143+ " andl $3, %%eax\n"
20144+ " cld\n"
20145+ "99: rep; "__copyuser_seg" movsl\n"
20146+ "36: movl %%eax, %0\n"
20147+ "37: rep; "__copyuser_seg" movsb\n"
20148+ "100:\n"
20149 ".section .fixup,\"ax\"\n"
20150 "101: lea 0(%%eax,%0,4),%0\n"
20151 " jmp 100b\n"
20152@@ -339,41 +452,41 @@ __copy_user_zeroing_intel(void *to, cons
20153 int d0, d1;
20154 __asm__ __volatile__(
20155 " .align 2,0x90\n"
20156- "0: movl 32(%4), %%eax\n"
20157+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20158 " cmpl $67, %0\n"
20159 " jbe 2f\n"
20160- "1: movl 64(%4), %%eax\n"
20161+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20162 " .align 2,0x90\n"
20163- "2: movl 0(%4), %%eax\n"
20164- "21: movl 4(%4), %%edx\n"
20165+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20166+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20167 " movl %%eax, 0(%3)\n"
20168 " movl %%edx, 4(%3)\n"
20169- "3: movl 8(%4), %%eax\n"
20170- "31: movl 12(%4),%%edx\n"
20171+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20172+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20173 " movl %%eax, 8(%3)\n"
20174 " movl %%edx, 12(%3)\n"
20175- "4: movl 16(%4), %%eax\n"
20176- "41: movl 20(%4), %%edx\n"
20177+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20178+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20179 " movl %%eax, 16(%3)\n"
20180 " movl %%edx, 20(%3)\n"
20181- "10: movl 24(%4), %%eax\n"
20182- "51: movl 28(%4), %%edx\n"
20183+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20184+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20185 " movl %%eax, 24(%3)\n"
20186 " movl %%edx, 28(%3)\n"
20187- "11: movl 32(%4), %%eax\n"
20188- "61: movl 36(%4), %%edx\n"
20189+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20190+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20191 " movl %%eax, 32(%3)\n"
20192 " movl %%edx, 36(%3)\n"
20193- "12: movl 40(%4), %%eax\n"
20194- "71: movl 44(%4), %%edx\n"
20195+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20196+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20197 " movl %%eax, 40(%3)\n"
20198 " movl %%edx, 44(%3)\n"
20199- "13: movl 48(%4), %%eax\n"
20200- "81: movl 52(%4), %%edx\n"
20201+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20202+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20203 " movl %%eax, 48(%3)\n"
20204 " movl %%edx, 52(%3)\n"
20205- "14: movl 56(%4), %%eax\n"
20206- "91: movl 60(%4), %%edx\n"
20207+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20208+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20209 " movl %%eax, 56(%3)\n"
20210 " movl %%edx, 60(%3)\n"
20211 " addl $-64, %0\n"
20212@@ -385,9 +498,9 @@ __copy_user_zeroing_intel(void *to, cons
20213 " shrl $2, %0\n"
20214 " andl $3, %%eax\n"
20215 " cld\n"
20216- "6: rep; movsl\n"
20217+ "6: rep; "__copyuser_seg" movsl\n"
20218 " movl %%eax,%0\n"
20219- "7: rep; movsb\n"
20220+ "7: rep; "__copyuser_seg" movsb\n"
20221 "8:\n"
20222 ".section .fixup,\"ax\"\n"
20223 "9: lea 0(%%eax,%0,4),%0\n"
20224@@ -440,41 +553,41 @@ static unsigned long __copy_user_zeroing
20225
20226 __asm__ __volatile__(
20227 " .align 2,0x90\n"
20228- "0: movl 32(%4), %%eax\n"
20229+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20230 " cmpl $67, %0\n"
20231 " jbe 2f\n"
20232- "1: movl 64(%4), %%eax\n"
20233+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20234 " .align 2,0x90\n"
20235- "2: movl 0(%4), %%eax\n"
20236- "21: movl 4(%4), %%edx\n"
20237+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20238+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20239 " movnti %%eax, 0(%3)\n"
20240 " movnti %%edx, 4(%3)\n"
20241- "3: movl 8(%4), %%eax\n"
20242- "31: movl 12(%4),%%edx\n"
20243+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20244+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20245 " movnti %%eax, 8(%3)\n"
20246 " movnti %%edx, 12(%3)\n"
20247- "4: movl 16(%4), %%eax\n"
20248- "41: movl 20(%4), %%edx\n"
20249+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20250+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20251 " movnti %%eax, 16(%3)\n"
20252 " movnti %%edx, 20(%3)\n"
20253- "10: movl 24(%4), %%eax\n"
20254- "51: movl 28(%4), %%edx\n"
20255+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20256+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20257 " movnti %%eax, 24(%3)\n"
20258 " movnti %%edx, 28(%3)\n"
20259- "11: movl 32(%4), %%eax\n"
20260- "61: movl 36(%4), %%edx\n"
20261+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20262+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20263 " movnti %%eax, 32(%3)\n"
20264 " movnti %%edx, 36(%3)\n"
20265- "12: movl 40(%4), %%eax\n"
20266- "71: movl 44(%4), %%edx\n"
20267+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20268+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20269 " movnti %%eax, 40(%3)\n"
20270 " movnti %%edx, 44(%3)\n"
20271- "13: movl 48(%4), %%eax\n"
20272- "81: movl 52(%4), %%edx\n"
20273+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20274+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20275 " movnti %%eax, 48(%3)\n"
20276 " movnti %%edx, 52(%3)\n"
20277- "14: movl 56(%4), %%eax\n"
20278- "91: movl 60(%4), %%edx\n"
20279+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20280+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20281 " movnti %%eax, 56(%3)\n"
20282 " movnti %%edx, 60(%3)\n"
20283 " addl $-64, %0\n"
20284@@ -487,9 +600,9 @@ static unsigned long __copy_user_zeroing
20285 " shrl $2, %0\n"
20286 " andl $3, %%eax\n"
20287 " cld\n"
20288- "6: rep; movsl\n"
20289+ "6: rep; "__copyuser_seg" movsl\n"
20290 " movl %%eax,%0\n"
20291- "7: rep; movsb\n"
20292+ "7: rep; "__copyuser_seg" movsb\n"
20293 "8:\n"
20294 ".section .fixup,\"ax\"\n"
20295 "9: lea 0(%%eax,%0,4),%0\n"
20296@@ -537,41 +650,41 @@ static unsigned long __copy_user_intel_n
20297
20298 __asm__ __volatile__(
20299 " .align 2,0x90\n"
20300- "0: movl 32(%4), %%eax\n"
20301+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
20302 " cmpl $67, %0\n"
20303 " jbe 2f\n"
20304- "1: movl 64(%4), %%eax\n"
20305+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
20306 " .align 2,0x90\n"
20307- "2: movl 0(%4), %%eax\n"
20308- "21: movl 4(%4), %%edx\n"
20309+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
20310+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
20311 " movnti %%eax, 0(%3)\n"
20312 " movnti %%edx, 4(%3)\n"
20313- "3: movl 8(%4), %%eax\n"
20314- "31: movl 12(%4),%%edx\n"
20315+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
20316+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
20317 " movnti %%eax, 8(%3)\n"
20318 " movnti %%edx, 12(%3)\n"
20319- "4: movl 16(%4), %%eax\n"
20320- "41: movl 20(%4), %%edx\n"
20321+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
20322+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
20323 " movnti %%eax, 16(%3)\n"
20324 " movnti %%edx, 20(%3)\n"
20325- "10: movl 24(%4), %%eax\n"
20326- "51: movl 28(%4), %%edx\n"
20327+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
20328+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
20329 " movnti %%eax, 24(%3)\n"
20330 " movnti %%edx, 28(%3)\n"
20331- "11: movl 32(%4), %%eax\n"
20332- "61: movl 36(%4), %%edx\n"
20333+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
20334+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
20335 " movnti %%eax, 32(%3)\n"
20336 " movnti %%edx, 36(%3)\n"
20337- "12: movl 40(%4), %%eax\n"
20338- "71: movl 44(%4), %%edx\n"
20339+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
20340+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
20341 " movnti %%eax, 40(%3)\n"
20342 " movnti %%edx, 44(%3)\n"
20343- "13: movl 48(%4), %%eax\n"
20344- "81: movl 52(%4), %%edx\n"
20345+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
20346+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
20347 " movnti %%eax, 48(%3)\n"
20348 " movnti %%edx, 52(%3)\n"
20349- "14: movl 56(%4), %%eax\n"
20350- "91: movl 60(%4), %%edx\n"
20351+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
20352+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
20353 " movnti %%eax, 56(%3)\n"
20354 " movnti %%edx, 60(%3)\n"
20355 " addl $-64, %0\n"
20356@@ -584,9 +697,9 @@ static unsigned long __copy_user_intel_n
20357 " shrl $2, %0\n"
20358 " andl $3, %%eax\n"
20359 " cld\n"
20360- "6: rep; movsl\n"
20361+ "6: rep; "__copyuser_seg" movsl\n"
20362 " movl %%eax,%0\n"
20363- "7: rep; movsb\n"
20364+ "7: rep; "__copyuser_seg" movsb\n"
20365 "8:\n"
20366 ".section .fixup,\"ax\"\n"
20367 "9: lea 0(%%eax,%0,4),%0\n"
20368@@ -629,32 +742,36 @@ static unsigned long __copy_user_intel_n
20369 */
20370 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
20371 unsigned long size);
20372-unsigned long __copy_user_intel(void __user *to, const void *from,
20373+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
20374+ unsigned long size);
20375+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
20376 unsigned long size);
20377 unsigned long __copy_user_zeroing_intel_nocache(void *to,
20378 const void __user *from, unsigned long size);
20379 #endif /* CONFIG_X86_INTEL_USERCOPY */
20380
20381 /* Generic arbitrary sized copy. */
20382-#define __copy_user(to, from, size) \
20383+#define __copy_user(to, from, size, prefix, set, restore) \
20384 do { \
20385 int __d0, __d1, __d2; \
20386 __asm__ __volatile__( \
20387+ set \
20388 " cmp $7,%0\n" \
20389 " jbe 1f\n" \
20390 " movl %1,%0\n" \
20391 " negl %0\n" \
20392 " andl $7,%0\n" \
20393 " subl %0,%3\n" \
20394- "4: rep; movsb\n" \
20395+ "4: rep; "prefix"movsb\n" \
20396 " movl %3,%0\n" \
20397 " shrl $2,%0\n" \
20398 " andl $3,%3\n" \
20399 " .align 2,0x90\n" \
20400- "0: rep; movsl\n" \
20401+ "0: rep; "prefix"movsl\n" \
20402 " movl %3,%0\n" \
20403- "1: rep; movsb\n" \
20404+ "1: rep; "prefix"movsb\n" \
20405 "2:\n" \
20406+ restore \
20407 ".section .fixup,\"ax\"\n" \
20408 "5: addl %3,%0\n" \
20409 " jmp 2b\n" \
20410@@ -682,14 +799,14 @@ do { \
20411 " negl %0\n" \
20412 " andl $7,%0\n" \
20413 " subl %0,%3\n" \
20414- "4: rep; movsb\n" \
20415+ "4: rep; "__copyuser_seg"movsb\n" \
20416 " movl %3,%0\n" \
20417 " shrl $2,%0\n" \
20418 " andl $3,%3\n" \
20419 " .align 2,0x90\n" \
20420- "0: rep; movsl\n" \
20421+ "0: rep; "__copyuser_seg"movsl\n" \
20422 " movl %3,%0\n" \
20423- "1: rep; movsb\n" \
20424+ "1: rep; "__copyuser_seg"movsb\n" \
20425 "2:\n" \
20426 ".section .fixup,\"ax\"\n" \
20427 "5: addl %3,%0\n" \
20428@@ -775,9 +892,9 @@ survive:
20429 }
20430 #endif
20431 if (movsl_is_ok(to, from, n))
20432- __copy_user(to, from, n);
20433+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
20434 else
20435- n = __copy_user_intel(to, from, n);
20436+ n = __generic_copy_to_user_intel(to, from, n);
20437 return n;
20438 }
20439 EXPORT_SYMBOL(__copy_to_user_ll);
20440@@ -797,10 +914,9 @@ unsigned long __copy_from_user_ll_nozero
20441 unsigned long n)
20442 {
20443 if (movsl_is_ok(to, from, n))
20444- __copy_user(to, from, n);
20445+ __copy_user(to, from, n, __copyuser_seg, "", "");
20446 else
20447- n = __copy_user_intel((void __user *)to,
20448- (const void *)from, n);
20449+ n = __generic_copy_from_user_intel(to, from, n);
20450 return n;
20451 }
20452 EXPORT_SYMBOL(__copy_from_user_ll_nozero);
20453@@ -827,59 +943,38 @@ unsigned long __copy_from_user_ll_nocach
20454 if (n > 64 && cpu_has_xmm2)
20455 n = __copy_user_intel_nocache(to, from, n);
20456 else
20457- __copy_user(to, from, n);
20458+ __copy_user(to, from, n, __copyuser_seg, "", "");
20459 #else
20460- __copy_user(to, from, n);
20461+ __copy_user(to, from, n, __copyuser_seg, "", "");
20462 #endif
20463 return n;
20464 }
20465 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
20466
20467-/**
20468- * copy_to_user: - Copy a block of data into user space.
20469- * @to: Destination address, in user space.
20470- * @from: Source address, in kernel space.
20471- * @n: Number of bytes to copy.
20472- *
20473- * Context: User context only. This function may sleep.
20474- *
20475- * Copy data from kernel space to user space.
20476- *
20477- * Returns number of bytes that could not be copied.
20478- * On success, this will be zero.
20479- */
20480-unsigned long
20481-copy_to_user(void __user *to, const void *from, unsigned long n)
20482+#ifdef CONFIG_PAX_MEMORY_UDEREF
20483+void __set_fs(mm_segment_t x)
20484 {
20485- if (access_ok(VERIFY_WRITE, to, n))
20486- n = __copy_to_user(to, from, n);
20487- return n;
20488+ switch (x.seg) {
20489+ case 0:
20490+ loadsegment(gs, 0);
20491+ break;
20492+ case TASK_SIZE_MAX:
20493+ loadsegment(gs, __USER_DS);
20494+ break;
20495+ case -1UL:
20496+ loadsegment(gs, __KERNEL_DS);
20497+ break;
20498+ default:
20499+ BUG();
20500+ }
20501+ return;
20502 }
20503-EXPORT_SYMBOL(copy_to_user);
20504+EXPORT_SYMBOL(__set_fs);
20505
20506-/**
20507- * copy_from_user: - Copy a block of data from user space.
20508- * @to: Destination address, in kernel space.
20509- * @from: Source address, in user space.
20510- * @n: Number of bytes to copy.
20511- *
20512- * Context: User context only. This function may sleep.
20513- *
20514- * Copy data from user space to kernel space.
20515- *
20516- * Returns number of bytes that could not be copied.
20517- * On success, this will be zero.
20518- *
20519- * If some data could not be copied, this function will pad the copied
20520- * data to the requested size using zero bytes.
20521- */
20522-unsigned long
20523-copy_from_user(void *to, const void __user *from, unsigned long n)
20524+void set_fs(mm_segment_t x)
20525 {
20526- if (access_ok(VERIFY_READ, from, n))
20527- n = __copy_from_user(to, from, n);
20528- else
20529- memset(to, 0, n);
20530- return n;
20531+ current_thread_info()->addr_limit = x;
20532+ __set_fs(x);
20533 }
20534-EXPORT_SYMBOL(copy_from_user);
20535+EXPORT_SYMBOL(set_fs);
20536+#endif
20537diff -urNp linux-2.6.32.43/arch/x86/lib/usercopy_64.c linux-2.6.32.43/arch/x86/lib/usercopy_64.c
20538--- linux-2.6.32.43/arch/x86/lib/usercopy_64.c 2011-03-27 14:31:47.000000000 -0400
20539+++ linux-2.6.32.43/arch/x86/lib/usercopy_64.c 2011-05-04 17:56:20.000000000 -0400
20540@@ -42,6 +42,12 @@ long
20541 __strncpy_from_user(char *dst, const char __user *src, long count)
20542 {
20543 long res;
20544+
20545+#ifdef CONFIG_PAX_MEMORY_UDEREF
20546+ if ((unsigned long)src < PAX_USER_SHADOW_BASE)
20547+ src += PAX_USER_SHADOW_BASE;
20548+#endif
20549+
20550 __do_strncpy_from_user(dst, src, count, res);
20551 return res;
20552 }
20553@@ -65,6 +71,12 @@ unsigned long __clear_user(void __user *
20554 {
20555 long __d0;
20556 might_fault();
20557+
20558+#ifdef CONFIG_PAX_MEMORY_UDEREF
20559+ if ((unsigned long)addr < PAX_USER_SHADOW_BASE)
20560+ addr += PAX_USER_SHADOW_BASE;
20561+#endif
20562+
20563 /* no memory constraint because it doesn't change any memory gcc knows
20564 about */
20565 asm volatile(
20566@@ -151,10 +163,18 @@ EXPORT_SYMBOL(strlen_user);
20567
20568 unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
20569 {
20570- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20571+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
20572+
20573+#ifdef CONFIG_PAX_MEMORY_UDEREF
20574+ if ((unsigned long)to < PAX_USER_SHADOW_BASE)
20575+ to += PAX_USER_SHADOW_BASE;
20576+ if ((unsigned long)from < PAX_USER_SHADOW_BASE)
20577+ from += PAX_USER_SHADOW_BASE;
20578+#endif
20579+
20580 return copy_user_generic((__force void *)to, (__force void *)from, len);
20581- }
20582- return len;
20583+ }
20584+ return len;
20585 }
20586 EXPORT_SYMBOL(copy_in_user);
20587
20588diff -urNp linux-2.6.32.43/arch/x86/Makefile linux-2.6.32.43/arch/x86/Makefile
20589--- linux-2.6.32.43/arch/x86/Makefile 2011-03-27 14:31:47.000000000 -0400
20590+++ linux-2.6.32.43/arch/x86/Makefile 2011-07-19 18:16:02.000000000 -0400
20591@@ -44,6 +44,7 @@ ifeq ($(CONFIG_X86_32),y)
20592 else
20593 BITS := 64
20594 UTS_MACHINE := x86_64
20595+ biarch := $(call cc-option,-m64)
20596 CHECKFLAGS += -D__x86_64__ -m64
20597
20598 KBUILD_AFLAGS += -m64
20599@@ -189,3 +190,12 @@ define archhelp
20600 echo ' FDARGS="..." arguments for the booted kernel'
20601 echo ' FDINITRD=file initrd for the booted kernel'
20602 endef
20603+
20604+define OLD_LD
20605+
20606+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
20607+*** Please upgrade your binutils to 2.18 or newer
20608+endef
20609+
20610+archprepare:
20611+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
20612diff -urNp linux-2.6.32.43/arch/x86/mm/extable.c linux-2.6.32.43/arch/x86/mm/extable.c
20613--- linux-2.6.32.43/arch/x86/mm/extable.c 2011-03-27 14:31:47.000000000 -0400
20614+++ linux-2.6.32.43/arch/x86/mm/extable.c 2011-04-17 15:56:46.000000000 -0400
20615@@ -1,14 +1,71 @@
20616 #include <linux/module.h>
20617 #include <linux/spinlock.h>
20618+#include <linux/sort.h>
20619 #include <asm/uaccess.h>
20620+#include <asm/pgtable.h>
20621
20622+/*
20623+ * The exception table needs to be sorted so that the binary
20624+ * search that we use to find entries in it works properly.
20625+ * This is used both for the kernel exception table and for
20626+ * the exception tables of modules that get loaded.
20627+ */
20628+static int cmp_ex(const void *a, const void *b)
20629+{
20630+ const struct exception_table_entry *x = a, *y = b;
20631+
20632+ /* avoid overflow */
20633+ if (x->insn > y->insn)
20634+ return 1;
20635+ if (x->insn < y->insn)
20636+ return -1;
20637+ return 0;
20638+}
20639+
20640+static void swap_ex(void *a, void *b, int size)
20641+{
20642+ struct exception_table_entry t, *x = a, *y = b;
20643+
20644+ t = *x;
20645+
20646+ pax_open_kernel();
20647+ *x = *y;
20648+ *y = t;
20649+ pax_close_kernel();
20650+}
20651+
20652+void sort_extable(struct exception_table_entry *start,
20653+ struct exception_table_entry *finish)
20654+{
20655+ sort(start, finish - start, sizeof(struct exception_table_entry),
20656+ cmp_ex, swap_ex);
20657+}
20658+
20659+#ifdef CONFIG_MODULES
20660+/*
20661+ * If the exception table is sorted, any referring to the module init
20662+ * will be at the beginning or the end.
20663+ */
20664+void trim_init_extable(struct module *m)
20665+{
20666+ /*trim the beginning*/
20667+ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) {
20668+ m->extable++;
20669+ m->num_exentries--;
20670+ }
20671+ /*trim the end*/
20672+ while (m->num_exentries &&
20673+ within_module_init(m->extable[m->num_exentries-1].insn, m))
20674+ m->num_exentries--;
20675+}
20676+#endif /* CONFIG_MODULES */
20677
20678 int fixup_exception(struct pt_regs *regs)
20679 {
20680 const struct exception_table_entry *fixup;
20681
20682 #ifdef CONFIG_PNPBIOS
20683- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
20684+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
20685 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
20686 extern u32 pnp_bios_is_utter_crap;
20687 pnp_bios_is_utter_crap = 1;
20688diff -urNp linux-2.6.32.43/arch/x86/mm/fault.c linux-2.6.32.43/arch/x86/mm/fault.c
20689--- linux-2.6.32.43/arch/x86/mm/fault.c 2011-03-27 14:31:47.000000000 -0400
20690+++ linux-2.6.32.43/arch/x86/mm/fault.c 2011-06-06 17:35:16.000000000 -0400
20691@@ -11,10 +11,19 @@
20692 #include <linux/kprobes.h> /* __kprobes, ... */
20693 #include <linux/mmiotrace.h> /* kmmio_handler, ... */
20694 #include <linux/perf_event.h> /* perf_sw_event */
20695+#include <linux/unistd.h>
20696+#include <linux/compiler.h>
20697
20698 #include <asm/traps.h> /* dotraplinkage, ... */
20699 #include <asm/pgalloc.h> /* pgd_*(), ... */
20700 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
20701+#include <asm/vsyscall.h>
20702+#include <asm/tlbflush.h>
20703+
20704+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
20705+#include <asm/stacktrace.h>
20706+#include "../kernel/dumpstack.h"
20707+#endif
20708
20709 /*
20710 * Page fault error code bits:
20711@@ -51,7 +60,7 @@ static inline int notify_page_fault(stru
20712 int ret = 0;
20713
20714 /* kprobe_running() needs smp_processor_id() */
20715- if (kprobes_built_in() && !user_mode_vm(regs)) {
20716+ if (kprobes_built_in() && !user_mode(regs)) {
20717 preempt_disable();
20718 if (kprobe_running() && kprobe_fault_handler(regs, 14))
20719 ret = 1;
20720@@ -112,7 +121,10 @@ check_prefetch_opcode(struct pt_regs *re
20721 return !instr_lo || (instr_lo>>1) == 1;
20722 case 0x00:
20723 /* Prefetch instruction is 0x0F0D or 0x0F18 */
20724- if (probe_kernel_address(instr, opcode))
20725+ if (user_mode(regs)) {
20726+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20727+ return 0;
20728+ } else if (probe_kernel_address(instr, opcode))
20729 return 0;
20730
20731 *prefetch = (instr_lo == 0xF) &&
20732@@ -146,7 +158,10 @@ is_prefetch(struct pt_regs *regs, unsign
20733 while (instr < max_instr) {
20734 unsigned char opcode;
20735
20736- if (probe_kernel_address(instr, opcode))
20737+ if (user_mode(regs)) {
20738+ if (__copy_from_user_inatomic(&opcode, (__force unsigned char __user *)(instr), 1))
20739+ break;
20740+ } else if (probe_kernel_address(instr, opcode))
20741 break;
20742
20743 instr++;
20744@@ -172,6 +187,30 @@ force_sig_info_fault(int si_signo, int s
20745 force_sig_info(si_signo, &info, tsk);
20746 }
20747
20748+#ifdef CONFIG_PAX_EMUTRAMP
20749+static int pax_handle_fetch_fault(struct pt_regs *regs);
20750+#endif
20751+
20752+#ifdef CONFIG_PAX_PAGEEXEC
20753+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
20754+{
20755+ pgd_t *pgd;
20756+ pud_t *pud;
20757+ pmd_t *pmd;
20758+
20759+ pgd = pgd_offset(mm, address);
20760+ if (!pgd_present(*pgd))
20761+ return NULL;
20762+ pud = pud_offset(pgd, address);
20763+ if (!pud_present(*pud))
20764+ return NULL;
20765+ pmd = pmd_offset(pud, address);
20766+ if (!pmd_present(*pmd))
20767+ return NULL;
20768+ return pmd;
20769+}
20770+#endif
20771+
20772 DEFINE_SPINLOCK(pgd_lock);
20773 LIST_HEAD(pgd_list);
20774
20775@@ -224,11 +263,24 @@ void vmalloc_sync_all(void)
20776 address += PMD_SIZE) {
20777
20778 unsigned long flags;
20779+
20780+#ifdef CONFIG_PAX_PER_CPU_PGD
20781+ unsigned long cpu;
20782+#else
20783 struct page *page;
20784+#endif
20785
20786 spin_lock_irqsave(&pgd_lock, flags);
20787+
20788+#ifdef CONFIG_PAX_PER_CPU_PGD
20789+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20790+ pgd_t *pgd = get_cpu_pgd(cpu);
20791+#else
20792 list_for_each_entry(page, &pgd_list, lru) {
20793- if (!vmalloc_sync_one(page_address(page), address))
20794+ pgd_t *pgd = page_address(page);
20795+#endif
20796+
20797+ if (!vmalloc_sync_one(pgd, address))
20798 break;
20799 }
20800 spin_unlock_irqrestore(&pgd_lock, flags);
20801@@ -258,6 +310,11 @@ static noinline int vmalloc_fault(unsign
20802 * an interrupt in the middle of a task switch..
20803 */
20804 pgd_paddr = read_cr3();
20805+
20806+#ifdef CONFIG_PAX_PER_CPU_PGD
20807+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
20808+#endif
20809+
20810 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
20811 if (!pmd_k)
20812 return -1;
20813@@ -332,15 +389,27 @@ void vmalloc_sync_all(void)
20814
20815 const pgd_t *pgd_ref = pgd_offset_k(address);
20816 unsigned long flags;
20817+
20818+#ifdef CONFIG_PAX_PER_CPU_PGD
20819+ unsigned long cpu;
20820+#else
20821 struct page *page;
20822+#endif
20823
20824 if (pgd_none(*pgd_ref))
20825 continue;
20826
20827 spin_lock_irqsave(&pgd_lock, flags);
20828+
20829+#ifdef CONFIG_PAX_PER_CPU_PGD
20830+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
20831+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
20832+#else
20833 list_for_each_entry(page, &pgd_list, lru) {
20834 pgd_t *pgd;
20835 pgd = (pgd_t *)page_address(page) + pgd_index(address);
20836+#endif
20837+
20838 if (pgd_none(*pgd))
20839 set_pgd(pgd, *pgd_ref);
20840 else
20841@@ -373,7 +442,14 @@ static noinline int vmalloc_fault(unsign
20842 * happen within a race in page table update. In the later
20843 * case just flush:
20844 */
20845+
20846+#ifdef CONFIG_PAX_PER_CPU_PGD
20847+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
20848+ pgd = pgd_offset_cpu(smp_processor_id(), address);
20849+#else
20850 pgd = pgd_offset(current->active_mm, address);
20851+#endif
20852+
20853 pgd_ref = pgd_offset_k(address);
20854 if (pgd_none(*pgd_ref))
20855 return -1;
20856@@ -535,7 +611,7 @@ static int is_errata93(struct pt_regs *r
20857 static int is_errata100(struct pt_regs *regs, unsigned long address)
20858 {
20859 #ifdef CONFIG_X86_64
20860- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
20861+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
20862 return 1;
20863 #endif
20864 return 0;
20865@@ -562,7 +638,7 @@ static int is_f00f_bug(struct pt_regs *r
20866 }
20867
20868 static const char nx_warning[] = KERN_CRIT
20869-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
20870+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
20871
20872 static void
20873 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
20874@@ -571,15 +647,26 @@ show_fault_oops(struct pt_regs *regs, un
20875 if (!oops_may_print())
20876 return;
20877
20878- if (error_code & PF_INSTR) {
20879+ if (nx_enabled && (error_code & PF_INSTR)) {
20880 unsigned int level;
20881
20882 pte_t *pte = lookup_address(address, &level);
20883
20884 if (pte && pte_present(*pte) && !pte_exec(*pte))
20885- printk(nx_warning, current_uid());
20886+ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current));
20887 }
20888
20889+#ifdef CONFIG_PAX_KERNEXEC
20890+ if (init_mm.start_code <= address && address < init_mm.end_code) {
20891+ if (current->signal->curr_ip)
20892+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20893+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
20894+ else
20895+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
20896+ current->comm, task_pid_nr(current), current_uid(), current_euid());
20897+ }
20898+#endif
20899+
20900 printk(KERN_ALERT "BUG: unable to handle kernel ");
20901 if (address < PAGE_SIZE)
20902 printk(KERN_CONT "NULL pointer dereference");
20903@@ -704,6 +791,68 @@ __bad_area_nosemaphore(struct pt_regs *r
20904 unsigned long address, int si_code)
20905 {
20906 struct task_struct *tsk = current;
20907+ struct mm_struct *mm = tsk->mm;
20908+
20909+#ifdef CONFIG_X86_64
20910+ if (mm && (error_code & PF_INSTR) && mm->context.vdso) {
20911+ if (regs->ip == (unsigned long)vgettimeofday) {
20912+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday);
20913+ return;
20914+ } else if (regs->ip == (unsigned long)vtime) {
20915+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time);
20916+ return;
20917+ } else if (regs->ip == (unsigned long)vgetcpu) {
20918+ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu);
20919+ return;
20920+ }
20921+ }
20922+#endif
20923+
20924+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
20925+ if (mm && (error_code & PF_USER)) {
20926+ unsigned long ip = regs->ip;
20927+
20928+ if (v8086_mode(regs))
20929+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
20930+
20931+ /*
20932+ * It's possible to have interrupts off here:
20933+ */
20934+ local_irq_enable();
20935+
20936+#ifdef CONFIG_PAX_PAGEEXEC
20937+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) &&
20938+ ((nx_enabled && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && ip == address))) {
20939+
20940+#ifdef CONFIG_PAX_EMUTRAMP
20941+ switch (pax_handle_fetch_fault(regs)) {
20942+ case 2:
20943+ return;
20944+ }
20945+#endif
20946+
20947+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20948+ do_group_exit(SIGKILL);
20949+ }
20950+#endif
20951+
20952+#ifdef CONFIG_PAX_SEGMEXEC
20953+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address)) {
20954+
20955+#ifdef CONFIG_PAX_EMUTRAMP
20956+ switch (pax_handle_fetch_fault(regs)) {
20957+ case 2:
20958+ return;
20959+ }
20960+#endif
20961+
20962+ pax_report_fault(regs, (void *)ip, (void *)regs->sp);
20963+ do_group_exit(SIGKILL);
20964+ }
20965+#endif
20966+
20967+ }
20968+#endif
20969
20970 /* User mode accesses just cause a SIGSEGV */
20971 if (error_code & PF_USER) {
20972@@ -857,6 +1006,99 @@ static int spurious_fault_check(unsigned
20973 return 1;
20974 }
20975
20976+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
20977+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
20978+{
20979+ pte_t *pte;
20980+ pmd_t *pmd;
20981+ spinlock_t *ptl;
20982+ unsigned char pte_mask;
20983+
20984+ if (nx_enabled || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
20985+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
20986+ return 0;
20987+
20988+ /* PaX: it's our fault, let's handle it if we can */
20989+
20990+ /* PaX: take a look at read faults before acquiring any locks */
20991+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
20992+ /* instruction fetch attempt from a protected page in user mode */
20993+ up_read(&mm->mmap_sem);
20994+
20995+#ifdef CONFIG_PAX_EMUTRAMP
20996+ switch (pax_handle_fetch_fault(regs)) {
20997+ case 2:
20998+ return 1;
20999+ }
21000+#endif
21001+
21002+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
21003+ do_group_exit(SIGKILL);
21004+ }
21005+
21006+ pmd = pax_get_pmd(mm, address);
21007+ if (unlikely(!pmd))
21008+ return 0;
21009+
21010+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
21011+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
21012+ pte_unmap_unlock(pte, ptl);
21013+ return 0;
21014+ }
21015+
21016+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
21017+ /* write attempt to a protected page in user mode */
21018+ pte_unmap_unlock(pte, ptl);
21019+ return 0;
21020+ }
21021+
21022+#ifdef CONFIG_SMP
21023+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
21024+#else
21025+ if (likely(address > get_limit(regs->cs)))
21026+#endif
21027+ {
21028+ set_pte(pte, pte_mkread(*pte));
21029+ __flush_tlb_one(address);
21030+ pte_unmap_unlock(pte, ptl);
21031+ up_read(&mm->mmap_sem);
21032+ return 1;
21033+ }
21034+
21035+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
21036+
21037+ /*
21038+ * PaX: fill DTLB with user rights and retry
21039+ */
21040+ __asm__ __volatile__ (
21041+ "orb %2,(%1)\n"
21042+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
21043+/*
21044+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
21045+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
21046+ * page fault when examined during a TLB load attempt. this is true not only
21047+ * for PTEs holding a non-present entry but also present entries that will
21048+ * raise a page fault (such as those set up by PaX, or the copy-on-write
21049+ * mechanism). in effect it means that we do *not* need to flush the TLBs
21050+ * for our target pages since their PTEs are simply not in the TLBs at all.
21051+
21052+ * the best thing in omitting it is that we gain around 15-20% speed in the
21053+ * fast path of the page fault handler and can get rid of tracing since we
21054+ * can no longer flush unintended entries.
21055+ */
21056+ "invlpg (%0)\n"
21057+#endif
21058+ __copyuser_seg"testb $0,(%0)\n"
21059+ "xorb %3,(%1)\n"
21060+ :
21061+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
21062+ : "memory", "cc");
21063+ pte_unmap_unlock(pte, ptl);
21064+ up_read(&mm->mmap_sem);
21065+ return 1;
21066+}
21067+#endif
21068+
21069 /*
21070 * Handle a spurious fault caused by a stale TLB entry.
21071 *
21072@@ -923,6 +1165,9 @@ int show_unhandled_signals = 1;
21073 static inline int
21074 access_error(unsigned long error_code, int write, struct vm_area_struct *vma)
21075 {
21076+ if (nx_enabled && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
21077+ return 1;
21078+
21079 if (write) {
21080 /* write, present and write, not present: */
21081 if (unlikely(!(vma->vm_flags & VM_WRITE)))
21082@@ -956,17 +1201,31 @@ do_page_fault(struct pt_regs *regs, unsi
21083 {
21084 struct vm_area_struct *vma;
21085 struct task_struct *tsk;
21086- unsigned long address;
21087 struct mm_struct *mm;
21088 int write;
21089 int fault;
21090
21091+ /* Get the faulting address: */
21092+ unsigned long address = read_cr2();
21093+
21094+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
21095+ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) {
21096+ if (!search_exception_tables(regs->ip)) {
21097+ bad_area_nosemaphore(regs, error_code, address);
21098+ return;
21099+ }
21100+ if (address < PAX_USER_SHADOW_BASE) {
21101+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
21102+ printk(KERN_ERR "PAX: faulting IP: %pA\n", (void *)regs->ip);
21103+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
21104+ } else
21105+ address -= PAX_USER_SHADOW_BASE;
21106+ }
21107+#endif
21108+
21109 tsk = current;
21110 mm = tsk->mm;
21111
21112- /* Get the faulting address: */
21113- address = read_cr2();
21114-
21115 /*
21116 * Detect and handle instructions that would cause a page fault for
21117 * both a tracked kernel page and a userspace page.
21118@@ -1026,7 +1285,7 @@ do_page_fault(struct pt_regs *regs, unsi
21119 * User-mode registers count as a user access even for any
21120 * potential system fault or CPU buglet:
21121 */
21122- if (user_mode_vm(regs)) {
21123+ if (user_mode(regs)) {
21124 local_irq_enable();
21125 error_code |= PF_USER;
21126 } else {
21127@@ -1080,6 +1339,11 @@ do_page_fault(struct pt_regs *regs, unsi
21128 might_sleep();
21129 }
21130
21131+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
21132+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
21133+ return;
21134+#endif
21135+
21136 vma = find_vma(mm, address);
21137 if (unlikely(!vma)) {
21138 bad_area(regs, error_code, address);
21139@@ -1091,18 +1355,24 @@ do_page_fault(struct pt_regs *regs, unsi
21140 bad_area(regs, error_code, address);
21141 return;
21142 }
21143- if (error_code & PF_USER) {
21144- /*
21145- * Accessing the stack below %sp is always a bug.
21146- * The large cushion allows instructions like enter
21147- * and pusha to work. ("enter $65535, $31" pushes
21148- * 32 pointers and then decrements %sp by 65535.)
21149- */
21150- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
21151- bad_area(regs, error_code, address);
21152- return;
21153- }
21154+ /*
21155+ * Accessing the stack below %sp is always a bug.
21156+ * The large cushion allows instructions like enter
21157+ * and pusha to work. ("enter $65535, $31" pushes
21158+ * 32 pointers and then decrements %sp by 65535.)
21159+ */
21160+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
21161+ bad_area(regs, error_code, address);
21162+ return;
21163+ }
21164+
21165+#ifdef CONFIG_PAX_SEGMEXEC
21166+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
21167+ bad_area(regs, error_code, address);
21168+ return;
21169 }
21170+#endif
21171+
21172 if (unlikely(expand_stack(vma, address))) {
21173 bad_area(regs, error_code, address);
21174 return;
21175@@ -1146,3 +1416,199 @@ good_area:
21176
21177 up_read(&mm->mmap_sem);
21178 }
21179+
21180+#ifdef CONFIG_PAX_EMUTRAMP
21181+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
21182+{
21183+ int err;
21184+
21185+ do { /* PaX: gcc trampoline emulation #1 */
21186+ unsigned char mov1, mov2;
21187+ unsigned short jmp;
21188+ unsigned int addr1, addr2;
21189+
21190+#ifdef CONFIG_X86_64
21191+ if ((regs->ip + 11) >> 32)
21192+ break;
21193+#endif
21194+
21195+ err = get_user(mov1, (unsigned char __user *)regs->ip);
21196+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21197+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
21198+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21199+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
21200+
21201+ if (err)
21202+ break;
21203+
21204+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
21205+ regs->cx = addr1;
21206+ regs->ax = addr2;
21207+ regs->ip = addr2;
21208+ return 2;
21209+ }
21210+ } while (0);
21211+
21212+ do { /* PaX: gcc trampoline emulation #2 */
21213+ unsigned char mov, jmp;
21214+ unsigned int addr1, addr2;
21215+
21216+#ifdef CONFIG_X86_64
21217+ if ((regs->ip + 9) >> 32)
21218+ break;
21219+#endif
21220+
21221+ err = get_user(mov, (unsigned char __user *)regs->ip);
21222+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
21223+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
21224+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
21225+
21226+ if (err)
21227+ break;
21228+
21229+ if (mov == 0xB9 && jmp == 0xE9) {
21230+ regs->cx = addr1;
21231+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
21232+ return 2;
21233+ }
21234+ } while (0);
21235+
21236+ return 1; /* PaX in action */
21237+}
21238+
21239+#ifdef CONFIG_X86_64
21240+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
21241+{
21242+ int err;
21243+
21244+ do { /* PaX: gcc trampoline emulation #1 */
21245+ unsigned short mov1, mov2, jmp1;
21246+ unsigned char jmp2;
21247+ unsigned int addr1;
21248+ unsigned long addr2;
21249+
21250+ err = get_user(mov1, (unsigned short __user *)regs->ip);
21251+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
21252+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
21253+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
21254+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
21255+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
21256+
21257+ if (err)
21258+ break;
21259+
21260+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21261+ regs->r11 = addr1;
21262+ regs->r10 = addr2;
21263+ regs->ip = addr1;
21264+ return 2;
21265+ }
21266+ } while (0);
21267+
21268+ do { /* PaX: gcc trampoline emulation #2 */
21269+ unsigned short mov1, mov2, jmp1;
21270+ unsigned char jmp2;
21271+ unsigned long addr1, addr2;
21272+
21273+ err = get_user(mov1, (unsigned short __user *)regs->ip);
21274+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
21275+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
21276+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
21277+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
21278+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
21279+
21280+ if (err)
21281+ break;
21282+
21283+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
21284+ regs->r11 = addr1;
21285+ regs->r10 = addr2;
21286+ regs->ip = addr1;
21287+ return 2;
21288+ }
21289+ } while (0);
21290+
21291+ return 1; /* PaX in action */
21292+}
21293+#endif
21294+
21295+/*
21296+ * PaX: decide what to do with offenders (regs->ip = fault address)
21297+ *
21298+ * returns 1 when task should be killed
21299+ * 2 when gcc trampoline was detected
21300+ */
21301+static int pax_handle_fetch_fault(struct pt_regs *regs)
21302+{
21303+ if (v8086_mode(regs))
21304+ return 1;
21305+
21306+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
21307+ return 1;
21308+
21309+#ifdef CONFIG_X86_32
21310+ return pax_handle_fetch_fault_32(regs);
21311+#else
21312+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
21313+ return pax_handle_fetch_fault_32(regs);
21314+ else
21315+ return pax_handle_fetch_fault_64(regs);
21316+#endif
21317+}
21318+#endif
21319+
21320+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21321+void pax_report_insns(void *pc, void *sp)
21322+{
21323+ long i;
21324+
21325+ printk(KERN_ERR "PAX: bytes at PC: ");
21326+ for (i = 0; i < 20; i++) {
21327+ unsigned char c;
21328+ if (get_user(c, (__force unsigned char __user *)pc+i))
21329+ printk(KERN_CONT "?? ");
21330+ else
21331+ printk(KERN_CONT "%02x ", c);
21332+ }
21333+ printk("\n");
21334+
21335+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
21336+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
21337+ unsigned long c;
21338+ if (get_user(c, (__force unsigned long __user *)sp+i))
21339+#ifdef CONFIG_X86_32
21340+ printk(KERN_CONT "???????? ");
21341+#else
21342+ printk(KERN_CONT "???????????????? ");
21343+#endif
21344+ else
21345+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
21346+ }
21347+ printk("\n");
21348+}
21349+#endif
21350+
21351+/**
21352+ * probe_kernel_write(): safely attempt to write to a location
21353+ * @dst: address to write to
21354+ * @src: pointer to the data that shall be written
21355+ * @size: size of the data chunk
21356+ *
21357+ * Safely write to address @dst from the buffer at @src. If a kernel fault
21358+ * happens, handle that and return -EFAULT.
21359+ */
21360+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
21361+{
21362+ long ret;
21363+ mm_segment_t old_fs = get_fs();
21364+
21365+ set_fs(KERNEL_DS);
21366+ pagefault_disable();
21367+ pax_open_kernel();
21368+ ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
21369+ pax_close_kernel();
21370+ pagefault_enable();
21371+ set_fs(old_fs);
21372+
21373+ return ret ? -EFAULT : 0;
21374+}
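
The EMUTRAMP matcher in the fault.c hunk above recognizes, among other sequences, the classic i386 gcc nested-function trampoline: mov $addr1,%ecx; mov $addr2,%eax; jmp *%eax (opcode bytes B9 .. B8 .. FF E0, 12 bytes total). On a match it resumes the task at addr2 with the static chain in %ecx instead of letting it execute from a non-executable page. A minimal userspace sketch (illustrative only, not part of the patch) that builds that byte sequence and decodes it at the same offsets the handler reads with get_user(); it assumes a little-endian machine with 32-bit unsigned int, as on x86:

/* cc -Wall tramp_demo.c -o tramp_demo */
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned int addr1 = 0x08049000u, addr2 = 0x08048400u; /* example values */
	unsigned char tramp[12];
	unsigned char mov1, mov2;
	unsigned short jmp;
	unsigned int a1, a2;

	/* assemble: mov $addr1,%ecx ; mov $addr2,%eax ; jmp *%eax */
	tramp[0] = 0xB9; memcpy(&tramp[1], &addr1, 4);
	tramp[5] = 0xB8; memcpy(&tramp[6], &addr2, 4);
	tramp[10] = 0xFF; tramp[11] = 0xE0;

	/* decode at the same offsets the fault handler uses: +0, +1, +5, +6, +10 */
	mov1 = tramp[0];
	memcpy(&a1, &tramp[1], 4);
	mov2 = tramp[5];
	memcpy(&a2, &tramp[6], 4);
	memcpy(&jmp, &tramp[10], 2);	/* bytes FF E0 read as 0xE0FF */

	if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF)
		printf("trampoline: static chain=%#x, jump target=%#x\n", a1, a2);
	return 0;
}

The 64-bit variants in pax_handle_fetch_fault_64() follow the same idea with %r10/%r11: the constants 0xBB41, 0xBA49 and 0xFF49/0xE3 correspond to mov $imm32,%r11d, movabs $imm64,%r10 and jmp *%r11.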
21375diff -urNp linux-2.6.32.43/arch/x86/mm/gup.c linux-2.6.32.43/arch/x86/mm/gup.c
21376--- linux-2.6.32.43/arch/x86/mm/gup.c 2011-03-27 14:31:47.000000000 -0400
21377+++ linux-2.6.32.43/arch/x86/mm/gup.c 2011-04-17 15:56:46.000000000 -0400
21378@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long
21379 addr = start;
21380 len = (unsigned long) nr_pages << PAGE_SHIFT;
21381 end = start + len;
21382- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21383+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
21384 (void __user *)start, len)))
21385 return 0;
21386
21387diff -urNp linux-2.6.32.43/arch/x86/mm/highmem_32.c linux-2.6.32.43/arch/x86/mm/highmem_32.c
21388--- linux-2.6.32.43/arch/x86/mm/highmem_32.c 2011-03-27 14:31:47.000000000 -0400
21389+++ linux-2.6.32.43/arch/x86/mm/highmem_32.c 2011-04-17 15:56:46.000000000 -0400
21390@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page
21391 idx = type + KM_TYPE_NR*smp_processor_id();
21392 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
21393 BUG_ON(!pte_none(*(kmap_pte-idx)));
21394+
21395+ pax_open_kernel();
21396 set_pte(kmap_pte-idx, mk_pte(page, prot));
21397+ pax_close_kernel();
21398
21399 return (void *)vaddr;
21400 }
21401diff -urNp linux-2.6.32.43/arch/x86/mm/hugetlbpage.c linux-2.6.32.43/arch/x86/mm/hugetlbpage.c
21402--- linux-2.6.32.43/arch/x86/mm/hugetlbpage.c 2011-03-27 14:31:47.000000000 -0400
21403+++ linux-2.6.32.43/arch/x86/mm/hugetlbpage.c 2011-04-17 15:56:46.000000000 -0400
21404@@ -267,13 +267,20 @@ static unsigned long hugetlb_get_unmappe
21405 struct hstate *h = hstate_file(file);
21406 struct mm_struct *mm = current->mm;
21407 struct vm_area_struct *vma;
21408- unsigned long start_addr;
21409+ unsigned long start_addr, pax_task_size = TASK_SIZE;
21410+
21411+#ifdef CONFIG_PAX_SEGMEXEC
21412+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21413+ pax_task_size = SEGMEXEC_TASK_SIZE;
21414+#endif
21415+
21416+ pax_task_size -= PAGE_SIZE;
21417
21418 if (len > mm->cached_hole_size) {
21419- start_addr = mm->free_area_cache;
21420+ start_addr = mm->free_area_cache;
21421 } else {
21422- start_addr = TASK_UNMAPPED_BASE;
21423- mm->cached_hole_size = 0;
21424+ start_addr = mm->mmap_base;
21425+ mm->cached_hole_size = 0;
21426 }
21427
21428 full_search:
21429@@ -281,26 +288,27 @@ full_search:
21430
21431 for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
21432 /* At this point: (!vma || addr < vma->vm_end). */
21433- if (TASK_SIZE - len < addr) {
21434+ if (pax_task_size - len < addr) {
21435 /*
21436 * Start a new search - just in case we missed
21437 * some holes.
21438 */
21439- if (start_addr != TASK_UNMAPPED_BASE) {
21440- start_addr = TASK_UNMAPPED_BASE;
21441+ if (start_addr != mm->mmap_base) {
21442+ start_addr = mm->mmap_base;
21443 mm->cached_hole_size = 0;
21444 goto full_search;
21445 }
21446 return -ENOMEM;
21447 }
21448- if (!vma || addr + len <= vma->vm_start) {
21449- mm->free_area_cache = addr + len;
21450- return addr;
21451- }
21452+ if (check_heap_stack_gap(vma, addr, len))
21453+ break;
21454 if (addr + mm->cached_hole_size < vma->vm_start)
21455 mm->cached_hole_size = vma->vm_start - addr;
21456 addr = ALIGN(vma->vm_end, huge_page_size(h));
21457 }
21458+
21459+ mm->free_area_cache = addr + len;
21460+ return addr;
21461 }
21462
21463 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
21464@@ -309,10 +317,9 @@ static unsigned long hugetlb_get_unmappe
21465 {
21466 struct hstate *h = hstate_file(file);
21467 struct mm_struct *mm = current->mm;
21468- struct vm_area_struct *vma, *prev_vma;
21469- unsigned long base = mm->mmap_base, addr = addr0;
21470+ struct vm_area_struct *vma;
21471+ unsigned long base = mm->mmap_base, addr;
21472 unsigned long largest_hole = mm->cached_hole_size;
21473- int first_time = 1;
21474
21475 /* don't allow allocations above current base */
21476 if (mm->free_area_cache > base)
21477@@ -322,64 +329,63 @@ static unsigned long hugetlb_get_unmappe
21478 largest_hole = 0;
21479 mm->free_area_cache = base;
21480 }
21481-try_again:
21482+
21483 /* make sure it can fit in the remaining address space */
21484 if (mm->free_area_cache < len)
21485 goto fail;
21486
21487 /* either no address requested or cant fit in requested address hole */
21488- addr = (mm->free_area_cache - len) & huge_page_mask(h);
21489+ addr = (mm->free_area_cache - len);
21490 do {
21491+ addr &= huge_page_mask(h);
21492+ vma = find_vma(mm, addr);
21493 /*
21494 * Lookup failure means no vma is above this address,
21495 * i.e. return with success:
21496- */
21497- if (!(vma = find_vma_prev(mm, addr, &prev_vma)))
21498- return addr;
21499-
21500- /*
21501 * new region fits between prev_vma->vm_end and
21502 * vma->vm_start, use it:
21503 */
21504- if (addr + len <= vma->vm_start &&
21505- (!prev_vma || (addr >= prev_vma->vm_end))) {
21506+ if (check_heap_stack_gap(vma, addr, len)) {
21507 /* remember the address as a hint for next time */
21508- mm->cached_hole_size = largest_hole;
21509- return (mm->free_area_cache = addr);
21510- } else {
21511- /* pull free_area_cache down to the first hole */
21512- if (mm->free_area_cache == vma->vm_end) {
21513- mm->free_area_cache = vma->vm_start;
21514- mm->cached_hole_size = largest_hole;
21515- }
21516+ mm->cached_hole_size = largest_hole;
21517+ return (mm->free_area_cache = addr);
21518+ }
21519+ /* pull free_area_cache down to the first hole */
21520+ if (mm->free_area_cache == vma->vm_end) {
21521+ mm->free_area_cache = vma->vm_start;
21522+ mm->cached_hole_size = largest_hole;
21523 }
21524
21525 /* remember the largest hole we saw so far */
21526 if (addr + largest_hole < vma->vm_start)
21527- largest_hole = vma->vm_start - addr;
21528+ largest_hole = vma->vm_start - addr;
21529
21530 /* try just below the current vma->vm_start */
21531- addr = (vma->vm_start - len) & huge_page_mask(h);
21532- } while (len <= vma->vm_start);
21533+ addr = skip_heap_stack_gap(vma, len);
21534+ } while (!IS_ERR_VALUE(addr));
21535
21536 fail:
21537 /*
21538- * if hint left us with no space for the requested
21539- * mapping then try again:
21540- */
21541- if (first_time) {
21542- mm->free_area_cache = base;
21543- largest_hole = 0;
21544- first_time = 0;
21545- goto try_again;
21546- }
21547- /*
21548 * A failed mmap() very likely causes application failure,
21549 * so fall back to the bottom-up function here. This scenario
21550 * can happen with large stack limits and large mmap()
21551 * allocations.
21552 */
21553- mm->free_area_cache = TASK_UNMAPPED_BASE;
21554+
21555+#ifdef CONFIG_PAX_SEGMEXEC
21556+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21557+ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE;
21558+ else
21559+#endif
21560+
21561+ mm->mmap_base = TASK_UNMAPPED_BASE;
21562+
21563+#ifdef CONFIG_PAX_RANDMMAP
21564+ if (mm->pax_flags & MF_PAX_RANDMMAP)
21565+ mm->mmap_base += mm->delta_mmap;
21566+#endif
21567+
21568+ mm->free_area_cache = mm->mmap_base;
21569 mm->cached_hole_size = ~0UL;
21570 addr = hugetlb_get_unmapped_area_bottomup(file, addr0,
21571 len, pgoff, flags);
21572@@ -387,6 +393,7 @@ fail:
21573 /*
21574 * Restore the topdown base:
21575 */
21576+ mm->mmap_base = base;
21577 mm->free_area_cache = base;
21578 mm->cached_hole_size = ~0UL;
21579
21580@@ -400,10 +407,19 @@ hugetlb_get_unmapped_area(struct file *f
21581 struct hstate *h = hstate_file(file);
21582 struct mm_struct *mm = current->mm;
21583 struct vm_area_struct *vma;
21584+ unsigned long pax_task_size = TASK_SIZE;
21585
21586 if (len & ~huge_page_mask(h))
21587 return -EINVAL;
21588- if (len > TASK_SIZE)
21589+
21590+#ifdef CONFIG_PAX_SEGMEXEC
21591+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
21592+ pax_task_size = SEGMEXEC_TASK_SIZE;
21593+#endif
21594+
21595+ pax_task_size -= PAGE_SIZE;
21596+
21597+ if (len > pax_task_size)
21598 return -ENOMEM;
21599
21600 if (flags & MAP_FIXED) {
21601@@ -415,8 +431,7 @@ hugetlb_get_unmapped_area(struct file *f
21602 if (addr) {
21603 addr = ALIGN(addr, huge_page_size(h));
21604 vma = find_vma(mm, addr);
21605- if (TASK_SIZE - len >= addr &&
21606- (!vma || addr + len <= vma->vm_start))
21607+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len))
21608 return addr;
21609 }
21610 if (mm->get_unmapped_area == arch_get_unmapped_area)
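
Both hugetlb allocators above stop open-coding the "!vma || addr + len <= vma->vm_start" fit test and call check_heap_stack_gap()/skip_heap_stack_gap(), which are defined elsewhere in this patch. Conceptually the check keeps a guard gap below grows-down (stack) mappings so an allocation cannot be placed flush against a stack. A rough userspace sketch of that idea (not the kernel implementation), with a hypothetical gap constant standing in for the real value:

#include <stdbool.h>
#include <stdio.h>

#define HYPOTHETICAL_STACK_GAP	(64UL * 1024)	/* stand-in, not the kernel's value */
#define VM_GROWSDOWN		0x00000100UL

struct vma {			/* simplified stand-in for struct vm_area_struct */
	unsigned long vm_start;
	unsigned long vm_end;
	unsigned long vm_flags;
};

/* does [addr, addr+len) fit below the next vma, honouring a stack guard gap? */
static bool gap_ok(const struct vma *next, unsigned long addr, unsigned long len)
{
	unsigned long gap = 0;

	if (!next)				/* nothing above: always fits */
		return true;
	if (next->vm_flags & VM_GROWSDOWN)	/* keep a hole below a stack */
		gap = HYPOTHETICAL_STACK_GAP;
	return addr + len + gap <= next->vm_start;
}

int main(void)
{
	struct vma stack = { 0xbf000000UL, 0xbf800000UL, VM_GROWSDOWN };

	printf("%d\n", gap_ok(&stack, 0xbeff0000UL, 0x10000));	/* 0: lands in the gap */
	printf("%d\n", gap_ok(&stack, 0xbef00000UL, 0x10000));	/* 1: fits */
	return 0;
}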
21611diff -urNp linux-2.6.32.43/arch/x86/mm/init_32.c linux-2.6.32.43/arch/x86/mm/init_32.c
21612--- linux-2.6.32.43/arch/x86/mm/init_32.c 2011-03-27 14:31:47.000000000 -0400
21613+++ linux-2.6.32.43/arch/x86/mm/init_32.c 2011-04-17 15:56:46.000000000 -0400
21614@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void)
21615 }
21616
21617 /*
21618- * Creates a middle page table and puts a pointer to it in the
21619- * given global directory entry. This only returns the gd entry
21620- * in non-PAE compilation mode, since the middle layer is folded.
21621- */
21622-static pmd_t * __init one_md_table_init(pgd_t *pgd)
21623-{
21624- pud_t *pud;
21625- pmd_t *pmd_table;
21626-
21627-#ifdef CONFIG_X86_PAE
21628- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
21629- if (after_bootmem)
21630- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE);
21631- else
21632- pmd_table = (pmd_t *)alloc_low_page();
21633- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
21634- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
21635- pud = pud_offset(pgd, 0);
21636- BUG_ON(pmd_table != pmd_offset(pud, 0));
21637-
21638- return pmd_table;
21639- }
21640-#endif
21641- pud = pud_offset(pgd, 0);
21642- pmd_table = pmd_offset(pud, 0);
21643-
21644- return pmd_table;
21645-}
21646-
21647-/*
21648 * Create a page table and place a pointer to it in a middle page
21649 * directory entry:
21650 */
21651@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini
21652 page_table = (pte_t *)alloc_low_page();
21653
21654 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
21655+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21656+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
21657+#else
21658 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
21659+#endif
21660 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
21661 }
21662
21663 return pte_offset_kernel(pmd, 0);
21664 }
21665
21666+static pmd_t * __init one_md_table_init(pgd_t *pgd)
21667+{
21668+ pud_t *pud;
21669+ pmd_t *pmd_table;
21670+
21671+ pud = pud_offset(pgd, 0);
21672+ pmd_table = pmd_offset(pud, 0);
21673+
21674+ return pmd_table;
21675+}
21676+
21677 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
21678 {
21679 int pgd_idx = pgd_index(vaddr);
21680@@ -201,6 +186,7 @@ page_table_range_init(unsigned long star
21681 int pgd_idx, pmd_idx;
21682 unsigned long vaddr;
21683 pgd_t *pgd;
21684+ pud_t *pud;
21685 pmd_t *pmd;
21686 pte_t *pte = NULL;
21687
21688@@ -210,8 +196,13 @@ page_table_range_init(unsigned long star
21689 pgd = pgd_base + pgd_idx;
21690
21691 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
21692- pmd = one_md_table_init(pgd);
21693- pmd = pmd + pmd_index(vaddr);
21694+ pud = pud_offset(pgd, vaddr);
21695+ pmd = pmd_offset(pud, vaddr);
21696+
21697+#ifdef CONFIG_X86_PAE
21698+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21699+#endif
21700+
21701 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
21702 pmd++, pmd_idx++) {
21703 pte = page_table_kmap_check(one_page_table_init(pmd),
21704@@ -223,11 +214,20 @@ page_table_range_init(unsigned long star
21705 }
21706 }
21707
21708-static inline int is_kernel_text(unsigned long addr)
21709+static inline int is_kernel_text(unsigned long start, unsigned long end)
21710 {
21711- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end)
21712- return 1;
21713- return 0;
21714+ if ((start > ktla_ktva((unsigned long)_etext) ||
21715+ end <= ktla_ktva((unsigned long)_stext)) &&
21716+ (start > ktla_ktva((unsigned long)_einittext) ||
21717+ end <= ktla_ktva((unsigned long)_sinittext)) &&
21718+
21719+#ifdef CONFIG_ACPI_SLEEP
21720+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
21721+#endif
21722+
21723+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
21724+ return 0;
21725+ return 1;
21726 }
21727
21728 /*
21729@@ -243,9 +243,10 @@ kernel_physical_mapping_init(unsigned lo
21730 int use_pse = page_size_mask == (1<<PG_LEVEL_2M);
21731 unsigned long start_pfn, end_pfn;
21732 pgd_t *pgd_base = swapper_pg_dir;
21733- int pgd_idx, pmd_idx, pte_ofs;
21734+ unsigned int pgd_idx, pmd_idx, pte_ofs;
21735 unsigned long pfn;
21736 pgd_t *pgd;
21737+ pud_t *pud;
21738 pmd_t *pmd;
21739 pte_t *pte;
21740 unsigned pages_2m, pages_4k;
21741@@ -278,8 +279,13 @@ repeat:
21742 pfn = start_pfn;
21743 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21744 pgd = pgd_base + pgd_idx;
21745- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
21746- pmd = one_md_table_init(pgd);
21747+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
21748+ pud = pud_offset(pgd, 0);
21749+ pmd = pmd_offset(pud, 0);
21750+
21751+#ifdef CONFIG_X86_PAE
21752+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
21753+#endif
21754
21755 if (pfn >= end_pfn)
21756 continue;
21757@@ -291,14 +297,13 @@ repeat:
21758 #endif
21759 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
21760 pmd++, pmd_idx++) {
21761- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
21762+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
21763
21764 /*
21765 * Map with big pages if possible, otherwise
21766 * create normal page tables:
21767 */
21768 if (use_pse) {
21769- unsigned int addr2;
21770 pgprot_t prot = PAGE_KERNEL_LARGE;
21771 /*
21772 * first pass will use the same initial
21773@@ -308,11 +313,7 @@ repeat:
21774 __pgprot(PTE_IDENT_ATTR |
21775 _PAGE_PSE);
21776
21777- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
21778- PAGE_OFFSET + PAGE_SIZE-1;
21779-
21780- if (is_kernel_text(addr) ||
21781- is_kernel_text(addr2))
21782+ if (is_kernel_text(address, address + PMD_SIZE))
21783 prot = PAGE_KERNEL_LARGE_EXEC;
21784
21785 pages_2m++;
21786@@ -329,7 +330,7 @@ repeat:
21787 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
21788 pte += pte_ofs;
21789 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
21790- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
21791+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
21792 pgprot_t prot = PAGE_KERNEL;
21793 /*
21794 * first pass will use the same initial
21795@@ -337,7 +338,7 @@ repeat:
21796 */
21797 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
21798
21799- if (is_kernel_text(addr))
21800+ if (is_kernel_text(address, address + PAGE_SIZE))
21801 prot = PAGE_KERNEL_EXEC;
21802
21803 pages_4k++;
21804@@ -489,7 +490,7 @@ void __init native_pagetable_setup_start
21805
21806 pud = pud_offset(pgd, va);
21807 pmd = pmd_offset(pud, va);
21808- if (!pmd_present(*pmd))
21809+ if (!pmd_present(*pmd) || pmd_huge(*pmd))
21810 break;
21811
21812 pte = pte_offset_kernel(pmd, va);
21813@@ -541,9 +542,7 @@ void __init early_ioremap_page_table_ran
21814
21815 static void __init pagetable_init(void)
21816 {
21817- pgd_t *pgd_base = swapper_pg_dir;
21818-
21819- permanent_kmaps_init(pgd_base);
21820+ permanent_kmaps_init(swapper_pg_dir);
21821 }
21822
21823 #ifdef CONFIG_ACPI_SLEEP
21824@@ -551,12 +550,12 @@ static void __init pagetable_init(void)
21825 * ACPI suspend needs this for resume, because things like the intel-agp
21826 * driver might have split up a kernel 4MB mapping.
21827 */
21828-char swsusp_pg_dir[PAGE_SIZE]
21829+pgd_t swsusp_pg_dir[PTRS_PER_PGD]
21830 __attribute__ ((aligned(PAGE_SIZE)));
21831
21832 static inline void save_pg_dir(void)
21833 {
21834- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE);
21835+ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD);
21836 }
21837 #else /* !CONFIG_ACPI_SLEEP */
21838 static inline void save_pg_dir(void)
21839@@ -588,7 +587,7 @@ void zap_low_mappings(bool early)
21840 flush_tlb_all();
21841 }
21842
21843-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21844+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
21845 EXPORT_SYMBOL_GPL(__supported_pte_mask);
21846
21847 /* user-defined highmem size */
21848@@ -777,7 +776,7 @@ void __init setup_bootmem_allocator(void
21849 * Initialize the boot-time allocator (with low memory only):
21850 */
21851 bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT;
21852- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21853+ bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size,
21854 PAGE_SIZE);
21855 if (bootmap == -1L)
21856 panic("Cannot find bootmem map of size %ld\n", bootmap_size);
21857@@ -864,6 +863,12 @@ void __init mem_init(void)
21858
21859 pci_iommu_alloc();
21860
21861+#ifdef CONFIG_PAX_PER_CPU_PGD
21862+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21863+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21864+ KERNEL_PGD_PTRS);
21865+#endif
21866+
21867 #ifdef CONFIG_FLATMEM
21868 BUG_ON(!mem_map);
21869 #endif
21870@@ -881,7 +886,7 @@ void __init mem_init(void)
21871 set_highmem_pages_init();
21872
21873 codesize = (unsigned long) &_etext - (unsigned long) &_text;
21874- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
21875+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
21876 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
21877
21878 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
21879@@ -923,10 +928,10 @@ void __init mem_init(void)
21880 ((unsigned long)&__init_end -
21881 (unsigned long)&__init_begin) >> 10,
21882
21883- (unsigned long)&_etext, (unsigned long)&_edata,
21884- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
21885+ (unsigned long)&_sdata, (unsigned long)&_edata,
21886+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
21887
21888- (unsigned long)&_text, (unsigned long)&_etext,
21889+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
21890 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
21891
21892 /*
21893@@ -1007,6 +1012,7 @@ void set_kernel_text_rw(void)
21894 if (!kernel_set_to_readonly)
21895 return;
21896
21897+ start = ktla_ktva(start);
21898 pr_debug("Set kernel text: %lx - %lx for read write\n",
21899 start, start+size);
21900
21901@@ -1021,6 +1027,7 @@ void set_kernel_text_ro(void)
21902 if (!kernel_set_to_readonly)
21903 return;
21904
21905+ start = ktla_ktva(start);
21906 pr_debug("Set kernel text: %lx - %lx for read only\n",
21907 start, start+size);
21908
21909@@ -1032,6 +1039,7 @@ void mark_rodata_ro(void)
21910 unsigned long start = PFN_ALIGN(_text);
21911 unsigned long size = PFN_ALIGN(_etext) - start;
21912
21913+ start = ktla_ktva(start);
21914 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
21915 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
21916 size >> 10);
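
The rewritten is_kernel_text() above takes a [start, end) range instead of a single address and reports whether it intersects any region that must stay executable: .text and .init.text through their ktla_ktva() aliases, the ACPI wakeup trampoline, and the low BIOS/video area. It is an interval-overlap test; a minimal sketch of the predicate (illustrative, not part of the patch) follows. The kernel version compares the upper bound with ">" rather than ">=", so it is slightly more conservative at a region's end:

#include <stdbool.h>
#include <stdio.h>

/* half-open interval overlap: do [a0,a1) and [b0,b1) intersect? */
static bool overlaps(unsigned long a0, unsigned long a1,
		     unsigned long b0, unsigned long b1)
{
	return !(a0 >= b1 || a1 <= b0);
}

int main(void)
{
	/* hypothetical .text boundaries, purely for illustration */
	unsigned long stext = 0xc1000000UL, etext = 0xc1400000UL;

	printf("%d\n", overlaps(0xc13ff000UL, 0xc1400000UL, stext, etext)); /* 1 */
	printf("%d\n", overlaps(0xc1400000UL, 0xc1401000UL, stext, etext)); /* 0 */
	return 0;
}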
21917diff -urNp linux-2.6.32.43/arch/x86/mm/init_64.c linux-2.6.32.43/arch/x86/mm/init_64.c
21918--- linux-2.6.32.43/arch/x86/mm/init_64.c 2011-04-17 17:00:52.000000000 -0400
21919+++ linux-2.6.32.43/arch/x86/mm/init_64.c 2011-04-17 17:03:05.000000000 -0400
21920@@ -164,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page,
21921 pmd = fill_pmd(pud, vaddr);
21922 pte = fill_pte(pmd, vaddr);
21923
21924+ pax_open_kernel();
21925 set_pte(pte, new_pte);
21926+ pax_close_kernel();
21927
21928 /*
21929 * It's enough to flush this one mapping.
21930@@ -223,14 +225,12 @@ static void __init __init_extra_mapping(
21931 pgd = pgd_offset_k((unsigned long)__va(phys));
21932 if (pgd_none(*pgd)) {
21933 pud = (pud_t *) spp_getpage();
21934- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
21935- _PAGE_USER));
21936+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
21937 }
21938 pud = pud_offset(pgd, (unsigned long)__va(phys));
21939 if (pud_none(*pud)) {
21940 pmd = (pmd_t *) spp_getpage();
21941- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
21942- _PAGE_USER));
21943+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
21944 }
21945 pmd = pmd_offset(pud, phys);
21946 BUG_ON(!pmd_none(*pmd));
21947@@ -675,6 +675,12 @@ void __init mem_init(void)
21948
21949 pci_iommu_alloc();
21950
21951+#ifdef CONFIG_PAX_PER_CPU_PGD
21952+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
21953+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
21954+ KERNEL_PGD_PTRS);
21955+#endif
21956+
21957 /* clear_bss() already clear the empty_zero_page */
21958
21959 reservedpages = 0;
21960@@ -861,8 +867,8 @@ int kern_addr_valid(unsigned long addr)
21961 static struct vm_area_struct gate_vma = {
21962 .vm_start = VSYSCALL_START,
21963 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
21964- .vm_page_prot = PAGE_READONLY_EXEC,
21965- .vm_flags = VM_READ | VM_EXEC
21966+ .vm_page_prot = PAGE_READONLY,
21967+ .vm_flags = VM_READ
21968 };
21969
21970 struct vm_area_struct *get_gate_vma(struct task_struct *tsk)
21971@@ -896,7 +902,7 @@ int in_gate_area_no_task(unsigned long a
21972
21973 const char *arch_vma_name(struct vm_area_struct *vma)
21974 {
21975- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
21976+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
21977 return "[vdso]";
21978 if (vma == &gate_vma)
21979 return "[vsyscall]";
21980diff -urNp linux-2.6.32.43/arch/x86/mm/init.c linux-2.6.32.43/arch/x86/mm/init.c
21981--- linux-2.6.32.43/arch/x86/mm/init.c 2011-04-17 17:00:52.000000000 -0400
21982+++ linux-2.6.32.43/arch/x86/mm/init.c 2011-06-07 19:06:09.000000000 -0400
21983@@ -69,11 +69,7 @@ static void __init find_early_table_spac
21984 * cause a hotspot and fill up ZONE_DMA. The page tables
21985 * need roughly 0.5KB per GB.
21986 */
21987-#ifdef CONFIG_X86_32
21988- start = 0x7000;
21989-#else
21990- start = 0x8000;
21991-#endif
21992+ start = 0x100000;
21993 e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT,
21994 tables, PAGE_SIZE);
21995 if (e820_table_start == -1UL)
21996@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m
21997 #endif
21998
21999 set_nx();
22000- if (nx_enabled)
22001+ if (nx_enabled && cpu_has_nx)
22002 printk(KERN_INFO "NX (Execute Disable) protection: active\n");
22003
22004 /* Enable PSE if available */
22005@@ -329,10 +325,27 @@ unsigned long __init_refok init_memory_m
22006 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
22007 * mmio resources as well as potential bios/acpi data regions.
22008 */
22009+
22010 int devmem_is_allowed(unsigned long pagenr)
22011 {
22012+#ifdef CONFIG_GRKERNSEC_KMEM
22013+ /* allow BDA */
22014+ if (!pagenr)
22015+ return 1;
22016+ /* allow EBDA */
22017+ if ((0x9f000 >> PAGE_SHIFT) == pagenr)
22018+ return 1;
22019+ /* allow ISA/video mem */
22020+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22021+ return 1;
22022+ /* throw out everything else below 1MB */
22023+ if (pagenr <= 256)
22024+ return 0;
22025+#else
22026 if (pagenr <= 256)
22027 return 1;
22028+#endif
22029+
22030 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
22031 return 0;
22032 if (!page_is_ram(pagenr))
22033@@ -379,6 +392,86 @@ void free_init_pages(char *what, unsigne
22034
22035 void free_initmem(void)
22036 {
22037+
22038+#ifdef CONFIG_PAX_KERNEXEC
22039+#ifdef CONFIG_X86_32
22040+ /* PaX: limit KERNEL_CS to actual size */
22041+ unsigned long addr, limit;
22042+ struct desc_struct d;
22043+ int cpu;
22044+
22045+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
22046+ limit = (limit - 1UL) >> PAGE_SHIFT;
22047+
22048+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
22049+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
22050+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
22051+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
22052+ }
22053+
22054+ /* PaX: make KERNEL_CS read-only */
22055+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
22056+ if (!paravirt_enabled())
22057+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
22058+/*
22059+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
22060+ pgd = pgd_offset_k(addr);
22061+ pud = pud_offset(pgd, addr);
22062+ pmd = pmd_offset(pud, addr);
22063+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22064+ }
22065+*/
22066+#ifdef CONFIG_X86_PAE
22067+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
22068+/*
22069+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
22070+ pgd = pgd_offset_k(addr);
22071+ pud = pud_offset(pgd, addr);
22072+ pmd = pmd_offset(pud, addr);
22073+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22074+ }
22075+*/
22076+#endif
22077+
22078+#ifdef CONFIG_MODULES
22079+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
22080+#endif
22081+
22082+#else
22083+ pgd_t *pgd;
22084+ pud_t *pud;
22085+ pmd_t *pmd;
22086+ unsigned long addr, end;
22087+
22088+ /* PaX: make kernel code/rodata read-only, rest non-executable */
22089+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
22090+ pgd = pgd_offset_k(addr);
22091+ pud = pud_offset(pgd, addr);
22092+ pmd = pmd_offset(pud, addr);
22093+ if (!pmd_present(*pmd))
22094+ continue;
22095+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
22096+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22097+ else
22098+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
22099+ }
22100+
22101+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
22102+ end = addr + KERNEL_IMAGE_SIZE;
22103+ for (; addr < end; addr += PMD_SIZE) {
22104+ pgd = pgd_offset_k(addr);
22105+ pud = pud_offset(pgd, addr);
22106+ pmd = pmd_offset(pud, addr);
22107+ if (!pmd_present(*pmd))
22108+ continue;
22109+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
22110+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
22111+ }
22112+#endif
22113+
22114+ flush_tlb_all();
22115+#endif
22116+
22117 free_init_pages("unused kernel memory",
22118 (unsigned long)(&__init_begin),
22119 (unsigned long)(&__init_end));
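
Under CONFIG_GRKERNSEC_KMEM the devmem_is_allowed() hunk above inverts the old "everything below 1MB is fair game" rule: only the BIOS data area (page 0), the EBDA page at 0x9f000 and the ISA/video hole remain accessible through /dev/mem, and everything else below 1MB is refused. A small userspace restatement of that low-memory policy (illustrative only; the iomem_is_exclusive()/page_is_ram() checks that follow in the kernel are omitted):

#include <stdio.h>

#define PAGE_SHIFT		12
#define ISA_START_ADDRESS	0xa0000UL	/* x86 ISA/video hole */
#define ISA_END_ADDRESS		0x100000UL

static int low_mem_allowed(unsigned long pagenr)
{
	if (!pagenr)						/* BDA */
		return 1;
	if (pagenr == (0x9f000UL >> PAGE_SHIFT))		/* EBDA */
		return 1;
	if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
	    pagenr <  (ISA_END_ADDRESS   >> PAGE_SHIFT))	/* ISA/video mem */
		return 1;
	return 0;						/* rest of low 1MB */
}

int main(void)
{
	unsigned long pfn;

	for (pfn = 0; pfn <= 256; pfn++)
		if (low_mem_allowed(pfn))
			printf("pfn %#lx allowed\n", pfn);
	return 0;
}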
22120diff -urNp linux-2.6.32.43/arch/x86/mm/iomap_32.c linux-2.6.32.43/arch/x86/mm/iomap_32.c
22121--- linux-2.6.32.43/arch/x86/mm/iomap_32.c 2011-03-27 14:31:47.000000000 -0400
22122+++ linux-2.6.32.43/arch/x86/mm/iomap_32.c 2011-04-17 15:56:46.000000000 -0400
22123@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long
22124 debug_kmap_atomic(type);
22125 idx = type + KM_TYPE_NR * smp_processor_id();
22126 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
22127+
22128+ pax_open_kernel();
22129 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
22130+ pax_close_kernel();
22131+
22132 arch_flush_lazy_mmu_mode();
22133
22134 return (void *)vaddr;
22135diff -urNp linux-2.6.32.43/arch/x86/mm/ioremap.c linux-2.6.32.43/arch/x86/mm/ioremap.c
22136--- linux-2.6.32.43/arch/x86/mm/ioremap.c 2011-03-27 14:31:47.000000000 -0400
22137+++ linux-2.6.32.43/arch/x86/mm/ioremap.c 2011-04-17 15:56:46.000000000 -0400
22138@@ -41,8 +41,8 @@ int page_is_ram(unsigned long pagenr)
22139 * Second special case: Some BIOSen report the PC BIOS
22140 * area (640->1Mb) as ram even though it is not.
22141 */
22142- if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
22143- pagenr < (BIOS_END >> PAGE_SHIFT))
22144+ if (pagenr >= (ISA_START_ADDRESS >> PAGE_SHIFT) &&
22145+ pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
22146 return 0;
22147
22148 for (i = 0; i < e820.nr_map; i++) {
22149@@ -137,13 +137,10 @@ static void __iomem *__ioremap_caller(re
22150 /*
22151 * Don't allow anybody to remap normal RAM that we're using..
22152 */
22153- for (pfn = phys_addr >> PAGE_SHIFT;
22154- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
22155- pfn++) {
22156-
22157+ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) {
22158 int is_ram = page_is_ram(pfn);
22159
22160- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
22161+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
22162 return NULL;
22163 WARN_ON_ONCE(is_ram);
22164 }
22165@@ -407,7 +404,7 @@ static int __init early_ioremap_debug_se
22166 early_param("early_ioremap_debug", early_ioremap_debug_setup);
22167
22168 static __initdata int after_paging_init;
22169-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
22170+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
22171
22172 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
22173 {
22174@@ -439,8 +436,7 @@ void __init early_ioremap_init(void)
22175 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
22176
22177 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
22178- memset(bm_pte, 0, sizeof(bm_pte));
22179- pmd_populate_kernel(&init_mm, pmd, bm_pte);
22180+ pmd_populate_user(&init_mm, pmd, bm_pte);
22181
22182 /*
22183 * The boot-ioremap range spans multiple pmds, for which
22184diff -urNp linux-2.6.32.43/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.32.43/arch/x86/mm/kmemcheck/kmemcheck.c
22185--- linux-2.6.32.43/arch/x86/mm/kmemcheck/kmemcheck.c 2011-03-27 14:31:47.000000000 -0400
22186+++ linux-2.6.32.43/arch/x86/mm/kmemcheck/kmemcheck.c 2011-04-17 15:56:46.000000000 -0400
22187@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg
22188 * memory (e.g. tracked pages)? For now, we need this to avoid
22189 * invoking kmemcheck for PnP BIOS calls.
22190 */
22191- if (regs->flags & X86_VM_MASK)
22192+ if (v8086_mode(regs))
22193 return false;
22194- if (regs->cs != __KERNEL_CS)
22195+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
22196 return false;
22197
22198 pte = kmemcheck_pte_lookup(address);
22199diff -urNp linux-2.6.32.43/arch/x86/mm/mmap.c linux-2.6.32.43/arch/x86/mm/mmap.c
22200--- linux-2.6.32.43/arch/x86/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
22201+++ linux-2.6.32.43/arch/x86/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
22202@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size
22203 * Leave an at least ~128 MB hole with possible stack randomization.
22204 */
22205 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
22206-#define MAX_GAP (TASK_SIZE/6*5)
22207+#define MAX_GAP (pax_task_size/6*5)
22208
22209 /*
22210 * True on X86_32 or when emulating IA32 on X86_64
22211@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void)
22212 return rnd << PAGE_SHIFT;
22213 }
22214
22215-static unsigned long mmap_base(void)
22216+static unsigned long mmap_base(struct mm_struct *mm)
22217 {
22218 unsigned long gap = current->signal->rlim[RLIMIT_STACK].rlim_cur;
22219+ unsigned long pax_task_size = TASK_SIZE;
22220+
22221+#ifdef CONFIG_PAX_SEGMEXEC
22222+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22223+ pax_task_size = SEGMEXEC_TASK_SIZE;
22224+#endif
22225
22226 if (gap < MIN_GAP)
22227 gap = MIN_GAP;
22228 else if (gap > MAX_GAP)
22229 gap = MAX_GAP;
22230
22231- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
22232+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
22233 }
22234
22235 /*
22236 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
22237 * does, but not when emulating X86_32
22238 */
22239-static unsigned long mmap_legacy_base(void)
22240+static unsigned long mmap_legacy_base(struct mm_struct *mm)
22241 {
22242- if (mmap_is_ia32())
22243+ if (mmap_is_ia32()) {
22244+
22245+#ifdef CONFIG_PAX_SEGMEXEC
22246+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22247+ return SEGMEXEC_TASK_UNMAPPED_BASE;
22248+ else
22249+#endif
22250+
22251 return TASK_UNMAPPED_BASE;
22252- else
22253+ } else
22254 return TASK_UNMAPPED_BASE + mmap_rnd();
22255 }
22256
22257@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo
22258 void arch_pick_mmap_layout(struct mm_struct *mm)
22259 {
22260 if (mmap_is_legacy()) {
22261- mm->mmap_base = mmap_legacy_base();
22262+ mm->mmap_base = mmap_legacy_base(mm);
22263+
22264+#ifdef CONFIG_PAX_RANDMMAP
22265+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22266+ mm->mmap_base += mm->delta_mmap;
22267+#endif
22268+
22269 mm->get_unmapped_area = arch_get_unmapped_area;
22270 mm->unmap_area = arch_unmap_area;
22271 } else {
22272- mm->mmap_base = mmap_base();
22273+ mm->mmap_base = mmap_base(mm);
22274+
22275+#ifdef CONFIG_PAX_RANDMMAP
22276+ if (mm->pax_flags & MF_PAX_RANDMMAP)
22277+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
22278+#endif
22279+
22280 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
22281 mm->unmap_area = arch_unmap_area_topdown;
22282 }
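
arch_pick_mmap_layout() above now derives the top-down base from a pax_task_size (TASK_SIZE, or SEGMEXEC_TASK_SIZE when MF_PAX_SEGMEXEC is set) and later applies the RANDMMAP deltas, but the underlying arithmetic is unchanged: clamp the stack rlimit between MIN_GAP and MAX_GAP, subtract it and the mmap randomization from the task size, then page-align. A worked example with hypothetical 32-bit numbers (illustrative only, not part of the patch):

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long task_size = 0xc0000000UL;		/* 3GB, 32-bit example */
	unsigned long min_gap   = 128UL << 20;		/* 128MB (+ stack randomization) */
	unsigned long max_gap   = task_size / 6 * 5;
	unsigned long gap       = 8UL << 20;		/* RLIMIT_STACK = 8MB */
	unsigned long rnd       = 0;			/* mmap_rnd() output */

	if (gap < min_gap)
		gap = min_gap;
	else if (gap > max_gap)
		gap = max_gap;

	printf("mmap_base = %#lx\n", PAGE_ALIGN(task_size - gap - rnd)); /* 0xb8000000 */
	return 0;
}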
22283diff -urNp linux-2.6.32.43/arch/x86/mm/mmio-mod.c linux-2.6.32.43/arch/x86/mm/mmio-mod.c
22284--- linux-2.6.32.43/arch/x86/mm/mmio-mod.c 2011-03-27 14:31:47.000000000 -0400
22285+++ linux-2.6.32.43/arch/x86/mm/mmio-mod.c 2011-07-06 19:53:33.000000000 -0400
22286@@ -193,7 +193,7 @@ static void pre(struct kmmio_probe *p, s
22287 break;
22288 default:
22289 {
22290- unsigned char *ip = (unsigned char *)instptr;
22291+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
22292 my_trace->opcode = MMIO_UNKNOWN_OP;
22293 my_trace->width = 0;
22294 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
22295@@ -233,7 +233,7 @@ static void post(struct kmmio_probe *p,
22296 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
22297 void __iomem *addr)
22298 {
22299- static atomic_t next_id;
22300+ static atomic_unchecked_t next_id;
22301 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
22302 /* These are page-unaligned. */
22303 struct mmiotrace_map map = {
22304@@ -257,7 +257,7 @@ static void ioremap_trace_core(resource_
22305 .private = trace
22306 },
22307 .phys = offset,
22308- .id = atomic_inc_return(&next_id)
22309+ .id = atomic_inc_return_unchecked(&next_id)
22310 };
22311 map.map_id = trace->id;
22312
22313diff -urNp linux-2.6.32.43/arch/x86/mm/numa_32.c linux-2.6.32.43/arch/x86/mm/numa_32.c
22314--- linux-2.6.32.43/arch/x86/mm/numa_32.c 2011-03-27 14:31:47.000000000 -0400
22315+++ linux-2.6.32.43/arch/x86/mm/numa_32.c 2011-04-17 15:56:46.000000000 -0400
22316@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int
22317 }
22318 #endif
22319
22320-extern unsigned long find_max_low_pfn(void);
22321 extern unsigned long highend_pfn, highstart_pfn;
22322
22323 #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)
22324diff -urNp linux-2.6.32.43/arch/x86/mm/pageattr.c linux-2.6.32.43/arch/x86/mm/pageattr.c
22325--- linux-2.6.32.43/arch/x86/mm/pageattr.c 2011-03-27 14:31:47.000000000 -0400
22326+++ linux-2.6.32.43/arch/x86/mm/pageattr.c 2011-04-17 15:56:46.000000000 -0400
22327@@ -261,16 +261,17 @@ static inline pgprot_t static_protection
22328 * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support.
22329 */
22330 if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
22331- pgprot_val(forbidden) |= _PAGE_NX;
22332+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22333
22334 /*
22335 * The kernel text needs to be executable for obvious reasons
22336 * Does not cover __inittext since that is gone later on. On
22337 * 64bit we do not enforce !NX on the low mapping
22338 */
22339- if (within(address, (unsigned long)_text, (unsigned long)_etext))
22340- pgprot_val(forbidden) |= _PAGE_NX;
22341+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
22342+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22343
22344+#ifdef CONFIG_DEBUG_RODATA
22345 /*
22346 * The .rodata section needs to be read-only. Using the pfn
22347 * catches all aliases.
22348@@ -278,6 +279,14 @@ static inline pgprot_t static_protection
22349 if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT,
22350 __pa((unsigned long)__end_rodata) >> PAGE_SHIFT))
22351 pgprot_val(forbidden) |= _PAGE_RW;
22352+#endif
22353+
22354+#ifdef CONFIG_PAX_KERNEXEC
22355+ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) {
22356+ pgprot_val(forbidden) |= _PAGE_RW;
22357+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
22358+ }
22359+#endif
22360
22361 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
22362
22363@@ -331,23 +340,37 @@ EXPORT_SYMBOL_GPL(lookup_address);
22364 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
22365 {
22366 /* change init_mm */
22367+ pax_open_kernel();
22368 set_pte_atomic(kpte, pte);
22369+
22370 #ifdef CONFIG_X86_32
22371 if (!SHARED_KERNEL_PMD) {
22372+
22373+#ifdef CONFIG_PAX_PER_CPU_PGD
22374+ unsigned long cpu;
22375+#else
22376 struct page *page;
22377+#endif
22378
22379+#ifdef CONFIG_PAX_PER_CPU_PGD
22380+ for (cpu = 0; cpu < NR_CPUS; ++cpu) {
22381+ pgd_t *pgd = get_cpu_pgd(cpu);
22382+#else
22383 list_for_each_entry(page, &pgd_list, lru) {
22384- pgd_t *pgd;
22385+ pgd_t *pgd = (pgd_t *)page_address(page);
22386+#endif
22387+
22388 pud_t *pud;
22389 pmd_t *pmd;
22390
22391- pgd = (pgd_t *)page_address(page) + pgd_index(address);
22392+ pgd += pgd_index(address);
22393 pud = pud_offset(pgd, address);
22394 pmd = pmd_offset(pud, address);
22395 set_pte_atomic((pte_t *)pmd, pte);
22396 }
22397 }
22398 #endif
22399+ pax_close_kernel();
22400 }
22401
22402 static int
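
static_protections() in the pageattr.c hunk above accumulates the attributes a mapping must not have (a "forbidden" mask) and strips them from the requested protection. The patch masks _PAGE_NX with __supported_pte_mask so NX is only enforced on CPUs that have it, and under KERNEXEC it forbids both RW and NX for the [_text, _sdata) range so kernel text stays read-only and executable. A minimal sketch of that masking step (illustrative, not part of the patch):

#include <stdio.h>

#define _PAGE_RW	(1UL << 1)
#define _PAGE_NX	(1UL << 63)

int main(void)
{
	unsigned long supported_pte_mask = ~0UL;	/* NX available on this CPU */
	unsigned long prot = _PAGE_RW | _PAGE_NX;	/* attributes the caller asked for */
	unsigned long forbidden = 0;

	/* a pfn inside [_text, _sdata) under CONFIG_PAX_KERNEXEC */
	forbidden |= _PAGE_RW;
	forbidden |= _PAGE_NX & supported_pte_mask;

	prot &= ~forbidden;		/* kernel text ends up read-only and executable */
	printf("resulting prot bits: %#lx\n", prot);
	return 0;
}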
22403diff -urNp linux-2.6.32.43/arch/x86/mm/pageattr-test.c linux-2.6.32.43/arch/x86/mm/pageattr-test.c
22404--- linux-2.6.32.43/arch/x86/mm/pageattr-test.c 2011-03-27 14:31:47.000000000 -0400
22405+++ linux-2.6.32.43/arch/x86/mm/pageattr-test.c 2011-04-17 15:56:46.000000000 -0400
22406@@ -36,7 +36,7 @@ enum {
22407
22408 static int pte_testbit(pte_t pte)
22409 {
22410- return pte_flags(pte) & _PAGE_UNUSED1;
22411+ return pte_flags(pte) & _PAGE_CPA_TEST;
22412 }
22413
22414 struct split_state {
22415diff -urNp linux-2.6.32.43/arch/x86/mm/pat.c linux-2.6.32.43/arch/x86/mm/pat.c
22416--- linux-2.6.32.43/arch/x86/mm/pat.c 2011-03-27 14:31:47.000000000 -0400
22417+++ linux-2.6.32.43/arch/x86/mm/pat.c 2011-04-17 15:56:46.000000000 -0400
22418@@ -258,7 +258,7 @@ chk_conflict(struct memtype *new, struct
22419
22420 conflict:
22421 printk(KERN_INFO "%s:%d conflicting memory types "
22422- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, new->start,
22423+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), new->start,
22424 new->end, cattr_name(new->type), cattr_name(entry->type));
22425 return -EBUSY;
22426 }
22427@@ -559,7 +559,7 @@ unlock_ret:
22428
22429 if (err) {
22430 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n",
22431- current->comm, current->pid, start, end);
22432+ current->comm, task_pid_nr(current), start, end);
22433 }
22434
22435 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end);
22436@@ -689,8 +689,8 @@ static inline int range_is_allowed(unsig
22437 while (cursor < to) {
22438 if (!devmem_is_allowed(pfn)) {
22439 printk(KERN_INFO
22440- "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
22441- current->comm, from, to);
22442+ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n",
22443+ current->comm, from, to, cursor);
22444 return 0;
22445 }
22446 cursor += PAGE_SIZE;
22447@@ -755,7 +755,7 @@ int kernel_map_sync_memtype(u64 base, un
22448 printk(KERN_INFO
22449 "%s:%d ioremap_change_attr failed %s "
22450 "for %Lx-%Lx\n",
22451- current->comm, current->pid,
22452+ current->comm, task_pid_nr(current),
22453 cattr_name(flags),
22454 base, (unsigned long long)(base + size));
22455 return -EINVAL;
22456@@ -813,7 +813,7 @@ static int reserve_pfn_range(u64 paddr,
22457 free_memtype(paddr, paddr + size);
22458 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
22459 " for %Lx-%Lx, got %s\n",
22460- current->comm, current->pid,
22461+ current->comm, task_pid_nr(current),
22462 cattr_name(want_flags),
22463 (unsigned long long)paddr,
22464 (unsigned long long)(paddr + size),
22465diff -urNp linux-2.6.32.43/arch/x86/mm/pf_in.c linux-2.6.32.43/arch/x86/mm/pf_in.c
22466--- linux-2.6.32.43/arch/x86/mm/pf_in.c 2011-03-27 14:31:47.000000000 -0400
22467+++ linux-2.6.32.43/arch/x86/mm/pf_in.c 2011-07-06 19:53:33.000000000 -0400
22468@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned l
22469 int i;
22470 enum reason_type rv = OTHERS;
22471
22472- p = (unsigned char *)ins_addr;
22473+ p = (unsigned char *)ktla_ktva(ins_addr);
22474 p += skip_prefix(p, &prf);
22475 p += get_opcode(p, &opcode);
22476
22477@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(un
22478 struct prefix_bits prf;
22479 int i;
22480
22481- p = (unsigned char *)ins_addr;
22482+ p = (unsigned char *)ktla_ktva(ins_addr);
22483 p += skip_prefix(p, &prf);
22484 p += get_opcode(p, &opcode);
22485
22486@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned
22487 struct prefix_bits prf;
22488 int i;
22489
22490- p = (unsigned char *)ins_addr;
22491+ p = (unsigned char *)ktla_ktva(ins_addr);
22492 p += skip_prefix(p, &prf);
22493 p += get_opcode(p, &opcode);
22494
22495@@ -417,7 +417,7 @@ unsigned long get_ins_reg_val(unsigned l
22496 int i;
22497 unsigned long rv;
22498
22499- p = (unsigned char *)ins_addr;
22500+ p = (unsigned char *)ktla_ktva(ins_addr);
22501 p += skip_prefix(p, &prf);
22502 p += get_opcode(p, &opcode);
22503 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
22504@@ -472,7 +472,7 @@ unsigned long get_ins_imm_val(unsigned l
22505 int i;
22506 unsigned long rv;
22507
22508- p = (unsigned char *)ins_addr;
22509+ p = (unsigned char *)ktla_ktva(ins_addr);
22510 p += skip_prefix(p, &prf);
22511 p += get_opcode(p, &opcode);
22512 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
22513diff -urNp linux-2.6.32.43/arch/x86/mm/pgtable_32.c linux-2.6.32.43/arch/x86/mm/pgtable_32.c
22514--- linux-2.6.32.43/arch/x86/mm/pgtable_32.c 2011-03-27 14:31:47.000000000 -0400
22515+++ linux-2.6.32.43/arch/x86/mm/pgtable_32.c 2011-04-17 15:56:46.000000000 -0400
22516@@ -49,10 +49,13 @@ void set_pte_vaddr(unsigned long vaddr,
22517 return;
22518 }
22519 pte = pte_offset_kernel(pmd, vaddr);
22520+
22521+ pax_open_kernel();
22522 if (pte_val(pteval))
22523 set_pte_at(&init_mm, vaddr, pte, pteval);
22524 else
22525 pte_clear(&init_mm, vaddr, pte);
22526+ pax_close_kernel();
22527
22528 /*
22529 * It's enough to flush this one mapping.
22530diff -urNp linux-2.6.32.43/arch/x86/mm/pgtable.c linux-2.6.32.43/arch/x86/mm/pgtable.c
22531--- linux-2.6.32.43/arch/x86/mm/pgtable.c 2011-03-27 14:31:47.000000000 -0400
22532+++ linux-2.6.32.43/arch/x86/mm/pgtable.c 2011-05-11 18:25:15.000000000 -0400
22533@@ -83,9 +83,52 @@ static inline void pgd_list_del(pgd_t *p
22534 list_del(&page->lru);
22535 }
22536
22537-#define UNSHARED_PTRS_PER_PGD \
22538- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22539+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22540+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
22541
22542+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22543+{
22544+ while (count--)
22545+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
22546+}
22547+#endif
22548+
22549+#ifdef CONFIG_PAX_PER_CPU_PGD
22550+void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count)
22551+{
22552+ while (count--)
22553+
22554+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
22555+ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask);
22556+#else
22557+ *dst++ = *src++;
22558+#endif
22559+
22560+}
22561+#endif
22562+
22563+#ifdef CONFIG_X86_64
22564+#define pxd_t pud_t
22565+#define pyd_t pgd_t
22566+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
22567+#define pxd_free(mm, pud) pud_free((mm), (pud))
22568+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
22569+#define pyd_offset(mm ,address) pgd_offset((mm), (address))
22570+#define PYD_SIZE PGDIR_SIZE
22571+#else
22572+#define pxd_t pmd_t
22573+#define pyd_t pud_t
22574+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
22575+#define pxd_free(mm, pud) pmd_free((mm), (pud))
22576+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
22577+#define pyd_offset(mm ,address) pud_offset((mm), (address))
22578+#define PYD_SIZE PUD_SIZE
22579+#endif
22580+
22581+#ifdef CONFIG_PAX_PER_CPU_PGD
22582+static inline void pgd_ctor(pgd_t *pgd) {}
22583+static inline void pgd_dtor(pgd_t *pgd) {}
22584+#else
22585 static void pgd_ctor(pgd_t *pgd)
22586 {
22587 /* If the pgd points to a shared pagetable level (either the
22588@@ -119,6 +162,7 @@ static void pgd_dtor(pgd_t *pgd)
22589 pgd_list_del(pgd);
22590 spin_unlock_irqrestore(&pgd_lock, flags);
22591 }
22592+#endif
22593
22594 /*
22595 * List of all pgd's needed for non-PAE so it can invalidate entries
22596@@ -131,7 +175,7 @@ static void pgd_dtor(pgd_t *pgd)
22597 * -- wli
22598 */
22599
22600-#ifdef CONFIG_X86_PAE
22601+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22602 /*
22603 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
22604 * updating the top-level pagetable entries to guarantee the
22605@@ -143,7 +187,7 @@ static void pgd_dtor(pgd_t *pgd)
22606 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
22607 * and initialize the kernel pmds here.
22608 */
22609-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
22610+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
22611
22612 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
22613 {
22614@@ -161,36 +205,38 @@ void pud_populate(struct mm_struct *mm,
22615 */
22616 flush_tlb_mm(mm);
22617 }
22618+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
22619+#define PREALLOCATED_PXDS USER_PGD_PTRS
22620 #else /* !CONFIG_X86_PAE */
22621
22622 /* No need to prepopulate any pagetable entries in non-PAE modes. */
22623-#define PREALLOCATED_PMDS 0
22624+#define PREALLOCATED_PXDS 0
22625
22626 #endif /* CONFIG_X86_PAE */
22627
22628-static void free_pmds(pmd_t *pmds[])
22629+static void free_pxds(pxd_t *pxds[])
22630 {
22631 int i;
22632
22633- for(i = 0; i < PREALLOCATED_PMDS; i++)
22634- if (pmds[i])
22635- free_page((unsigned long)pmds[i]);
22636+ for(i = 0; i < PREALLOCATED_PXDS; i++)
22637+ if (pxds[i])
22638+ free_page((unsigned long)pxds[i]);
22639 }
22640
22641-static int preallocate_pmds(pmd_t *pmds[])
22642+static int preallocate_pxds(pxd_t *pxds[])
22643 {
22644 int i;
22645 bool failed = false;
22646
22647- for(i = 0; i < PREALLOCATED_PMDS; i++) {
22648- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
22649- if (pmd == NULL)
22650+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
22651+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
22652+ if (pxd == NULL)
22653 failed = true;
22654- pmds[i] = pmd;
22655+ pxds[i] = pxd;
22656 }
22657
22658 if (failed) {
22659- free_pmds(pmds);
22660+ free_pxds(pxds);
22661 return -ENOMEM;
22662 }
22663
22664@@ -203,51 +249,56 @@ static int preallocate_pmds(pmd_t *pmds[
22665 * preallocate which never got a corresponding vma will need to be
22666 * freed manually.
22667 */
22668-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
22669+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
22670 {
22671 int i;
22672
22673- for(i = 0; i < PREALLOCATED_PMDS; i++) {
22674+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
22675 pgd_t pgd = pgdp[i];
22676
22677 if (pgd_val(pgd) != 0) {
22678- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
22679+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
22680
22681- pgdp[i] = native_make_pgd(0);
22682+ set_pgd(pgdp + i, native_make_pgd(0));
22683
22684- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
22685- pmd_free(mm, pmd);
22686+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
22687+ pxd_free(mm, pxd);
22688 }
22689 }
22690 }
22691
22692-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
22693+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
22694 {
22695- pud_t *pud;
22696+ pyd_t *pyd;
22697 unsigned long addr;
22698 int i;
22699
22700- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
22701+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
22702 return;
22703
22704- pud = pud_offset(pgd, 0);
22705+#ifdef CONFIG_X86_64
22706+ pyd = pyd_offset(mm, 0L);
22707+#else
22708+ pyd = pyd_offset(pgd, 0L);
22709+#endif
22710
22711- for (addr = i = 0; i < PREALLOCATED_PMDS;
22712- i++, pud++, addr += PUD_SIZE) {
22713- pmd_t *pmd = pmds[i];
22714+ for (addr = i = 0; i < PREALLOCATED_PXDS;
22715+ i++, pyd++, addr += PYD_SIZE) {
22716+ pxd_t *pxd = pxds[i];
22717
22718 if (i >= KERNEL_PGD_BOUNDARY)
22719- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22720- sizeof(pmd_t) * PTRS_PER_PMD);
22721+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
22722+ sizeof(pxd_t) * PTRS_PER_PMD);
22723
22724- pud_populate(mm, pud, pmd);
22725+ pyd_populate(mm, pyd, pxd);
22726 }
22727 }
22728
22729 pgd_t *pgd_alloc(struct mm_struct *mm)
22730 {
22731 pgd_t *pgd;
22732- pmd_t *pmds[PREALLOCATED_PMDS];
22733+ pxd_t *pxds[PREALLOCATED_PXDS];
22734+
22735 unsigned long flags;
22736
22737 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
22738@@ -257,11 +308,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22739
22740 mm->pgd = pgd;
22741
22742- if (preallocate_pmds(pmds) != 0)
22743+ if (preallocate_pxds(pxds) != 0)
22744 goto out_free_pgd;
22745
22746 if (paravirt_pgd_alloc(mm) != 0)
22747- goto out_free_pmds;
22748+ goto out_free_pxds;
22749
22750 /*
22751 * Make sure that pre-populating the pmds is atomic with
22752@@ -271,14 +322,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
22753 spin_lock_irqsave(&pgd_lock, flags);
22754
22755 pgd_ctor(pgd);
22756- pgd_prepopulate_pmd(mm, pgd, pmds);
22757+ pgd_prepopulate_pxd(mm, pgd, pxds);
22758
22759 spin_unlock_irqrestore(&pgd_lock, flags);
22760
22761 return pgd;
22762
22763-out_free_pmds:
22764- free_pmds(pmds);
22765+out_free_pxds:
22766+ free_pxds(pxds);
22767 out_free_pgd:
22768 free_page((unsigned long)pgd);
22769 out:
22770@@ -287,7 +338,7 @@ out:
22771
22772 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
22773 {
22774- pgd_mop_up_pmds(mm, pgd);
22775+ pgd_mop_up_pxds(mm, pgd);
22776 pgd_dtor(pgd);
22777 paravirt_pgd_free(mm, pgd);
22778 free_page((unsigned long)pgd);
22779diff -urNp linux-2.6.32.43/arch/x86/mm/setup_nx.c linux-2.6.32.43/arch/x86/mm/setup_nx.c
22780--- linux-2.6.32.43/arch/x86/mm/setup_nx.c 2011-03-27 14:31:47.000000000 -0400
22781+++ linux-2.6.32.43/arch/x86/mm/setup_nx.c 2011-04-17 15:56:46.000000000 -0400
22782@@ -4,11 +4,10 @@
22783
22784 #include <asm/pgtable.h>
22785
22786+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
22787 int nx_enabled;
22788
22789-#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
22790-static int disable_nx __cpuinitdata;
22791-
22792+#ifndef CONFIG_PAX_PAGEEXEC
22793 /*
22794 * noexec = on|off
22795 *
22796@@ -22,32 +21,26 @@ static int __init noexec_setup(char *str
22797 if (!str)
22798 return -EINVAL;
22799 if (!strncmp(str, "on", 2)) {
22800- __supported_pte_mask |= _PAGE_NX;
22801- disable_nx = 0;
22802+ nx_enabled = 1;
22803 } else if (!strncmp(str, "off", 3)) {
22804- disable_nx = 1;
22805- __supported_pte_mask &= ~_PAGE_NX;
22806+ nx_enabled = 0;
22807 }
22808 return 0;
22809 }
22810 early_param("noexec", noexec_setup);
22811 #endif
22812+#endif
22813
22814 #ifdef CONFIG_X86_PAE
22815 void __init set_nx(void)
22816 {
22817- unsigned int v[4], l, h;
22818+ if (!nx_enabled && cpu_has_nx) {
22819+ unsigned l, h;
22820
22821- if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
22822- cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);
22823-
22824- if ((v[3] & (1 << 20)) && !disable_nx) {
22825- rdmsr(MSR_EFER, l, h);
22826- l |= EFER_NX;
22827- wrmsr(MSR_EFER, l, h);
22828- nx_enabled = 1;
22829- __supported_pte_mask |= _PAGE_NX;
22830- }
22831+ __supported_pte_mask &= ~_PAGE_NX;
22832+ rdmsr(MSR_EFER, l, h);
22833+ l &= ~EFER_NX;
22834+ wrmsr(MSR_EFER, l, h);
22835 }
22836 }
22837 #else
22838@@ -62,7 +55,7 @@ void __cpuinit check_efer(void)
22839 unsigned long efer;
22840
22841 rdmsrl(MSR_EFER, efer);
22842- if (!(efer & EFER_NX) || disable_nx)
22843+ if (!(efer & EFER_NX) || !nx_enabled)
22844 __supported_pte_mask &= ~_PAGE_NX;
22845 }
22846 #endif
22847diff -urNp linux-2.6.32.43/arch/x86/mm/tlb.c linux-2.6.32.43/arch/x86/mm/tlb.c
22848--- linux-2.6.32.43/arch/x86/mm/tlb.c 2011-03-27 14:31:47.000000000 -0400
22849+++ linux-2.6.32.43/arch/x86/mm/tlb.c 2011-04-23 12:56:10.000000000 -0400
22850@@ -61,7 +61,11 @@ void leave_mm(int cpu)
22851 BUG();
22852 cpumask_clear_cpu(cpu,
22853 mm_cpumask(percpu_read(cpu_tlbstate.active_mm)));
22854+
22855+#ifndef CONFIG_PAX_PER_CPU_PGD
22856 load_cr3(swapper_pg_dir);
22857+#endif
22858+
22859 }
22860 EXPORT_SYMBOL_GPL(leave_mm);
22861
22862diff -urNp linux-2.6.32.43/arch/x86/oprofile/backtrace.c linux-2.6.32.43/arch/x86/oprofile/backtrace.c
22863--- linux-2.6.32.43/arch/x86/oprofile/backtrace.c 2011-03-27 14:31:47.000000000 -0400
22864+++ linux-2.6.32.43/arch/x86/oprofile/backtrace.c 2011-04-17 15:56:46.000000000 -0400
22865@@ -57,7 +57,7 @@ static struct frame_head *dump_user_back
22866 struct frame_head bufhead[2];
22867
22868 /* Also check accessibility of one struct frame_head beyond */
22869- if (!access_ok(VERIFY_READ, head, sizeof(bufhead)))
22870+ if (!__access_ok(VERIFY_READ, head, sizeof(bufhead)))
22871 return NULL;
22872 if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead)))
22873 return NULL;
22874@@ -77,7 +77,7 @@ x86_backtrace(struct pt_regs * const reg
22875 {
22876 struct frame_head *head = (struct frame_head *)frame_pointer(regs);
22877
22878- if (!user_mode_vm(regs)) {
22879+ if (!user_mode(regs)) {
22880 unsigned long stack = kernel_stack_pointer(regs);
22881 if (depth)
22882 dump_trace(NULL, regs, (unsigned long *)stack, 0,
22883diff -urNp linux-2.6.32.43/arch/x86/oprofile/op_model_p4.c linux-2.6.32.43/arch/x86/oprofile/op_model_p4.c
22884--- linux-2.6.32.43/arch/x86/oprofile/op_model_p4.c 2011-03-27 14:31:47.000000000 -0400
22885+++ linux-2.6.32.43/arch/x86/oprofile/op_model_p4.c 2011-04-17 15:56:46.000000000 -0400
22886@@ -50,7 +50,7 @@ static inline void setup_num_counters(vo
22887 #endif
22888 }
22889
22890-static int inline addr_increment(void)
22891+static inline int addr_increment(void)
22892 {
22893 #ifdef CONFIG_SMP
22894 return smp_num_siblings == 2 ? 2 : 1;
22895diff -urNp linux-2.6.32.43/arch/x86/pci/common.c linux-2.6.32.43/arch/x86/pci/common.c
22896--- linux-2.6.32.43/arch/x86/pci/common.c 2011-03-27 14:31:47.000000000 -0400
22897+++ linux-2.6.32.43/arch/x86/pci/common.c 2011-04-23 12:56:10.000000000 -0400
22898@@ -31,8 +31,8 @@ int noioapicreroute = 1;
22899 int pcibios_last_bus = -1;
22900 unsigned long pirq_table_addr;
22901 struct pci_bus *pci_root_bus;
22902-struct pci_raw_ops *raw_pci_ops;
22903-struct pci_raw_ops *raw_pci_ext_ops;
22904+const struct pci_raw_ops *raw_pci_ops;
22905+const struct pci_raw_ops *raw_pci_ext_ops;
22906
22907 int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn,
22908 int reg, int len, u32 *val)
22909diff -urNp linux-2.6.32.43/arch/x86/pci/direct.c linux-2.6.32.43/arch/x86/pci/direct.c
22910--- linux-2.6.32.43/arch/x86/pci/direct.c 2011-03-27 14:31:47.000000000 -0400
22911+++ linux-2.6.32.43/arch/x86/pci/direct.c 2011-04-17 15:56:46.000000000 -0400
22912@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int
22913
22914 #undef PCI_CONF1_ADDRESS
22915
22916-struct pci_raw_ops pci_direct_conf1 = {
22917+const struct pci_raw_ops pci_direct_conf1 = {
22918 .read = pci_conf1_read,
22919 .write = pci_conf1_write,
22920 };
22921@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int
22922
22923 #undef PCI_CONF2_ADDRESS
22924
22925-struct pci_raw_ops pci_direct_conf2 = {
22926+const struct pci_raw_ops pci_direct_conf2 = {
22927 .read = pci_conf2_read,
22928 .write = pci_conf2_write,
22929 };
22930@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = {
22931 * This should be close to trivial, but it isn't, because there are buggy
22932 * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID.
22933 */
22934-static int __init pci_sanity_check(struct pci_raw_ops *o)
22935+static int __init pci_sanity_check(const struct pci_raw_ops *o)
22936 {
22937 u32 x = 0;
22938 int year, devfn;
22939diff -urNp linux-2.6.32.43/arch/x86/pci/mmconfig_32.c linux-2.6.32.43/arch/x86/pci/mmconfig_32.c
22940--- linux-2.6.32.43/arch/x86/pci/mmconfig_32.c 2011-03-27 14:31:47.000000000 -0400
22941+++ linux-2.6.32.43/arch/x86/pci/mmconfig_32.c 2011-04-17 15:56:46.000000000 -0400
22942@@ -125,7 +125,7 @@ static int pci_mmcfg_write(unsigned int
22943 return 0;
22944 }
22945
22946-static struct pci_raw_ops pci_mmcfg = {
22947+static const struct pci_raw_ops pci_mmcfg = {
22948 .read = pci_mmcfg_read,
22949 .write = pci_mmcfg_write,
22950 };
22951diff -urNp linux-2.6.32.43/arch/x86/pci/mmconfig_64.c linux-2.6.32.43/arch/x86/pci/mmconfig_64.c
22952--- linux-2.6.32.43/arch/x86/pci/mmconfig_64.c 2011-03-27 14:31:47.000000000 -0400
22953+++ linux-2.6.32.43/arch/x86/pci/mmconfig_64.c 2011-04-17 15:56:46.000000000 -0400
22954@@ -104,7 +104,7 @@ static int pci_mmcfg_write(unsigned int
22955 return 0;
22956 }
22957
22958-static struct pci_raw_ops pci_mmcfg = {
22959+static const struct pci_raw_ops pci_mmcfg = {
22960 .read = pci_mmcfg_read,
22961 .write = pci_mmcfg_write,
22962 };
22963diff -urNp linux-2.6.32.43/arch/x86/pci/numaq_32.c linux-2.6.32.43/arch/x86/pci/numaq_32.c
22964--- linux-2.6.32.43/arch/x86/pci/numaq_32.c 2011-03-27 14:31:47.000000000 -0400
22965+++ linux-2.6.32.43/arch/x86/pci/numaq_32.c 2011-04-17 15:56:46.000000000 -0400
22966@@ -112,7 +112,7 @@ static int pci_conf1_mq_write(unsigned i
22967
22968 #undef PCI_CONF1_MQ_ADDRESS
22969
22970-static struct pci_raw_ops pci_direct_conf1_mq = {
22971+static const struct pci_raw_ops pci_direct_conf1_mq = {
22972 .read = pci_conf1_mq_read,
22973 .write = pci_conf1_mq_write
22974 };
22975diff -urNp linux-2.6.32.43/arch/x86/pci/olpc.c linux-2.6.32.43/arch/x86/pci/olpc.c
22976--- linux-2.6.32.43/arch/x86/pci/olpc.c 2011-03-27 14:31:47.000000000 -0400
22977+++ linux-2.6.32.43/arch/x86/pci/olpc.c 2011-04-17 15:56:46.000000000 -0400
22978@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s
22979 return 0;
22980 }
22981
22982-static struct pci_raw_ops pci_olpc_conf = {
22983+static const struct pci_raw_ops pci_olpc_conf = {
22984 .read = pci_olpc_read,
22985 .write = pci_olpc_write,
22986 };
22987diff -urNp linux-2.6.32.43/arch/x86/pci/pcbios.c linux-2.6.32.43/arch/x86/pci/pcbios.c
22988--- linux-2.6.32.43/arch/x86/pci/pcbios.c 2011-03-27 14:31:47.000000000 -0400
22989+++ linux-2.6.32.43/arch/x86/pci/pcbios.c 2011-04-17 15:56:46.000000000 -0400
22990@@ -56,50 +56,93 @@ union bios32 {
22991 static struct {
22992 unsigned long address;
22993 unsigned short segment;
22994-} bios32_indirect = { 0, __KERNEL_CS };
22995+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
22996
22997 /*
22998 * Returns the entry point for the given service, NULL on error
22999 */
23000
23001-static unsigned long bios32_service(unsigned long service)
23002+static unsigned long __devinit bios32_service(unsigned long service)
23003 {
23004 unsigned char return_code; /* %al */
23005 unsigned long address; /* %ebx */
23006 unsigned long length; /* %ecx */
23007 unsigned long entry; /* %edx */
23008 unsigned long flags;
23009+ struct desc_struct d, *gdt;
23010
23011 local_irq_save(flags);
23012- __asm__("lcall *(%%edi); cld"
23013+
23014+ gdt = get_cpu_gdt_table(smp_processor_id());
23015+
23016+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
23017+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23018+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
23019+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23020+
23021+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
23022 : "=a" (return_code),
23023 "=b" (address),
23024 "=c" (length),
23025 "=d" (entry)
23026 : "0" (service),
23027 "1" (0),
23028- "D" (&bios32_indirect));
23029+ "D" (&bios32_indirect),
23030+ "r"(__PCIBIOS_DS)
23031+ : "memory");
23032+
23033+ pax_open_kernel();
23034+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
23035+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
23036+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
23037+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
23038+ pax_close_kernel();
23039+
23040 local_irq_restore(flags);
23041
23042 switch (return_code) {
23043- case 0:
23044- return address + entry;
23045- case 0x80: /* Not present */
23046- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23047- return 0;
23048- default: /* Shouldn't happen */
23049- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23050- service, return_code);
23051+ case 0: {
23052+ int cpu;
23053+ unsigned char flags;
23054+
23055+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
23056+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
23057+ printk(KERN_WARNING "bios32_service: not valid\n");
23058 return 0;
23059+ }
23060+ address = address + PAGE_OFFSET;
23061+ length += 16UL; /* some BIOSs underreport this... */
23062+ flags = 4;
23063+ if (length >= 64*1024*1024) {
23064+ length >>= PAGE_SHIFT;
23065+ flags |= 8;
23066+ }
23067+
23068+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
23069+ gdt = get_cpu_gdt_table(cpu);
23070+ pack_descriptor(&d, address, length, 0x9b, flags);
23071+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
23072+ pack_descriptor(&d, address, length, 0x93, flags);
23073+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
23074+ }
23075+ return entry;
23076+ }
23077+ case 0x80: /* Not present */
23078+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
23079+ return 0;
23080+ default: /* Shouldn't happen */
23081+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
23082+ service, return_code);
23083+ return 0;
23084 }
23085 }
23086
23087 static struct {
23088 unsigned long address;
23089 unsigned short segment;
23090-} pci_indirect = { 0, __KERNEL_CS };
23091+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
23092
23093-static int pci_bios_present;
23094+static int pci_bios_present __read_only;
23095
23096 static int __devinit check_pcibios(void)
23097 {
23098@@ -108,11 +151,13 @@ static int __devinit check_pcibios(void)
23099 unsigned long flags, pcibios_entry;
23100
23101 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
23102- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
23103+ pci_indirect.address = pcibios_entry;
23104
23105 local_irq_save(flags);
23106- __asm__(
23107- "lcall *(%%edi); cld\n\t"
23108+ __asm__("movw %w6, %%ds\n\t"
23109+ "lcall *%%ss:(%%edi); cld\n\t"
23110+ "push %%ss\n\t"
23111+ "pop %%ds\n\t"
23112 "jc 1f\n\t"
23113 "xor %%ah, %%ah\n"
23114 "1:"
23115@@ -121,7 +166,8 @@ static int __devinit check_pcibios(void)
23116 "=b" (ebx),
23117 "=c" (ecx)
23118 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
23119- "D" (&pci_indirect)
23120+ "D" (&pci_indirect),
23121+ "r" (__PCIBIOS_DS)
23122 : "memory");
23123 local_irq_restore(flags);
23124
23125@@ -165,7 +211,10 @@ static int pci_bios_read(unsigned int se
23126
23127 switch (len) {
23128 case 1:
23129- __asm__("lcall *(%%esi); cld\n\t"
23130+ __asm__("movw %w6, %%ds\n\t"
23131+ "lcall *%%ss:(%%esi); cld\n\t"
23132+ "push %%ss\n\t"
23133+ "pop %%ds\n\t"
23134 "jc 1f\n\t"
23135 "xor %%ah, %%ah\n"
23136 "1:"
23137@@ -174,7 +223,8 @@ static int pci_bios_read(unsigned int se
23138 : "1" (PCIBIOS_READ_CONFIG_BYTE),
23139 "b" (bx),
23140 "D" ((long)reg),
23141- "S" (&pci_indirect));
23142+ "S" (&pci_indirect),
23143+ "r" (__PCIBIOS_DS));
23144 /*
23145 * Zero-extend the result beyond 8 bits, do not trust the
23146 * BIOS having done it:
23147@@ -182,7 +232,10 @@ static int pci_bios_read(unsigned int se
23148 *value &= 0xff;
23149 break;
23150 case 2:
23151- __asm__("lcall *(%%esi); cld\n\t"
23152+ __asm__("movw %w6, %%ds\n\t"
23153+ "lcall *%%ss:(%%esi); cld\n\t"
23154+ "push %%ss\n\t"
23155+ "pop %%ds\n\t"
23156 "jc 1f\n\t"
23157 "xor %%ah, %%ah\n"
23158 "1:"
23159@@ -191,7 +244,8 @@ static int pci_bios_read(unsigned int se
23160 : "1" (PCIBIOS_READ_CONFIG_WORD),
23161 "b" (bx),
23162 "D" ((long)reg),
23163- "S" (&pci_indirect));
23164+ "S" (&pci_indirect),
23165+ "r" (__PCIBIOS_DS));
23166 /*
23167 * Zero-extend the result beyond 16 bits, do not trust the
23168 * BIOS having done it:
23169@@ -199,7 +253,10 @@ static int pci_bios_read(unsigned int se
23170 *value &= 0xffff;
23171 break;
23172 case 4:
23173- __asm__("lcall *(%%esi); cld\n\t"
23174+ __asm__("movw %w6, %%ds\n\t"
23175+ "lcall *%%ss:(%%esi); cld\n\t"
23176+ "push %%ss\n\t"
23177+ "pop %%ds\n\t"
23178 "jc 1f\n\t"
23179 "xor %%ah, %%ah\n"
23180 "1:"
23181@@ -208,7 +265,8 @@ static int pci_bios_read(unsigned int se
23182 : "1" (PCIBIOS_READ_CONFIG_DWORD),
23183 "b" (bx),
23184 "D" ((long)reg),
23185- "S" (&pci_indirect));
23186+ "S" (&pci_indirect),
23187+ "r" (__PCIBIOS_DS));
23188 break;
23189 }
23190
23191@@ -231,7 +289,10 @@ static int pci_bios_write(unsigned int s
23192
23193 switch (len) {
23194 case 1:
23195- __asm__("lcall *(%%esi); cld\n\t"
23196+ __asm__("movw %w6, %%ds\n\t"
23197+ "lcall *%%ss:(%%esi); cld\n\t"
23198+ "push %%ss\n\t"
23199+ "pop %%ds\n\t"
23200 "jc 1f\n\t"
23201 "xor %%ah, %%ah\n"
23202 "1:"
23203@@ -240,10 +301,14 @@ static int pci_bios_write(unsigned int s
23204 "c" (value),
23205 "b" (bx),
23206 "D" ((long)reg),
23207- "S" (&pci_indirect));
23208+ "S" (&pci_indirect),
23209+ "r" (__PCIBIOS_DS));
23210 break;
23211 case 2:
23212- __asm__("lcall *(%%esi); cld\n\t"
23213+ __asm__("movw %w6, %%ds\n\t"
23214+ "lcall *%%ss:(%%esi); cld\n\t"
23215+ "push %%ss\n\t"
23216+ "pop %%ds\n\t"
23217 "jc 1f\n\t"
23218 "xor %%ah, %%ah\n"
23219 "1:"
23220@@ -252,10 +317,14 @@ static int pci_bios_write(unsigned int s
23221 "c" (value),
23222 "b" (bx),
23223 "D" ((long)reg),
23224- "S" (&pci_indirect));
23225+ "S" (&pci_indirect),
23226+ "r" (__PCIBIOS_DS));
23227 break;
23228 case 4:
23229- __asm__("lcall *(%%esi); cld\n\t"
23230+ __asm__("movw %w6, %%ds\n\t"
23231+ "lcall *%%ss:(%%esi); cld\n\t"
23232+ "push %%ss\n\t"
23233+ "pop %%ds\n\t"
23234 "jc 1f\n\t"
23235 "xor %%ah, %%ah\n"
23236 "1:"
23237@@ -264,7 +333,8 @@ static int pci_bios_write(unsigned int s
23238 "c" (value),
23239 "b" (bx),
23240 "D" ((long)reg),
23241- "S" (&pci_indirect));
23242+ "S" (&pci_indirect),
23243+ "r" (__PCIBIOS_DS));
23244 break;
23245 }
23246
23247@@ -278,7 +348,7 @@ static int pci_bios_write(unsigned int s
23248 * Function table for BIOS32 access
23249 */
23250
23251-static struct pci_raw_ops pci_bios_access = {
23252+static const struct pci_raw_ops pci_bios_access = {
23253 .read = pci_bios_read,
23254 .write = pci_bios_write
23255 };
23256@@ -287,7 +357,7 @@ static struct pci_raw_ops pci_bios_acces
23257 * Try to find PCI BIOS.
23258 */
23259
23260-static struct pci_raw_ops * __devinit pci_find_bios(void)
23261+static const struct pci_raw_ops * __devinit pci_find_bios(void)
23262 {
23263 union bios32 *check;
23264 unsigned char sum;
23265@@ -368,10 +438,13 @@ struct irq_routing_table * pcibios_get_i
23266
23267 DBG("PCI: Fetching IRQ routing table... ");
23268 __asm__("push %%es\n\t"
23269+ "movw %w8, %%ds\n\t"
23270 "push %%ds\n\t"
23271 "pop %%es\n\t"
23272- "lcall *(%%esi); cld\n\t"
23273+ "lcall *%%ss:(%%esi); cld\n\t"
23274 "pop %%es\n\t"
23275+ "push %%ss\n\t"
23276+ "pop %%ds\n"
23277 "jc 1f\n\t"
23278 "xor %%ah, %%ah\n"
23279 "1:"
23280@@ -382,7 +455,8 @@ struct irq_routing_table * pcibios_get_i
23281 "1" (0),
23282 "D" ((long) &opt),
23283 "S" (&pci_indirect),
23284- "m" (opt)
23285+ "m" (opt),
23286+ "r" (__PCIBIOS_DS)
23287 : "memory");
23288 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
23289 if (ret & 0xff00)
23290@@ -406,7 +480,10 @@ int pcibios_set_irq_routing(struct pci_d
23291 {
23292 int ret;
23293
23294- __asm__("lcall *(%%esi); cld\n\t"
23295+ __asm__("movw %w5, %%ds\n\t"
23296+ "lcall *%%ss:(%%esi); cld\n\t"
23297+ "push %%ss\n\t"
23298+ "pop %%ds\n"
23299 "jc 1f\n\t"
23300 "xor %%ah, %%ah\n"
23301 "1:"
23302@@ -414,7 +491,8 @@ int pcibios_set_irq_routing(struct pci_d
23303 : "0" (PCIBIOS_SET_PCI_HW_INT),
23304 "b" ((dev->bus->number << 8) | dev->devfn),
23305 "c" ((irq << 8) | (pin + 10)),
23306- "S" (&pci_indirect));
23307+ "S" (&pci_indirect),
23308+ "r" (__PCIBIOS_DS));
23309 return !(ret & 0xff00);
23310 }
23311 EXPORT_SYMBOL(pcibios_set_irq_routing);
23312diff -urNp linux-2.6.32.43/arch/x86/power/cpu.c linux-2.6.32.43/arch/x86/power/cpu.c
23313--- linux-2.6.32.43/arch/x86/power/cpu.c 2011-03-27 14:31:47.000000000 -0400
23314+++ linux-2.6.32.43/arch/x86/power/cpu.c 2011-04-17 15:56:46.000000000 -0400
23315@@ -129,7 +129,7 @@ static void do_fpu_end(void)
23316 static void fix_processor_context(void)
23317 {
23318 int cpu = smp_processor_id();
23319- struct tss_struct *t = &per_cpu(init_tss, cpu);
23320+ struct tss_struct *t = init_tss + cpu;
23321
23322 set_tss_desc(cpu, t); /*
23323 * This just modifies memory; should not be
23324@@ -139,7 +139,9 @@ static void fix_processor_context(void)
23325 */
23326
23327 #ifdef CONFIG_X86_64
23328+ pax_open_kernel();
23329 get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9;
23330+ pax_close_kernel();
23331
23332 syscall_init(); /* This sets MSR_*STAR and related */
23333 #endif
23334diff -urNp linux-2.6.32.43/arch/x86/vdso/Makefile linux-2.6.32.43/arch/x86/vdso/Makefile
23335--- linux-2.6.32.43/arch/x86/vdso/Makefile 2011-03-27 14:31:47.000000000 -0400
23336+++ linux-2.6.32.43/arch/x86/vdso/Makefile 2011-04-17 15:56:46.000000000 -0400
23337@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@
23338 $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
23339 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^)
23340
23341-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23342+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
23343 GCOV_PROFILE := n
23344
23345 #
23346diff -urNp linux-2.6.32.43/arch/x86/vdso/vclock_gettime.c linux-2.6.32.43/arch/x86/vdso/vclock_gettime.c
23347--- linux-2.6.32.43/arch/x86/vdso/vclock_gettime.c 2011-03-27 14:31:47.000000000 -0400
23348+++ linux-2.6.32.43/arch/x86/vdso/vclock_gettime.c 2011-04-17 15:56:46.000000000 -0400
23349@@ -22,24 +22,48 @@
23350 #include <asm/hpet.h>
23351 #include <asm/unistd.h>
23352 #include <asm/io.h>
23353+#include <asm/fixmap.h>
23354 #include "vextern.h"
23355
23356 #define gtod vdso_vsyscall_gtod_data
23357
23358+notrace noinline long __vdso_fallback_time(long *t)
23359+{
23360+ long secs;
23361+ asm volatile("syscall"
23362+ : "=a" (secs)
23363+ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory");
23364+ return secs;
23365+}
23366+
23367 notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
23368 {
23369 long ret;
23370 asm("syscall" : "=a" (ret) :
23371- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory");
23372+ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory");
23373 return ret;
23374 }
23375
23376+notrace static inline cycle_t __vdso_vread_hpet(void)
23377+{
23378+ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0);
23379+}
23380+
23381+notrace static inline cycle_t __vdso_vread_tsc(void)
23382+{
23383+ cycle_t ret = (cycle_t)vget_cycles();
23384+
23385+ return ret >= gtod->clock.cycle_last ? ret : gtod->clock.cycle_last;
23386+}
23387+
23388 notrace static inline long vgetns(void)
23389 {
23390 long v;
23391- cycles_t (*vread)(void);
23392- vread = gtod->clock.vread;
23393- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask;
23394+ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])
23395+ v = __vdso_vread_tsc();
23396+ else
23397+ v = __vdso_vread_hpet();
23398+ v = (v - gtod->clock.cycle_last) & gtod->clock.mask;
23399 return (v * gtod->clock.mult) >> gtod->clock.shift;
23400 }
23401
23402@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic
23403
23404 notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
23405 {
23406- if (likely(gtod->sysctl_enabled))
23407+ if (likely(gtod->sysctl_enabled &&
23408+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23409+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23410 switch (clock) {
23411 case CLOCK_REALTIME:
23412 if (likely(gtod->clock.vread))
23413@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid
23414 int clock_gettime(clockid_t, struct timespec *)
23415 __attribute__((weak, alias("__vdso_clock_gettime")));
23416
23417-notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23418+notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz)
23419 {
23420 long ret;
23421- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) {
23422+ asm("syscall" : "=a" (ret) :
23423+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory");
23424+ return ret;
23425+}
23426+
23427+notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
23428+{
23429+ if (likely(gtod->sysctl_enabled &&
23430+ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) ||
23431+ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]))))
23432+ {
23433 if (likely(tv != NULL)) {
23434 BUILD_BUG_ON(offsetof(struct timeval, tv_usec) !=
23435 offsetof(struct timespec, tv_nsec) ||
23436@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t
23437 }
23438 return 0;
23439 }
23440- asm("syscall" : "=a" (ret) :
23441- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
23442- return ret;
23443+ return __vdso_fallback_gettimeofday(tv, tz);
23444 }
23445 int gettimeofday(struct timeval *, struct timezone *)
23446 __attribute__((weak, alias("__vdso_gettimeofday")));
23447diff -urNp linux-2.6.32.43/arch/x86/vdso/vdso32-setup.c linux-2.6.32.43/arch/x86/vdso/vdso32-setup.c
23448--- linux-2.6.32.43/arch/x86/vdso/vdso32-setup.c 2011-03-27 14:31:47.000000000 -0400
23449+++ linux-2.6.32.43/arch/x86/vdso/vdso32-setup.c 2011-04-23 12:56:10.000000000 -0400
23450@@ -25,6 +25,7 @@
23451 #include <asm/tlbflush.h>
23452 #include <asm/vdso.h>
23453 #include <asm/proto.h>
23454+#include <asm/mman.h>
23455
23456 enum {
23457 VDSO_DISABLED = 0,
23458@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m
23459 void enable_sep_cpu(void)
23460 {
23461 int cpu = get_cpu();
23462- struct tss_struct *tss = &per_cpu(init_tss, cpu);
23463+ struct tss_struct *tss = init_tss + cpu;
23464
23465 if (!boot_cpu_has(X86_FEATURE_SEP)) {
23466 put_cpu();
23467@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
23468 gate_vma.vm_start = FIXADDR_USER_START;
23469 gate_vma.vm_end = FIXADDR_USER_END;
23470 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
23471- gate_vma.vm_page_prot = __P101;
23472+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
23473 /*
23474 * Make sure the vDSO gets into every core dump.
23475 * Dumping its contents makes post-mortem fully interpretable later
23476@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l
23477 if (compat)
23478 addr = VDSO_HIGH_BASE;
23479 else {
23480- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
23481+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
23482 if (IS_ERR_VALUE(addr)) {
23483 ret = addr;
23484 goto up_fail;
23485 }
23486 }
23487
23488- current->mm->context.vdso = (void *)addr;
23489+ current->mm->context.vdso = addr;
23490
23491 if (compat_uses_vma || !compat) {
23492 /*
23493@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l
23494 }
23495
23496 current_thread_info()->sysenter_return =
23497- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23498+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
23499
23500 up_fail:
23501 if (ret)
23502- current->mm->context.vdso = NULL;
23503+ current->mm->context.vdso = 0;
23504
23505 up_write(&mm->mmap_sem);
23506
23507@@ -413,8 +414,14 @@ __initcall(ia32_binfmt_init);
23508
23509 const char *arch_vma_name(struct vm_area_struct *vma)
23510 {
23511- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
23512+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
23513 return "[vdso]";
23514+
23515+#ifdef CONFIG_PAX_SEGMEXEC
23516+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
23517+ return "[vdso]";
23518+#endif
23519+
23520 return NULL;
23521 }
23522
23523@@ -423,7 +430,7 @@ struct vm_area_struct *get_gate_vma(stru
23524 struct mm_struct *mm = tsk->mm;
23525
23526 /* Check to see if this task was created in compat vdso mode */
23527- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
23528+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
23529 return &gate_vma;
23530 return NULL;
23531 }
23532diff -urNp linux-2.6.32.43/arch/x86/vdso/vdso.lds.S linux-2.6.32.43/arch/x86/vdso/vdso.lds.S
23533--- linux-2.6.32.43/arch/x86/vdso/vdso.lds.S 2011-03-27 14:31:47.000000000 -0400
23534+++ linux-2.6.32.43/arch/x86/vdso/vdso.lds.S 2011-06-06 17:35:35.000000000 -0400
23535@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK;
23536 #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x;
23537 #include "vextern.h"
23538 #undef VEXTERN
23539+
23540+#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x;
23541+VEXTERN(fallback_gettimeofday)
23542+VEXTERN(fallback_time)
23543+VEXTERN(getcpu)
23544+#undef VEXTERN
23545diff -urNp linux-2.6.32.43/arch/x86/vdso/vextern.h linux-2.6.32.43/arch/x86/vdso/vextern.h
23546--- linux-2.6.32.43/arch/x86/vdso/vextern.h 2011-03-27 14:31:47.000000000 -0400
23547+++ linux-2.6.32.43/arch/x86/vdso/vextern.h 2011-04-17 15:56:46.000000000 -0400
23548@@ -11,6 +11,5 @@
23549 put into vextern.h and be referenced as a pointer with vdso prefix.
23550 The main kernel later fills in the values. */
23551
23552-VEXTERN(jiffies)
23553 VEXTERN(vgetcpu_mode)
23554 VEXTERN(vsyscall_gtod_data)
23555diff -urNp linux-2.6.32.43/arch/x86/vdso/vma.c linux-2.6.32.43/arch/x86/vdso/vma.c
23556--- linux-2.6.32.43/arch/x86/vdso/vma.c 2011-03-27 14:31:47.000000000 -0400
23557+++ linux-2.6.32.43/arch/x86/vdso/vma.c 2011-04-17 15:56:46.000000000 -0400
23558@@ -57,7 +57,7 @@ static int __init init_vdso_vars(void)
23559 if (!vbase)
23560 goto oom;
23561
23562- if (memcmp(vbase, "\177ELF", 4)) {
23563+ if (memcmp(vbase, ELFMAG, SELFMAG)) {
23564 printk("VDSO: I'm broken; not ELF\n");
23565 vdso_enabled = 0;
23566 }
23567@@ -66,6 +66,7 @@ static int __init init_vdso_vars(void)
23568 *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x;
23569 #include "vextern.h"
23570 #undef VEXTERN
23571+ vunmap(vbase);
23572 return 0;
23573
23574 oom:
23575@@ -116,7 +117,7 @@ int arch_setup_additional_pages(struct l
23576 goto up_fail;
23577 }
23578
23579- current->mm->context.vdso = (void *)addr;
23580+ current->mm->context.vdso = addr;
23581
23582 ret = install_special_mapping(mm, addr, vdso_size,
23583 VM_READ|VM_EXEC|
23584@@ -124,7 +125,7 @@ int arch_setup_additional_pages(struct l
23585 VM_ALWAYSDUMP,
23586 vdso_pages);
23587 if (ret) {
23588- current->mm->context.vdso = NULL;
23589+ current->mm->context.vdso = 0;
23590 goto up_fail;
23591 }
23592
23593@@ -132,10 +133,3 @@ up_fail:
23594 up_write(&mm->mmap_sem);
23595 return ret;
23596 }
23597-
23598-static __init int vdso_setup(char *s)
23599-{
23600- vdso_enabled = simple_strtoul(s, NULL, 0);
23601- return 0;
23602-}
23603-__setup("vdso=", vdso_setup);
23604diff -urNp linux-2.6.32.43/arch/x86/xen/enlighten.c linux-2.6.32.43/arch/x86/xen/enlighten.c
23605--- linux-2.6.32.43/arch/x86/xen/enlighten.c 2011-03-27 14:31:47.000000000 -0400
23606+++ linux-2.6.32.43/arch/x86/xen/enlighten.c 2011-05-22 23:02:03.000000000 -0400
23607@@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
23608
23609 struct shared_info xen_dummy_shared_info;
23610
23611-void *xen_initial_gdt;
23612-
23613 /*
23614 * Point at some empty memory to start with. We map the real shared_info
23615 * page as soon as fixmap is up and running.
23616@@ -548,7 +546,7 @@ static void xen_write_idt_entry(gate_des
23617
23618 preempt_disable();
23619
23620- start = __get_cpu_var(idt_desc).address;
23621+ start = (unsigned long)__get_cpu_var(idt_desc).address;
23622 end = start + __get_cpu_var(idt_desc).size + 1;
23623
23624 xen_mc_flush();
23625@@ -993,7 +991,7 @@ static const struct pv_apic_ops xen_apic
23626 #endif
23627 };
23628
23629-static void xen_reboot(int reason)
23630+static __noreturn void xen_reboot(int reason)
23631 {
23632 struct sched_shutdown r = { .reason = reason };
23633
23634@@ -1001,17 +999,17 @@ static void xen_reboot(int reason)
23635 BUG();
23636 }
23637
23638-static void xen_restart(char *msg)
23639+static __noreturn void xen_restart(char *msg)
23640 {
23641 xen_reboot(SHUTDOWN_reboot);
23642 }
23643
23644-static void xen_emergency_restart(void)
23645+static __noreturn void xen_emergency_restart(void)
23646 {
23647 xen_reboot(SHUTDOWN_reboot);
23648 }
23649
23650-static void xen_machine_halt(void)
23651+static __noreturn void xen_machine_halt(void)
23652 {
23653 xen_reboot(SHUTDOWN_poweroff);
23654 }
23655@@ -1095,9 +1093,20 @@ asmlinkage void __init xen_start_kernel(
23656 */
23657 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
23658
23659-#ifdef CONFIG_X86_64
23660 /* Work out if we support NX */
23661- check_efer();
23662+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
23663+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
23664+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
23665+ unsigned l, h;
23666+
23667+#ifdef CONFIG_X86_PAE
23668+ nx_enabled = 1;
23669+#endif
23670+ __supported_pte_mask |= _PAGE_NX;
23671+ rdmsr(MSR_EFER, l, h);
23672+ l |= EFER_NX;
23673+ wrmsr(MSR_EFER, l, h);
23674+ }
23675 #endif
23676
23677 xen_setup_features();
23678@@ -1129,13 +1138,6 @@ asmlinkage void __init xen_start_kernel(
23679
23680 machine_ops = xen_machine_ops;
23681
23682- /*
23683- * The only reliable way to retain the initial address of the
23684- * percpu gdt_page is to remember it here, so we can go and
23685- * mark it RW later, when the initial percpu area is freed.
23686- */
23687- xen_initial_gdt = &per_cpu(gdt_page, 0);
23688-
23689 xen_smp_init();
23690
23691 pgd = (pgd_t *)xen_start_info->pt_base;
23692diff -urNp linux-2.6.32.43/arch/x86/xen/mmu.c linux-2.6.32.43/arch/x86/xen/mmu.c
23693--- linux-2.6.32.43/arch/x86/xen/mmu.c 2011-07-13 17:23:04.000000000 -0400
23694+++ linux-2.6.32.43/arch/x86/xen/mmu.c 2011-07-13 17:23:18.000000000 -0400
23695@@ -1719,6 +1719,8 @@ __init pgd_t *xen_setup_kernel_pagetable
23696 convert_pfn_mfn(init_level4_pgt);
23697 convert_pfn_mfn(level3_ident_pgt);
23698 convert_pfn_mfn(level3_kernel_pgt);
23699+ convert_pfn_mfn(level3_vmalloc_pgt);
23700+ convert_pfn_mfn(level3_vmemmap_pgt);
23701
23702 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
23703 l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud);
23704@@ -1737,7 +1739,10 @@ __init pgd_t *xen_setup_kernel_pagetable
23705 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
23706 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
23707 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
23708+ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO);
23709+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
23710 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
23711+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
23712 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
23713 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
23714
23715diff -urNp linux-2.6.32.43/arch/x86/xen/smp.c linux-2.6.32.43/arch/x86/xen/smp.c
23716--- linux-2.6.32.43/arch/x86/xen/smp.c 2011-03-27 14:31:47.000000000 -0400
23717+++ linux-2.6.32.43/arch/x86/xen/smp.c 2011-05-11 18:25:15.000000000 -0400
23718@@ -167,11 +167,6 @@ static void __init xen_smp_prepare_boot_
23719 {
23720 BUG_ON(smp_processor_id() != 0);
23721 native_smp_prepare_boot_cpu();
23722-
23723- /* We've switched to the "real" per-cpu gdt, so make sure the
23724- old memory can be recycled */
23725- make_lowmem_page_readwrite(xen_initial_gdt);
23726-
23727 xen_setup_vcpu_info_placement();
23728 }
23729
23730@@ -231,12 +226,12 @@ cpu_initialize_context(unsigned int cpu,
23731 gdt = get_cpu_gdt_table(cpu);
23732
23733 ctxt->flags = VGCF_IN_KERNEL;
23734- ctxt->user_regs.ds = __USER_DS;
23735- ctxt->user_regs.es = __USER_DS;
23736+ ctxt->user_regs.ds = __KERNEL_DS;
23737+ ctxt->user_regs.es = __KERNEL_DS;
23738 ctxt->user_regs.ss = __KERNEL_DS;
23739 #ifdef CONFIG_X86_32
23740 ctxt->user_regs.fs = __KERNEL_PERCPU;
23741- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
23742+ savesegment(gs, ctxt->user_regs.gs);
23743 #else
23744 ctxt->gs_base_kernel = per_cpu_offset(cpu);
23745 #endif
23746@@ -287,13 +282,12 @@ static int __cpuinit xen_cpu_up(unsigned
23747 int rc;
23748
23749 per_cpu(current_task, cpu) = idle;
23750+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
23751 #ifdef CONFIG_X86_32
23752 irq_ctx_init(cpu);
23753 #else
23754 clear_tsk_thread_flag(idle, TIF_FORK);
23755- per_cpu(kernel_stack, cpu) =
23756- (unsigned long)task_stack_page(idle) -
23757- KERNEL_STACK_OFFSET + THREAD_SIZE;
23758+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
23759 #endif
23760 xen_setup_runstate_info(cpu);
23761 xen_setup_timer(cpu);
23762diff -urNp linux-2.6.32.43/arch/x86/xen/xen-asm_32.S linux-2.6.32.43/arch/x86/xen/xen-asm_32.S
23763--- linux-2.6.32.43/arch/x86/xen/xen-asm_32.S 2011-03-27 14:31:47.000000000 -0400
23764+++ linux-2.6.32.43/arch/x86/xen/xen-asm_32.S 2011-04-22 19:13:13.000000000 -0400
23765@@ -83,14 +83,14 @@ ENTRY(xen_iret)
23766 ESP_OFFSET=4 # bytes pushed onto stack
23767
23768 /*
23769- * Store vcpu_info pointer for easy access. Do it this way to
23770- * avoid having to reload %fs
23771+ * Store vcpu_info pointer for easy access.
23772 */
23773 #ifdef CONFIG_SMP
23774- GET_THREAD_INFO(%eax)
23775- movl TI_cpu(%eax), %eax
23776- movl __per_cpu_offset(,%eax,4), %eax
23777- mov per_cpu__xen_vcpu(%eax), %eax
23778+ push %fs
23779+ mov $(__KERNEL_PERCPU), %eax
23780+ mov %eax, %fs
23781+ mov PER_CPU_VAR(xen_vcpu), %eax
23782+ pop %fs
23783 #else
23784 movl per_cpu__xen_vcpu, %eax
23785 #endif
23786diff -urNp linux-2.6.32.43/arch/x86/xen/xen-head.S linux-2.6.32.43/arch/x86/xen/xen-head.S
23787--- linux-2.6.32.43/arch/x86/xen/xen-head.S 2011-03-27 14:31:47.000000000 -0400
23788+++ linux-2.6.32.43/arch/x86/xen/xen-head.S 2011-04-17 15:56:46.000000000 -0400
23789@@ -19,6 +19,17 @@ ENTRY(startup_xen)
23790 #ifdef CONFIG_X86_32
23791 mov %esi,xen_start_info
23792 mov $init_thread_union+THREAD_SIZE,%esp
23793+#ifdef CONFIG_SMP
23794+ movl $cpu_gdt_table,%edi
23795+ movl $__per_cpu_load,%eax
23796+ movw %ax,__KERNEL_PERCPU + 2(%edi)
23797+ rorl $16,%eax
23798+ movb %al,__KERNEL_PERCPU + 4(%edi)
23799+ movb %ah,__KERNEL_PERCPU + 7(%edi)
23800+ movl $__per_cpu_end - 1,%eax
23801+ subl $__per_cpu_start,%eax
23802+ movw %ax,__KERNEL_PERCPU + 0(%edi)
23803+#endif
23804 #else
23805 mov %rsi,xen_start_info
23806 mov $init_thread_union+THREAD_SIZE,%rsp
23807diff -urNp linux-2.6.32.43/arch/x86/xen/xen-ops.h linux-2.6.32.43/arch/x86/xen/xen-ops.h
23808--- linux-2.6.32.43/arch/x86/xen/xen-ops.h 2011-03-27 14:31:47.000000000 -0400
23809+++ linux-2.6.32.43/arch/x86/xen/xen-ops.h 2011-04-17 15:56:46.000000000 -0400
23810@@ -10,8 +10,6 @@
23811 extern const char xen_hypervisor_callback[];
23812 extern const char xen_failsafe_callback[];
23813
23814-extern void *xen_initial_gdt;
23815-
23816 struct trap_info;
23817 void xen_copy_trap_info(struct trap_info *traps);
23818
23819diff -urNp linux-2.6.32.43/block/blk-integrity.c linux-2.6.32.43/block/blk-integrity.c
23820--- linux-2.6.32.43/block/blk-integrity.c 2011-03-27 14:31:47.000000000 -0400
23821+++ linux-2.6.32.43/block/blk-integrity.c 2011-04-17 15:56:46.000000000 -0400
23822@@ -278,7 +278,7 @@ static struct attribute *integrity_attrs
23823 NULL,
23824 };
23825
23826-static struct sysfs_ops integrity_ops = {
23827+static const struct sysfs_ops integrity_ops = {
23828 .show = &integrity_attr_show,
23829 .store = &integrity_attr_store,
23830 };
23831diff -urNp linux-2.6.32.43/block/blk-iopoll.c linux-2.6.32.43/block/blk-iopoll.c
23832--- linux-2.6.32.43/block/blk-iopoll.c 2011-03-27 14:31:47.000000000 -0400
23833+++ linux-2.6.32.43/block/blk-iopoll.c 2011-04-17 15:56:46.000000000 -0400
23834@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo
23835 }
23836 EXPORT_SYMBOL(blk_iopoll_complete);
23837
23838-static void blk_iopoll_softirq(struct softirq_action *h)
23839+static void blk_iopoll_softirq(void)
23840 {
23841 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
23842 int rearm = 0, budget = blk_iopoll_budget;
23843diff -urNp linux-2.6.32.43/block/blk-map.c linux-2.6.32.43/block/blk-map.c
23844--- linux-2.6.32.43/block/blk-map.c 2011-03-27 14:31:47.000000000 -0400
23845+++ linux-2.6.32.43/block/blk-map.c 2011-04-18 16:57:33.000000000 -0400
23846@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ
23847 * direct dma. else, set up kernel bounce buffers
23848 */
23849 uaddr = (unsigned long) ubuf;
23850- if (blk_rq_aligned(q, ubuf, len) && !map_data)
23851+ if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data)
23852 bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
23853 else
23854 bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);
23855@@ -201,12 +201,13 @@ int blk_rq_map_user_iov(struct request_q
23856 for (i = 0; i < iov_count; i++) {
23857 unsigned long uaddr = (unsigned long)iov[i].iov_base;
23858
23859+ if (!iov[i].iov_len)
23860+ return -EINVAL;
23861+
23862 if (uaddr & queue_dma_alignment(q)) {
23863 unaligned = 1;
23864 break;
23865 }
23866- if (!iov[i].iov_len)
23867- return -EINVAL;
23868 }
23869
23870 if (unaligned || (q->dma_pad_mask & len) || map_data)
23871@@ -299,7 +300,7 @@ int blk_rq_map_kern(struct request_queue
23872 if (!len || !kbuf)
23873 return -EINVAL;
23874
23875- do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf);
23876+ do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf);
23877 if (do_copy)
23878 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
23879 else
23880diff -urNp linux-2.6.32.43/block/blk-softirq.c linux-2.6.32.43/block/blk-softirq.c
23881--- linux-2.6.32.43/block/blk-softirq.c 2011-03-27 14:31:47.000000000 -0400
23882+++ linux-2.6.32.43/block/blk-softirq.c 2011-04-17 15:56:46.000000000 -0400
23883@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head,
23884 * Softirq action handler - move entries to local list and loop over them
23885 * while passing them to the queue registered handler.
23886 */
23887-static void blk_done_softirq(struct softirq_action *h)
23888+static void blk_done_softirq(void)
23889 {
23890 struct list_head *cpu_list, local_list;
23891
23892diff -urNp linux-2.6.32.43/block/blk-sysfs.c linux-2.6.32.43/block/blk-sysfs.c
23893--- linux-2.6.32.43/block/blk-sysfs.c 2011-05-10 22:12:01.000000000 -0400
23894+++ linux-2.6.32.43/block/blk-sysfs.c 2011-05-10 22:12:26.000000000 -0400
23895@@ -414,7 +414,7 @@ static void blk_release_queue(struct kob
23896 kmem_cache_free(blk_requestq_cachep, q);
23897 }
23898
23899-static struct sysfs_ops queue_sysfs_ops = {
23900+static const struct sysfs_ops queue_sysfs_ops = {
23901 .show = queue_attr_show,
23902 .store = queue_attr_store,
23903 };
23904diff -urNp linux-2.6.32.43/block/bsg.c linux-2.6.32.43/block/bsg.c
23905--- linux-2.6.32.43/block/bsg.c 2011-03-27 14:31:47.000000000 -0400
23906+++ linux-2.6.32.43/block/bsg.c 2011-04-17 15:56:46.000000000 -0400
23907@@ -175,16 +175,24 @@ static int blk_fill_sgv4_hdr_rq(struct r
23908 struct sg_io_v4 *hdr, struct bsg_device *bd,
23909 fmode_t has_write_perm)
23910 {
23911+ unsigned char tmpcmd[sizeof(rq->__cmd)];
23912+ unsigned char *cmdptr;
23913+
23914 if (hdr->request_len > BLK_MAX_CDB) {
23915 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
23916 if (!rq->cmd)
23917 return -ENOMEM;
23918- }
23919+ cmdptr = rq->cmd;
23920+ } else
23921+ cmdptr = tmpcmd;
23922
23923- if (copy_from_user(rq->cmd, (void *)(unsigned long)hdr->request,
23924+ if (copy_from_user(cmdptr, (void *)(unsigned long)hdr->request,
23925 hdr->request_len))
23926 return -EFAULT;
23927
23928+ if (cmdptr != rq->cmd)
23929+ memcpy(rq->cmd, cmdptr, hdr->request_len);
23930+
23931 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
23932 if (blk_verify_command(rq->cmd, has_write_perm))
23933 return -EPERM;
23934diff -urNp linux-2.6.32.43/block/elevator.c linux-2.6.32.43/block/elevator.c
23935--- linux-2.6.32.43/block/elevator.c 2011-03-27 14:31:47.000000000 -0400
23936+++ linux-2.6.32.43/block/elevator.c 2011-04-17 15:56:46.000000000 -0400
23937@@ -889,7 +889,7 @@ elv_attr_store(struct kobject *kobj, str
23938 return error;
23939 }
23940
23941-static struct sysfs_ops elv_sysfs_ops = {
23942+static const struct sysfs_ops elv_sysfs_ops = {
23943 .show = elv_attr_show,
23944 .store = elv_attr_store,
23945 };
23946diff -urNp linux-2.6.32.43/block/scsi_ioctl.c linux-2.6.32.43/block/scsi_ioctl.c
23947--- linux-2.6.32.43/block/scsi_ioctl.c 2011-03-27 14:31:47.000000000 -0400
23948+++ linux-2.6.32.43/block/scsi_ioctl.c 2011-04-23 13:28:22.000000000 -0400
23949@@ -220,8 +220,20 @@ EXPORT_SYMBOL(blk_verify_command);
23950 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
23951 struct sg_io_hdr *hdr, fmode_t mode)
23952 {
23953- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
23954+ unsigned char tmpcmd[sizeof(rq->__cmd)];
23955+ unsigned char *cmdptr;
23956+
23957+ if (rq->cmd != rq->__cmd)
23958+ cmdptr = rq->cmd;
23959+ else
23960+ cmdptr = tmpcmd;
23961+
23962+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
23963 return -EFAULT;
23964+
23965+ if (cmdptr != rq->cmd)
23966+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
23967+
23968 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
23969 return -EPERM;
23970
23971@@ -430,6 +442,8 @@ int sg_scsi_ioctl(struct request_queue *
23972 int err;
23973 unsigned int in_len, out_len, bytes, opcode, cmdlen;
23974 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
23975+ unsigned char tmpcmd[sizeof(rq->__cmd)];
23976+ unsigned char *cmdptr;
23977
23978 if (!sic)
23979 return -EINVAL;
23980@@ -463,9 +477,18 @@ int sg_scsi_ioctl(struct request_queue *
23981 */
23982 err = -EFAULT;
23983 rq->cmd_len = cmdlen;
23984- if (copy_from_user(rq->cmd, sic->data, cmdlen))
23985+
23986+ if (rq->cmd != rq->__cmd)
23987+ cmdptr = rq->cmd;
23988+ else
23989+ cmdptr = tmpcmd;
23990+
23991+ if (copy_from_user(cmdptr, sic->data, cmdlen))
23992 goto error;
23993
23994+ if (rq->cmd != cmdptr)
23995+ memcpy(rq->cmd, cmdptr, cmdlen);
23996+
23997 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
23998 goto error;
23999
24000diff -urNp linux-2.6.32.43/crypto/cryptd.c linux-2.6.32.43/crypto/cryptd.c
24001--- linux-2.6.32.43/crypto/cryptd.c 2011-03-27 14:31:47.000000000 -0400
24002+++ linux-2.6.32.43/crypto/cryptd.c 2011-08-05 20:33:55.000000000 -0400
24003@@ -214,7 +214,7 @@ static int cryptd_blkcipher_enqueue(stru
24004 struct cryptd_queue *queue;
24005
24006 queue = cryptd_get_queue(crypto_ablkcipher_tfm(tfm));
24007- rctx->complete = req->base.complete;
24008+ *(void **)&rctx->complete = req->base.complete;
24009 req->base.complete = complete;
24010
24011 return cryptd_enqueue_request(queue, &req->base);
24012diff -urNp linux-2.6.32.43/crypto/gf128mul.c linux-2.6.32.43/crypto/gf128mul.c
24013--- linux-2.6.32.43/crypto/gf128mul.c 2011-03-27 14:31:47.000000000 -0400
24014+++ linux-2.6.32.43/crypto/gf128mul.c 2011-07-06 19:53:33.000000000 -0400
24015@@ -182,7 +182,7 @@ void gf128mul_lle(be128 *r, const be128
24016 for (i = 0; i < 7; ++i)
24017 gf128mul_x_lle(&p[i + 1], &p[i]);
24018
24019- memset(r, 0, sizeof(r));
24020+ memset(r, 0, sizeof(*r));
24021 for (i = 0;;) {
24022 u8 ch = ((u8 *)b)[15 - i];
24023
24024@@ -220,7 +220,7 @@ void gf128mul_bbe(be128 *r, const be128
24025 for (i = 0; i < 7; ++i)
24026 gf128mul_x_bbe(&p[i + 1], &p[i]);
24027
24028- memset(r, 0, sizeof(r));
24029+ memset(r, 0, sizeof(*r));
24030 for (i = 0;;) {
24031 u8 ch = ((u8 *)b)[i];
24032
24033diff -urNp linux-2.6.32.43/crypto/md5.c linux-2.6.32.43/crypto/md5.c
24034--- linux-2.6.32.43/crypto/md5.c 2011-03-27 14:31:47.000000000 -0400
24035+++ linux-2.6.32.43/crypto/md5.c 2011-08-07 19:48:09.000000000 -0400
24036@@ -20,6 +20,7 @@
24037 #include <linux/module.h>
24038 #include <linux/string.h>
24039 #include <linux/types.h>
24040+#include <linux/cryptohash.h>
24041 #include <asm/byteorder.h>
24042
24043 #define MD5_DIGEST_SIZE 16
24044@@ -27,103 +28,12 @@
24045 #define MD5_BLOCK_WORDS 16
24046 #define MD5_HASH_WORDS 4
24047
24048-#define F1(x, y, z) (z ^ (x & (y ^ z)))
24049-#define F2(x, y, z) F1(z, x, y)
24050-#define F3(x, y, z) (x ^ y ^ z)
24051-#define F4(x, y, z) (y ^ (x | ~z))
24052-
24053-#define MD5STEP(f, w, x, y, z, in, s) \
24054- (w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
24055-
24056 struct md5_ctx {
24057 u32 hash[MD5_HASH_WORDS];
24058 u32 block[MD5_BLOCK_WORDS];
24059 u64 byte_count;
24060 };
24061
24062-static void md5_transform(u32 *hash, u32 const *in)
24063-{
24064- u32 a, b, c, d;
24065-
24066- a = hash[0];
24067- b = hash[1];
24068- c = hash[2];
24069- d = hash[3];
24070-
24071- MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
24072- MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
24073- MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
24074- MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
24075- MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
24076- MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
24077- MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
24078- MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
24079- MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
24080- MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
24081- MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
24082- MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
24083- MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
24084- MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
24085- MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
24086- MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
24087-
24088- MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
24089- MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
24090- MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
24091- MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
24092- MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
24093- MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
24094- MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
24095- MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
24096- MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
24097- MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
24098- MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
24099- MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
24100- MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
24101- MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
24102- MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
24103- MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
24104-
24105- MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
24106- MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
24107- MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
24108- MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
24109- MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
24110- MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
24111- MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
24112- MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
24113- MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
24114- MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
24115- MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
24116- MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
24117- MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
24118- MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
24119- MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
24120- MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
24121-
24122- MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
24123- MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
24124- MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
24125- MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
24126- MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
24127- MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
24128- MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
24129- MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
24130- MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
24131- MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
24132- MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
24133- MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
24134- MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
24135- MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
24136- MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
24137- MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
24138-
24139- hash[0] += a;
24140- hash[1] += b;
24141- hash[2] += c;
24142- hash[3] += d;
24143-}
24144-
24145 /* XXX: this stuff can be optimized */
24146 static inline void le32_to_cpu_array(u32 *buf, unsigned int words)
24147 {
24148diff -urNp linux-2.6.32.43/crypto/serpent.c linux-2.6.32.43/crypto/serpent.c
24149--- linux-2.6.32.43/crypto/serpent.c 2011-03-27 14:31:47.000000000 -0400
24150+++ linux-2.6.32.43/crypto/serpent.c 2011-05-16 21:46:57.000000000 -0400
24151@@ -224,6 +224,8 @@ static int serpent_setkey(struct crypto_
24152 u32 r0,r1,r2,r3,r4;
24153 int i;
24154
24155+ pax_track_stack();
24156+
24157 /* Copy key, add padding */
24158
24159 for (i = 0; i < keylen; ++i)
24160diff -urNp linux-2.6.32.43/Documentation/dontdiff linux-2.6.32.43/Documentation/dontdiff
24161--- linux-2.6.32.43/Documentation/dontdiff 2011-03-27 14:31:47.000000000 -0400
24162+++ linux-2.6.32.43/Documentation/dontdiff 2011-05-18 20:09:36.000000000 -0400
24163@@ -1,13 +1,16 @@
24164 *.a
24165 *.aux
24166 *.bin
24167+*.cis
24168 *.cpio
24169 *.csp
24170+*.dbg
24171 *.dsp
24172 *.dvi
24173 *.elf
24174 *.eps
24175 *.fw
24176+*.gcno
24177 *.gen.S
24178 *.gif
24179 *.grep
24180@@ -38,8 +41,10 @@
24181 *.tab.h
24182 *.tex
24183 *.ver
24184+*.vim
24185 *.xml
24186 *_MODULES
24187+*_reg_safe.h
24188 *_vga16.c
24189 *~
24190 *.9
24191@@ -49,11 +54,16 @@
24192 53c700_d.h
24193 CVS
24194 ChangeSet
24195+GPATH
24196+GRTAGS
24197+GSYMS
24198+GTAGS
24199 Image
24200 Kerntypes
24201 Module.markers
24202 Module.symvers
24203 PENDING
24204+PERF*
24205 SCCS
24206 System.map*
24207 TAGS
24208@@ -76,7 +86,11 @@ btfixupprep
24209 build
24210 bvmlinux
24211 bzImage*
24212+capability_names.h
24213+capflags.c
24214 classlist.h*
24215+clut_vga16.c
24216+common-cmds.h
24217 comp*.log
24218 compile.h*
24219 conf
24220@@ -103,13 +117,14 @@ gen_crc32table
24221 gen_init_cpio
24222 genksyms
24223 *_gray256.c
24224+hash
24225 ihex2fw
24226 ikconfig.h*
24227 initramfs_data.cpio
24228+initramfs_data.cpio.bz2
24229 initramfs_data.cpio.gz
24230 initramfs_list
24231 kallsyms
24232-kconfig
24233 keywords.c
24234 ksym.c*
24235 ksym.h*
24236@@ -133,7 +148,9 @@ mkboot
24237 mkbugboot
24238 mkcpustr
24239 mkdep
24240+mkpiggy
24241 mkprep
24242+mkregtable
24243 mktables
24244 mktree
24245 modpost
24246@@ -149,6 +166,7 @@ patches*
24247 pca200e.bin
24248 pca200e_ecd.bin2
24249 piggy.gz
24250+piggy.S
24251 piggyback
24252 pnmtologo
24253 ppc_defs.h*
24254@@ -157,12 +175,15 @@ qconf
24255 raid6altivec*.c
24256 raid6int*.c
24257 raid6tables.c
24258+regdb.c
24259 relocs
24260+rlim_names.h
24261 series
24262 setup
24263 setup.bin
24264 setup.elf
24265 sImage
24266+slabinfo
24267 sm_tbl*
24268 split-include
24269 syscalltab.h
24270@@ -186,14 +207,20 @@ version.h*
24271 vmlinux
24272 vmlinux-*
24273 vmlinux.aout
24274+vmlinux.bin.all
24275+vmlinux.bin.bz2
24276 vmlinux.lds
24277+vmlinux.relocs
24278+voffset.h
24279 vsyscall.lds
24280 vsyscall_32.lds
24281 wanxlfw.inc
24282 uImage
24283 unifdef
24284+utsrelease.h
24285 wakeup.bin
24286 wakeup.elf
24287 wakeup.lds
24288 zImage*
24289 zconf.hash.c
24290+zoffset.h
24291diff -urNp linux-2.6.32.43/Documentation/kernel-parameters.txt linux-2.6.32.43/Documentation/kernel-parameters.txt
24292--- linux-2.6.32.43/Documentation/kernel-parameters.txt 2011-03-27 14:31:47.000000000 -0400
24293+++ linux-2.6.32.43/Documentation/kernel-parameters.txt 2011-04-17 15:56:45.000000000 -0400
24294@@ -1837,6 +1837,13 @@ and is between 256 and 4096 characters.
24295 the specified number of seconds. This is to be used if
24296 your oopses keep scrolling off the screen.
24297
24298+	pax_nouderef	[X86] disables UDEREF. Most likely needed in
24299+			virtualization environments that don't cope well with the
24300+			expand-down segment used by UDEREF on X86-32 or the frequent
24301+			page table updates on X86-64.
24302+
24303+	pax_softmode=	0/1 to disable/enable PaX softmode at boot time.
24304+
24305 pcbit= [HW,ISDN]
24306
24307 pcd. [PARIDE]
24308diff -urNp linux-2.6.32.43/drivers/acpi/acpi_pad.c linux-2.6.32.43/drivers/acpi/acpi_pad.c
24309--- linux-2.6.32.43/drivers/acpi/acpi_pad.c 2011-03-27 14:31:47.000000000 -0400
24310+++ linux-2.6.32.43/drivers/acpi/acpi_pad.c 2011-04-17 15:56:46.000000000 -0400
24311@@ -30,7 +30,7 @@
24312 #include <acpi/acpi_bus.h>
24313 #include <acpi/acpi_drivers.h>
24314
24315-#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
24316+#define ACPI_PROCESSOR_AGGREGATOR_CLASS "acpi_pad"
24317 #define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
24318 #define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
24319 static DEFINE_MUTEX(isolated_cpus_lock);
24320diff -urNp linux-2.6.32.43/drivers/acpi/battery.c linux-2.6.32.43/drivers/acpi/battery.c
24321--- linux-2.6.32.43/drivers/acpi/battery.c 2011-03-27 14:31:47.000000000 -0400
24322+++ linux-2.6.32.43/drivers/acpi/battery.c 2011-04-17 15:56:46.000000000 -0400
24323@@ -763,7 +763,7 @@ DECLARE_FILE_FUNCTIONS(alarm);
24324 }
24325
24326 static struct battery_file {
24327- struct file_operations ops;
24328+ const struct file_operations ops;
24329 mode_t mode;
24330 const char *name;
24331 } acpi_battery_file[] = {
24332diff -urNp linux-2.6.32.43/drivers/acpi/dock.c linux-2.6.32.43/drivers/acpi/dock.c
24333--- linux-2.6.32.43/drivers/acpi/dock.c 2011-03-27 14:31:47.000000000 -0400
24334+++ linux-2.6.32.43/drivers/acpi/dock.c 2011-04-17 15:56:46.000000000 -0400
24335@@ -77,7 +77,7 @@ struct dock_dependent_device {
24336 struct list_head list;
24337 struct list_head hotplug_list;
24338 acpi_handle handle;
24339- struct acpi_dock_ops *ops;
24340+ const struct acpi_dock_ops *ops;
24341 void *context;
24342 };
24343
24344@@ -605,7 +605,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi
24345 * the dock driver after _DCK is executed.
24346 */
24347 int
24348-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
24349+register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops,
24350 void *context)
24351 {
24352 struct dock_dependent_device *dd;
24353diff -urNp linux-2.6.32.43/drivers/acpi/osl.c linux-2.6.32.43/drivers/acpi/osl.c
24354--- linux-2.6.32.43/drivers/acpi/osl.c 2011-03-27 14:31:47.000000000 -0400
24355+++ linux-2.6.32.43/drivers/acpi/osl.c 2011-04-17 15:56:46.000000000 -0400
24356@@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres
24357 void __iomem *virt_addr;
24358
24359 virt_addr = ioremap(phys_addr, width);
24360+ if (!virt_addr)
24361+ return AE_NO_MEMORY;
24362 if (!value)
24363 value = &dummy;
24364
24365@@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre
24366 void __iomem *virt_addr;
24367
24368 virt_addr = ioremap(phys_addr, width);
24369+ if (!virt_addr)
24370+ return AE_NO_MEMORY;
24371
24372 switch (width) {
24373 case 8:
24374diff -urNp linux-2.6.32.43/drivers/acpi/power_meter.c linux-2.6.32.43/drivers/acpi/power_meter.c
24375--- linux-2.6.32.43/drivers/acpi/power_meter.c 2011-03-27 14:31:47.000000000 -0400
24376+++ linux-2.6.32.43/drivers/acpi/power_meter.c 2011-04-17 15:56:46.000000000 -0400
24377@@ -315,8 +315,6 @@ static ssize_t set_trip(struct device *d
24378 return res;
24379
24380 temp /= 1000;
24381- if (temp < 0)
24382- return -EINVAL;
24383
24384 mutex_lock(&resource->lock);
24385 resource->trip[attr->index - 7] = temp;
24386diff -urNp linux-2.6.32.43/drivers/acpi/proc.c linux-2.6.32.43/drivers/acpi/proc.c
24387--- linux-2.6.32.43/drivers/acpi/proc.c 2011-03-27 14:31:47.000000000 -0400
24388+++ linux-2.6.32.43/drivers/acpi/proc.c 2011-04-17 15:56:46.000000000 -0400
24389@@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f
24390 size_t count, loff_t * ppos)
24391 {
24392 struct list_head *node, *next;
24393- char strbuf[5];
24394- char str[5] = "";
24395- unsigned int len = count;
24396+ char strbuf[5] = {0};
24397 struct acpi_device *found_dev = NULL;
24398
24399- if (len > 4)
24400- len = 4;
24401- if (len < 0)
24402- return -EFAULT;
24403+ if (count > 4)
24404+ count = 4;
24405
24406- if (copy_from_user(strbuf, buffer, len))
24407+ if (copy_from_user(strbuf, buffer, count))
24408 return -EFAULT;
24409- strbuf[len] = '\0';
24410- sscanf(strbuf, "%s", str);
24411+ strbuf[count] = '\0';
24412
24413 mutex_lock(&acpi_device_lock);
24414 list_for_each_safe(node, next, &acpi_wakeup_device_list) {
24415@@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f
24416 if (!dev->wakeup.flags.valid)
24417 continue;
24418
24419- if (!strncmp(dev->pnp.bus_id, str, 4)) {
24420+ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) {
24421 dev->wakeup.state.enabled =
24422 dev->wakeup.state.enabled ? 0 : 1;
24423 found_dev = dev;
24424diff -urNp linux-2.6.32.43/drivers/acpi/processor_core.c linux-2.6.32.43/drivers/acpi/processor_core.c
24425--- linux-2.6.32.43/drivers/acpi/processor_core.c 2011-03-27 14:31:47.000000000 -0400
24426+++ linux-2.6.32.43/drivers/acpi/processor_core.c 2011-04-17 15:56:46.000000000 -0400
24427@@ -790,7 +790,7 @@ static int __cpuinit acpi_processor_add(
24428 return 0;
24429 }
24430
24431- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0));
24432+ BUG_ON(pr->id >= nr_cpu_ids);
24433
24434 /*
24435 * Buggy BIOS check
24436diff -urNp linux-2.6.32.43/drivers/acpi/sbshc.c linux-2.6.32.43/drivers/acpi/sbshc.c
24437--- linux-2.6.32.43/drivers/acpi/sbshc.c 2011-03-27 14:31:47.000000000 -0400
24438+++ linux-2.6.32.43/drivers/acpi/sbshc.c 2011-04-17 15:56:46.000000000 -0400
24439@@ -17,7 +17,7 @@
24440
24441 #define PREFIX "ACPI: "
24442
24443-#define ACPI_SMB_HC_CLASS "smbus_host_controller"
24444+#define ACPI_SMB_HC_CLASS "smbus_host_ctl"
24445 #define ACPI_SMB_HC_DEVICE_NAME "ACPI SMBus HC"
24446
24447 struct acpi_smb_hc {
24448diff -urNp linux-2.6.32.43/drivers/acpi/sleep.c linux-2.6.32.43/drivers/acpi/sleep.c
24449--- linux-2.6.32.43/drivers/acpi/sleep.c 2011-03-27 14:31:47.000000000 -0400
24450+++ linux-2.6.32.43/drivers/acpi/sleep.c 2011-04-17 15:56:46.000000000 -0400
24451@@ -283,7 +283,7 @@ static int acpi_suspend_state_valid(susp
24452 }
24453 }
24454
24455-static struct platform_suspend_ops acpi_suspend_ops = {
24456+static const struct platform_suspend_ops acpi_suspend_ops = {
24457 .valid = acpi_suspend_state_valid,
24458 .begin = acpi_suspend_begin,
24459 .prepare_late = acpi_pm_prepare,
24460@@ -311,7 +311,7 @@ static int acpi_suspend_begin_old(suspen
24461 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24462 * been requested.
24463 */
24464-static struct platform_suspend_ops acpi_suspend_ops_old = {
24465+static const struct platform_suspend_ops acpi_suspend_ops_old = {
24466 .valid = acpi_suspend_state_valid,
24467 .begin = acpi_suspend_begin_old,
24468 .prepare_late = acpi_pm_disable_gpes,
24469@@ -460,7 +460,7 @@ static void acpi_pm_enable_gpes(void)
24470 acpi_enable_all_runtime_gpes();
24471 }
24472
24473-static struct platform_hibernation_ops acpi_hibernation_ops = {
24474+static const struct platform_hibernation_ops acpi_hibernation_ops = {
24475 .begin = acpi_hibernation_begin,
24476 .end = acpi_pm_end,
24477 .pre_snapshot = acpi_hibernation_pre_snapshot,
24478@@ -513,7 +513,7 @@ static int acpi_hibernation_pre_snapshot
24479 * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has
24480 * been requested.
24481 */
24482-static struct platform_hibernation_ops acpi_hibernation_ops_old = {
24483+static const struct platform_hibernation_ops acpi_hibernation_ops_old = {
24484 .begin = acpi_hibernation_begin_old,
24485 .end = acpi_pm_end,
24486 .pre_snapshot = acpi_hibernation_pre_snapshot_old,
24487diff -urNp linux-2.6.32.43/drivers/acpi/video.c linux-2.6.32.43/drivers/acpi/video.c
24488--- linux-2.6.32.43/drivers/acpi/video.c 2011-03-27 14:31:47.000000000 -0400
24489+++ linux-2.6.32.43/drivers/acpi/video.c 2011-04-17 15:56:46.000000000 -0400
24490@@ -359,7 +359,7 @@ static int acpi_video_set_brightness(str
24491 vd->brightness->levels[request_level]);
24492 }
24493
24494-static struct backlight_ops acpi_backlight_ops = {
24495+static const struct backlight_ops acpi_backlight_ops = {
24496 .get_brightness = acpi_video_get_brightness,
24497 .update_status = acpi_video_set_brightness,
24498 };
24499diff -urNp linux-2.6.32.43/drivers/ata/ahci.c linux-2.6.32.43/drivers/ata/ahci.c
24500--- linux-2.6.32.43/drivers/ata/ahci.c 2011-03-27 14:31:47.000000000 -0400
24501+++ linux-2.6.32.43/drivers/ata/ahci.c 2011-04-23 12:56:10.000000000 -0400
24502@@ -387,7 +387,7 @@ static struct scsi_host_template ahci_sh
24503 .sdev_attrs = ahci_sdev_attrs,
24504 };
24505
24506-static struct ata_port_operations ahci_ops = {
24507+static const struct ata_port_operations ahci_ops = {
24508 .inherits = &sata_pmp_port_ops,
24509
24510 .qc_defer = sata_pmp_qc_defer_cmd_switch,
24511@@ -424,17 +424,17 @@ static struct ata_port_operations ahci_o
24512 .port_stop = ahci_port_stop,
24513 };
24514
24515-static struct ata_port_operations ahci_vt8251_ops = {
24516+static const struct ata_port_operations ahci_vt8251_ops = {
24517 .inherits = &ahci_ops,
24518 .hardreset = ahci_vt8251_hardreset,
24519 };
24520
24521-static struct ata_port_operations ahci_p5wdh_ops = {
24522+static const struct ata_port_operations ahci_p5wdh_ops = {
24523 .inherits = &ahci_ops,
24524 .hardreset = ahci_p5wdh_hardreset,
24525 };
24526
24527-static struct ata_port_operations ahci_sb600_ops = {
24528+static const struct ata_port_operations ahci_sb600_ops = {
24529 .inherits = &ahci_ops,
24530 .softreset = ahci_sb600_softreset,
24531 .pmp_softreset = ahci_sb600_softreset,
24532diff -urNp linux-2.6.32.43/drivers/ata/ata_generic.c linux-2.6.32.43/drivers/ata/ata_generic.c
24533--- linux-2.6.32.43/drivers/ata/ata_generic.c 2011-03-27 14:31:47.000000000 -0400
24534+++ linux-2.6.32.43/drivers/ata/ata_generic.c 2011-04-17 15:56:46.000000000 -0400
24535@@ -104,7 +104,7 @@ static struct scsi_host_template generic
24536 ATA_BMDMA_SHT(DRV_NAME),
24537 };
24538
24539-static struct ata_port_operations generic_port_ops = {
24540+static const struct ata_port_operations generic_port_ops = {
24541 .inherits = &ata_bmdma_port_ops,
24542 .cable_detect = ata_cable_unknown,
24543 .set_mode = generic_set_mode,
24544diff -urNp linux-2.6.32.43/drivers/ata/ata_piix.c linux-2.6.32.43/drivers/ata/ata_piix.c
24545--- linux-2.6.32.43/drivers/ata/ata_piix.c 2011-03-27 14:31:47.000000000 -0400
24546+++ linux-2.6.32.43/drivers/ata/ata_piix.c 2011-04-23 12:56:10.000000000 -0400
24547@@ -318,7 +318,7 @@ static struct scsi_host_template piix_sh
24548 ATA_BMDMA_SHT(DRV_NAME),
24549 };
24550
24551-static struct ata_port_operations piix_pata_ops = {
24552+static const struct ata_port_operations piix_pata_ops = {
24553 .inherits = &ata_bmdma32_port_ops,
24554 .cable_detect = ata_cable_40wire,
24555 .set_piomode = piix_set_piomode,
24556@@ -326,22 +326,22 @@ static struct ata_port_operations piix_p
24557 .prereset = piix_pata_prereset,
24558 };
24559
24560-static struct ata_port_operations piix_vmw_ops = {
24561+static const struct ata_port_operations piix_vmw_ops = {
24562 .inherits = &piix_pata_ops,
24563 .bmdma_status = piix_vmw_bmdma_status,
24564 };
24565
24566-static struct ata_port_operations ich_pata_ops = {
24567+static const struct ata_port_operations ich_pata_ops = {
24568 .inherits = &piix_pata_ops,
24569 .cable_detect = ich_pata_cable_detect,
24570 .set_dmamode = ich_set_dmamode,
24571 };
24572
24573-static struct ata_port_operations piix_sata_ops = {
24574+static const struct ata_port_operations piix_sata_ops = {
24575 .inherits = &ata_bmdma_port_ops,
24576 };
24577
24578-static struct ata_port_operations piix_sidpr_sata_ops = {
24579+static const struct ata_port_operations piix_sidpr_sata_ops = {
24580 .inherits = &piix_sata_ops,
24581 .hardreset = sata_std_hardreset,
24582 .scr_read = piix_sidpr_scr_read,
24583diff -urNp linux-2.6.32.43/drivers/ata/libata-acpi.c linux-2.6.32.43/drivers/ata/libata-acpi.c
24584--- linux-2.6.32.43/drivers/ata/libata-acpi.c 2011-03-27 14:31:47.000000000 -0400
24585+++ linux-2.6.32.43/drivers/ata/libata-acpi.c 2011-04-17 15:56:46.000000000 -0400
24586@@ -223,12 +223,12 @@ static void ata_acpi_dev_uevent(acpi_han
24587 ata_acpi_uevent(dev->link->ap, dev, event);
24588 }
24589
24590-static struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24591+static const struct acpi_dock_ops ata_acpi_dev_dock_ops = {
24592 .handler = ata_acpi_dev_notify_dock,
24593 .uevent = ata_acpi_dev_uevent,
24594 };
24595
24596-static struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24597+static const struct acpi_dock_ops ata_acpi_ap_dock_ops = {
24598 .handler = ata_acpi_ap_notify_dock,
24599 .uevent = ata_acpi_ap_uevent,
24600 };
24601diff -urNp linux-2.6.32.43/drivers/ata/libata-core.c linux-2.6.32.43/drivers/ata/libata-core.c
24602--- linux-2.6.32.43/drivers/ata/libata-core.c 2011-03-27 14:31:47.000000000 -0400
24603+++ linux-2.6.32.43/drivers/ata/libata-core.c 2011-08-05 20:33:55.000000000 -0400
24604@@ -4954,7 +4954,7 @@ void ata_qc_free(struct ata_queued_cmd *
24605 struct ata_port *ap;
24606 unsigned int tag;
24607
24608- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24609+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24610 ap = qc->ap;
24611
24612 qc->flags = 0;
24613@@ -4970,7 +4970,7 @@ void __ata_qc_complete(struct ata_queued
24614 struct ata_port *ap;
24615 struct ata_link *link;
24616
24617- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24618+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
24619 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
24620 ap = qc->ap;
24621 link = qc->dev->link;
24622@@ -5987,7 +5987,7 @@ static void ata_host_stop(struct device
24623 * LOCKING:
24624 * None.
24625 */
24626-static void ata_finalize_port_ops(struct ata_port_operations *ops)
24627+static void ata_finalize_port_ops(const struct ata_port_operations *ops)
24628 {
24629 static DEFINE_SPINLOCK(lock);
24630 const struct ata_port_operations *cur;
24631@@ -5999,6 +5999,7 @@ static void ata_finalize_port_ops(struct
24632 return;
24633
24634 spin_lock(&lock);
24635+ pax_open_kernel();
24636
24637 for (cur = ops->inherits; cur; cur = cur->inherits) {
24638 void **inherit = (void **)cur;
24639@@ -6012,8 +6013,9 @@ static void ata_finalize_port_ops(struct
24640 if (IS_ERR(*pp))
24641 *pp = NULL;
24642
24643- ops->inherits = NULL;
24644+ *(struct ata_port_operations **)&ops->inherits = NULL;
24645
24646+ pax_close_kernel();
24647 spin_unlock(&lock);
24648 }
24649
24650@@ -6110,7 +6112,7 @@ int ata_host_start(struct ata_host *host
24651 */
24652 /* KILLME - the only user left is ipr */
24653 void ata_host_init(struct ata_host *host, struct device *dev,
24654- unsigned long flags, struct ata_port_operations *ops)
24655+ unsigned long flags, const struct ata_port_operations *ops)
24656 {
24657 spin_lock_init(&host->lock);
24658 host->dev = dev;
24659@@ -6773,7 +6775,7 @@ static void ata_dummy_error_handler(stru
24660 /* truly dummy */
24661 }
24662
24663-struct ata_port_operations ata_dummy_port_ops = {
24664+const struct ata_port_operations ata_dummy_port_ops = {
24665 .qc_prep = ata_noop_qc_prep,
24666 .qc_issue = ata_dummy_qc_issue,
24667 .error_handler = ata_dummy_error_handler,
24668diff -urNp linux-2.6.32.43/drivers/ata/libata-eh.c linux-2.6.32.43/drivers/ata/libata-eh.c
24669--- linux-2.6.32.43/drivers/ata/libata-eh.c 2011-03-27 14:31:47.000000000 -0400
24670+++ linux-2.6.32.43/drivers/ata/libata-eh.c 2011-05-16 21:46:57.000000000 -0400
24671@@ -2423,6 +2423,8 @@ void ata_eh_report(struct ata_port *ap)
24672 {
24673 struct ata_link *link;
24674
24675+ pax_track_stack();
24676+
24677 ata_for_each_link(link, ap, HOST_FIRST)
24678 ata_eh_link_report(link);
24679 }
24680@@ -3590,7 +3592,7 @@ void ata_do_eh(struct ata_port *ap, ata_
24681 */
24682 void ata_std_error_handler(struct ata_port *ap)
24683 {
24684- struct ata_port_operations *ops = ap->ops;
24685+ const struct ata_port_operations *ops = ap->ops;
24686 ata_reset_fn_t hardreset = ops->hardreset;
24687
24688 /* ignore built-in hardreset if SCR access is not available */
24689diff -urNp linux-2.6.32.43/drivers/ata/libata-pmp.c linux-2.6.32.43/drivers/ata/libata-pmp.c
24690--- linux-2.6.32.43/drivers/ata/libata-pmp.c 2011-03-27 14:31:47.000000000 -0400
24691+++ linux-2.6.32.43/drivers/ata/libata-pmp.c 2011-04-17 15:56:46.000000000 -0400
24692@@ -841,7 +841,7 @@ static int sata_pmp_handle_link_fail(str
24693 */
24694 static int sata_pmp_eh_recover(struct ata_port *ap)
24695 {
24696- struct ata_port_operations *ops = ap->ops;
24697+ const struct ata_port_operations *ops = ap->ops;
24698 int pmp_tries, link_tries[SATA_PMP_MAX_PORTS];
24699 struct ata_link *pmp_link = &ap->link;
24700 struct ata_device *pmp_dev = pmp_link->device;
24701diff -urNp linux-2.6.32.43/drivers/ata/pata_acpi.c linux-2.6.32.43/drivers/ata/pata_acpi.c
24702--- linux-2.6.32.43/drivers/ata/pata_acpi.c 2011-03-27 14:31:47.000000000 -0400
24703+++ linux-2.6.32.43/drivers/ata/pata_acpi.c 2011-04-17 15:56:46.000000000 -0400
24704@@ -215,7 +215,7 @@ static struct scsi_host_template pacpi_s
24705 ATA_BMDMA_SHT(DRV_NAME),
24706 };
24707
24708-static struct ata_port_operations pacpi_ops = {
24709+static const struct ata_port_operations pacpi_ops = {
24710 .inherits = &ata_bmdma_port_ops,
24711 .qc_issue = pacpi_qc_issue,
24712 .cable_detect = pacpi_cable_detect,
24713diff -urNp linux-2.6.32.43/drivers/ata/pata_ali.c linux-2.6.32.43/drivers/ata/pata_ali.c
24714--- linux-2.6.32.43/drivers/ata/pata_ali.c 2011-03-27 14:31:47.000000000 -0400
24715+++ linux-2.6.32.43/drivers/ata/pata_ali.c 2011-04-17 15:56:46.000000000 -0400
24716@@ -365,7 +365,7 @@ static struct scsi_host_template ali_sht
24717 * Port operations for PIO only ALi
24718 */
24719
24720-static struct ata_port_operations ali_early_port_ops = {
24721+static const struct ata_port_operations ali_early_port_ops = {
24722 .inherits = &ata_sff_port_ops,
24723 .cable_detect = ata_cable_40wire,
24724 .set_piomode = ali_set_piomode,
24725@@ -382,7 +382,7 @@ static const struct ata_port_operations
24726 * Port operations for DMA capable ALi without cable
24727 * detect
24728 */
24729-static struct ata_port_operations ali_20_port_ops = {
24730+static const struct ata_port_operations ali_20_port_ops = {
24731 .inherits = &ali_dma_base_ops,
24732 .cable_detect = ata_cable_40wire,
24733 .mode_filter = ali_20_filter,
24734@@ -393,7 +393,7 @@ static struct ata_port_operations ali_20
24735 /*
24736 * Port operations for DMA capable ALi with cable detect
24737 */
24738-static struct ata_port_operations ali_c2_port_ops = {
24739+static const struct ata_port_operations ali_c2_port_ops = {
24740 .inherits = &ali_dma_base_ops,
24741 .check_atapi_dma = ali_check_atapi_dma,
24742 .cable_detect = ali_c2_cable_detect,
24743@@ -404,7 +404,7 @@ static struct ata_port_operations ali_c2
24744 /*
24745 * Port operations for DMA capable ALi with cable detect
24746 */
24747-static struct ata_port_operations ali_c4_port_ops = {
24748+static const struct ata_port_operations ali_c4_port_ops = {
24749 .inherits = &ali_dma_base_ops,
24750 .check_atapi_dma = ali_check_atapi_dma,
24751 .cable_detect = ali_c2_cable_detect,
24752@@ -414,7 +414,7 @@ static struct ata_port_operations ali_c4
24753 /*
24754 * Port operations for DMA capable ALi with cable detect and LBA48
24755 */
24756-static struct ata_port_operations ali_c5_port_ops = {
24757+static const struct ata_port_operations ali_c5_port_ops = {
24758 .inherits = &ali_dma_base_ops,
24759 .check_atapi_dma = ali_check_atapi_dma,
24760 .dev_config = ali_warn_atapi_dma,
24761diff -urNp linux-2.6.32.43/drivers/ata/pata_amd.c linux-2.6.32.43/drivers/ata/pata_amd.c
24762--- linux-2.6.32.43/drivers/ata/pata_amd.c 2011-03-27 14:31:47.000000000 -0400
24763+++ linux-2.6.32.43/drivers/ata/pata_amd.c 2011-04-17 15:56:46.000000000 -0400
24764@@ -397,28 +397,28 @@ static const struct ata_port_operations
24765 .prereset = amd_pre_reset,
24766 };
24767
24768-static struct ata_port_operations amd33_port_ops = {
24769+static const struct ata_port_operations amd33_port_ops = {
24770 .inherits = &amd_base_port_ops,
24771 .cable_detect = ata_cable_40wire,
24772 .set_piomode = amd33_set_piomode,
24773 .set_dmamode = amd33_set_dmamode,
24774 };
24775
24776-static struct ata_port_operations amd66_port_ops = {
24777+static const struct ata_port_operations amd66_port_ops = {
24778 .inherits = &amd_base_port_ops,
24779 .cable_detect = ata_cable_unknown,
24780 .set_piomode = amd66_set_piomode,
24781 .set_dmamode = amd66_set_dmamode,
24782 };
24783
24784-static struct ata_port_operations amd100_port_ops = {
24785+static const struct ata_port_operations amd100_port_ops = {
24786 .inherits = &amd_base_port_ops,
24787 .cable_detect = ata_cable_unknown,
24788 .set_piomode = amd100_set_piomode,
24789 .set_dmamode = amd100_set_dmamode,
24790 };
24791
24792-static struct ata_port_operations amd133_port_ops = {
24793+static const struct ata_port_operations amd133_port_ops = {
24794 .inherits = &amd_base_port_ops,
24795 .cable_detect = amd_cable_detect,
24796 .set_piomode = amd133_set_piomode,
24797@@ -433,13 +433,13 @@ static const struct ata_port_operations
24798 .host_stop = nv_host_stop,
24799 };
24800
24801-static struct ata_port_operations nv100_port_ops = {
24802+static const struct ata_port_operations nv100_port_ops = {
24803 .inherits = &nv_base_port_ops,
24804 .set_piomode = nv100_set_piomode,
24805 .set_dmamode = nv100_set_dmamode,
24806 };
24807
24808-static struct ata_port_operations nv133_port_ops = {
24809+static const struct ata_port_operations nv133_port_ops = {
24810 .inherits = &nv_base_port_ops,
24811 .set_piomode = nv133_set_piomode,
24812 .set_dmamode = nv133_set_dmamode,
24813diff -urNp linux-2.6.32.43/drivers/ata/pata_artop.c linux-2.6.32.43/drivers/ata/pata_artop.c
24814--- linux-2.6.32.43/drivers/ata/pata_artop.c 2011-03-27 14:31:47.000000000 -0400
24815+++ linux-2.6.32.43/drivers/ata/pata_artop.c 2011-04-17 15:56:46.000000000 -0400
24816@@ -311,7 +311,7 @@ static struct scsi_host_template artop_s
24817 ATA_BMDMA_SHT(DRV_NAME),
24818 };
24819
24820-static struct ata_port_operations artop6210_ops = {
24821+static const struct ata_port_operations artop6210_ops = {
24822 .inherits = &ata_bmdma_port_ops,
24823 .cable_detect = ata_cable_40wire,
24824 .set_piomode = artop6210_set_piomode,
24825@@ -320,7 +320,7 @@ static struct ata_port_operations artop6
24826 .qc_defer = artop6210_qc_defer,
24827 };
24828
24829-static struct ata_port_operations artop6260_ops = {
24830+static const struct ata_port_operations artop6260_ops = {
24831 .inherits = &ata_bmdma_port_ops,
24832 .cable_detect = artop6260_cable_detect,
24833 .set_piomode = artop6260_set_piomode,
24834diff -urNp linux-2.6.32.43/drivers/ata/pata_at32.c linux-2.6.32.43/drivers/ata/pata_at32.c
24835--- linux-2.6.32.43/drivers/ata/pata_at32.c 2011-03-27 14:31:47.000000000 -0400
24836+++ linux-2.6.32.43/drivers/ata/pata_at32.c 2011-04-17 15:56:46.000000000 -0400
24837@@ -172,7 +172,7 @@ static struct scsi_host_template at32_sh
24838 ATA_PIO_SHT(DRV_NAME),
24839 };
24840
24841-static struct ata_port_operations at32_port_ops = {
24842+static const struct ata_port_operations at32_port_ops = {
24843 .inherits = &ata_sff_port_ops,
24844 .cable_detect = ata_cable_40wire,
24845 .set_piomode = pata_at32_set_piomode,
24846diff -urNp linux-2.6.32.43/drivers/ata/pata_at91.c linux-2.6.32.43/drivers/ata/pata_at91.c
24847--- linux-2.6.32.43/drivers/ata/pata_at91.c 2011-03-27 14:31:47.000000000 -0400
24848+++ linux-2.6.32.43/drivers/ata/pata_at91.c 2011-04-17 15:56:46.000000000 -0400
24849@@ -195,7 +195,7 @@ static struct scsi_host_template pata_at
24850 ATA_PIO_SHT(DRV_NAME),
24851 };
24852
24853-static struct ata_port_operations pata_at91_port_ops = {
24854+static const struct ata_port_operations pata_at91_port_ops = {
24855 .inherits = &ata_sff_port_ops,
24856
24857 .sff_data_xfer = pata_at91_data_xfer_noirq,
24858diff -urNp linux-2.6.32.43/drivers/ata/pata_atiixp.c linux-2.6.32.43/drivers/ata/pata_atiixp.c
24859--- linux-2.6.32.43/drivers/ata/pata_atiixp.c 2011-03-27 14:31:47.000000000 -0400
24860+++ linux-2.6.32.43/drivers/ata/pata_atiixp.c 2011-04-17 15:56:46.000000000 -0400
24861@@ -205,7 +205,7 @@ static struct scsi_host_template atiixp_
24862 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24863 };
24864
24865-static struct ata_port_operations atiixp_port_ops = {
24866+static const struct ata_port_operations atiixp_port_ops = {
24867 .inherits = &ata_bmdma_port_ops,
24868
24869 .qc_prep = ata_sff_dumb_qc_prep,
24870diff -urNp linux-2.6.32.43/drivers/ata/pata_atp867x.c linux-2.6.32.43/drivers/ata/pata_atp867x.c
24871--- linux-2.6.32.43/drivers/ata/pata_atp867x.c 2011-03-27 14:31:47.000000000 -0400
24872+++ linux-2.6.32.43/drivers/ata/pata_atp867x.c 2011-04-17 15:56:46.000000000 -0400
24873@@ -274,7 +274,7 @@ static struct scsi_host_template atp867x
24874 ATA_BMDMA_SHT(DRV_NAME),
24875 };
24876
24877-static struct ata_port_operations atp867x_ops = {
24878+static const struct ata_port_operations atp867x_ops = {
24879 .inherits = &ata_bmdma_port_ops,
24880 .cable_detect = atp867x_cable_detect,
24881 .set_piomode = atp867x_set_piomode,
24882diff -urNp linux-2.6.32.43/drivers/ata/pata_bf54x.c linux-2.6.32.43/drivers/ata/pata_bf54x.c
24883--- linux-2.6.32.43/drivers/ata/pata_bf54x.c 2011-03-27 14:31:47.000000000 -0400
24884+++ linux-2.6.32.43/drivers/ata/pata_bf54x.c 2011-04-17 15:56:46.000000000 -0400
24885@@ -1464,7 +1464,7 @@ static struct scsi_host_template bfin_sh
24886 .dma_boundary = ATA_DMA_BOUNDARY,
24887 };
24888
24889-static struct ata_port_operations bfin_pata_ops = {
24890+static const struct ata_port_operations bfin_pata_ops = {
24891 .inherits = &ata_sff_port_ops,
24892
24893 .set_piomode = bfin_set_piomode,
24894diff -urNp linux-2.6.32.43/drivers/ata/pata_cmd640.c linux-2.6.32.43/drivers/ata/pata_cmd640.c
24895--- linux-2.6.32.43/drivers/ata/pata_cmd640.c 2011-03-27 14:31:47.000000000 -0400
24896+++ linux-2.6.32.43/drivers/ata/pata_cmd640.c 2011-04-17 15:56:46.000000000 -0400
24897@@ -168,7 +168,7 @@ static struct scsi_host_template cmd640_
24898 ATA_BMDMA_SHT(DRV_NAME),
24899 };
24900
24901-static struct ata_port_operations cmd640_port_ops = {
24902+static const struct ata_port_operations cmd640_port_ops = {
24903 .inherits = &ata_bmdma_port_ops,
24904 /* In theory xfer_noirq is not needed once we kill the prefetcher */
24905 .sff_data_xfer = ata_sff_data_xfer_noirq,
24906diff -urNp linux-2.6.32.43/drivers/ata/pata_cmd64x.c linux-2.6.32.43/drivers/ata/pata_cmd64x.c
24907--- linux-2.6.32.43/drivers/ata/pata_cmd64x.c 2011-06-25 12:55:34.000000000 -0400
24908+++ linux-2.6.32.43/drivers/ata/pata_cmd64x.c 2011-06-25 12:56:37.000000000 -0400
24909@@ -271,18 +271,18 @@ static const struct ata_port_operations
24910 .set_dmamode = cmd64x_set_dmamode,
24911 };
24912
24913-static struct ata_port_operations cmd64x_port_ops = {
24914+static const struct ata_port_operations cmd64x_port_ops = {
24915 .inherits = &cmd64x_base_ops,
24916 .cable_detect = ata_cable_40wire,
24917 };
24918
24919-static struct ata_port_operations cmd646r1_port_ops = {
24920+static const struct ata_port_operations cmd646r1_port_ops = {
24921 .inherits = &cmd64x_base_ops,
24922 .bmdma_stop = cmd646r1_bmdma_stop,
24923 .cable_detect = ata_cable_40wire,
24924 };
24925
24926-static struct ata_port_operations cmd648_port_ops = {
24927+static const struct ata_port_operations cmd648_port_ops = {
24928 .inherits = &cmd64x_base_ops,
24929 .bmdma_stop = cmd648_bmdma_stop,
24930 .cable_detect = cmd648_cable_detect,
24931diff -urNp linux-2.6.32.43/drivers/ata/pata_cs5520.c linux-2.6.32.43/drivers/ata/pata_cs5520.c
24932--- linux-2.6.32.43/drivers/ata/pata_cs5520.c 2011-03-27 14:31:47.000000000 -0400
24933+++ linux-2.6.32.43/drivers/ata/pata_cs5520.c 2011-04-17 15:56:46.000000000 -0400
24934@@ -144,7 +144,7 @@ static struct scsi_host_template cs5520_
24935 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24936 };
24937
24938-static struct ata_port_operations cs5520_port_ops = {
24939+static const struct ata_port_operations cs5520_port_ops = {
24940 .inherits = &ata_bmdma_port_ops,
24941 .qc_prep = ata_sff_dumb_qc_prep,
24942 .cable_detect = ata_cable_40wire,
24943diff -urNp linux-2.6.32.43/drivers/ata/pata_cs5530.c linux-2.6.32.43/drivers/ata/pata_cs5530.c
24944--- linux-2.6.32.43/drivers/ata/pata_cs5530.c 2011-03-27 14:31:47.000000000 -0400
24945+++ linux-2.6.32.43/drivers/ata/pata_cs5530.c 2011-04-17 15:56:46.000000000 -0400
24946@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_
24947 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
24948 };
24949
24950-static struct ata_port_operations cs5530_port_ops = {
24951+static const struct ata_port_operations cs5530_port_ops = {
24952 .inherits = &ata_bmdma_port_ops,
24953
24954 .qc_prep = ata_sff_dumb_qc_prep,
24955diff -urNp linux-2.6.32.43/drivers/ata/pata_cs5535.c linux-2.6.32.43/drivers/ata/pata_cs5535.c
24956--- linux-2.6.32.43/drivers/ata/pata_cs5535.c 2011-03-27 14:31:47.000000000 -0400
24957+++ linux-2.6.32.43/drivers/ata/pata_cs5535.c 2011-04-17 15:56:46.000000000 -0400
24958@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_
24959 ATA_BMDMA_SHT(DRV_NAME),
24960 };
24961
24962-static struct ata_port_operations cs5535_port_ops = {
24963+static const struct ata_port_operations cs5535_port_ops = {
24964 .inherits = &ata_bmdma_port_ops,
24965 .cable_detect = cs5535_cable_detect,
24966 .set_piomode = cs5535_set_piomode,
24967diff -urNp linux-2.6.32.43/drivers/ata/pata_cs5536.c linux-2.6.32.43/drivers/ata/pata_cs5536.c
24968--- linux-2.6.32.43/drivers/ata/pata_cs5536.c 2011-03-27 14:31:47.000000000 -0400
24969+++ linux-2.6.32.43/drivers/ata/pata_cs5536.c 2011-04-17 15:56:46.000000000 -0400
24970@@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_
24971 ATA_BMDMA_SHT(DRV_NAME),
24972 };
24973
24974-static struct ata_port_operations cs5536_port_ops = {
24975+static const struct ata_port_operations cs5536_port_ops = {
24976 .inherits = &ata_bmdma_port_ops,
24977 .cable_detect = cs5536_cable_detect,
24978 .set_piomode = cs5536_set_piomode,
24979diff -urNp linux-2.6.32.43/drivers/ata/pata_cypress.c linux-2.6.32.43/drivers/ata/pata_cypress.c
24980--- linux-2.6.32.43/drivers/ata/pata_cypress.c 2011-03-27 14:31:47.000000000 -0400
24981+++ linux-2.6.32.43/drivers/ata/pata_cypress.c 2011-04-17 15:56:46.000000000 -0400
24982@@ -113,7 +113,7 @@ static struct scsi_host_template cy82c69
24983 ATA_BMDMA_SHT(DRV_NAME),
24984 };
24985
24986-static struct ata_port_operations cy82c693_port_ops = {
24987+static const struct ata_port_operations cy82c693_port_ops = {
24988 .inherits = &ata_bmdma_port_ops,
24989 .cable_detect = ata_cable_40wire,
24990 .set_piomode = cy82c693_set_piomode,
24991diff -urNp linux-2.6.32.43/drivers/ata/pata_efar.c linux-2.6.32.43/drivers/ata/pata_efar.c
24992--- linux-2.6.32.43/drivers/ata/pata_efar.c 2011-03-27 14:31:47.000000000 -0400
24993+++ linux-2.6.32.43/drivers/ata/pata_efar.c 2011-04-17 15:56:46.000000000 -0400
24994@@ -222,7 +222,7 @@ static struct scsi_host_template efar_sh
24995 ATA_BMDMA_SHT(DRV_NAME),
24996 };
24997
24998-static struct ata_port_operations efar_ops = {
24999+static const struct ata_port_operations efar_ops = {
25000 .inherits = &ata_bmdma_port_ops,
25001 .cable_detect = efar_cable_detect,
25002 .set_piomode = efar_set_piomode,
25003diff -urNp linux-2.6.32.43/drivers/ata/pata_hpt366.c linux-2.6.32.43/drivers/ata/pata_hpt366.c
25004--- linux-2.6.32.43/drivers/ata/pata_hpt366.c 2011-06-25 12:55:34.000000000 -0400
25005+++ linux-2.6.32.43/drivers/ata/pata_hpt366.c 2011-06-25 12:56:37.000000000 -0400
25006@@ -282,7 +282,7 @@ static struct scsi_host_template hpt36x_
25007 * Configuration for HPT366/68
25008 */
25009
25010-static struct ata_port_operations hpt366_port_ops = {
25011+static const struct ata_port_operations hpt366_port_ops = {
25012 .inherits = &ata_bmdma_port_ops,
25013 .cable_detect = hpt36x_cable_detect,
25014 .mode_filter = hpt366_filter,
25015diff -urNp linux-2.6.32.43/drivers/ata/pata_hpt37x.c linux-2.6.32.43/drivers/ata/pata_hpt37x.c
25016--- linux-2.6.32.43/drivers/ata/pata_hpt37x.c 2011-06-25 12:55:34.000000000 -0400
25017+++ linux-2.6.32.43/drivers/ata/pata_hpt37x.c 2011-06-25 12:56:37.000000000 -0400
25018@@ -576,7 +576,7 @@ static struct scsi_host_template hpt37x_
25019 * Configuration for HPT370
25020 */
25021
25022-static struct ata_port_operations hpt370_port_ops = {
25023+static const struct ata_port_operations hpt370_port_ops = {
25024 .inherits = &ata_bmdma_port_ops,
25025
25026 .bmdma_stop = hpt370_bmdma_stop,
25027@@ -591,7 +591,7 @@ static struct ata_port_operations hpt370
25028 * Configuration for HPT370A. Close to 370 but less filters
25029 */
25030
25031-static struct ata_port_operations hpt370a_port_ops = {
25032+static const struct ata_port_operations hpt370a_port_ops = {
25033 .inherits = &hpt370_port_ops,
25034 .mode_filter = hpt370a_filter,
25035 };
25036@@ -601,7 +601,7 @@ static struct ata_port_operations hpt370
25037 * and DMA mode setting functionality.
25038 */
25039
25040-static struct ata_port_operations hpt372_port_ops = {
25041+static const struct ata_port_operations hpt372_port_ops = {
25042 .inherits = &ata_bmdma_port_ops,
25043
25044 .bmdma_stop = hpt37x_bmdma_stop,
25045@@ -616,7 +616,7 @@ static struct ata_port_operations hpt372
25046 * but we have a different cable detection procedure for function 1.
25047 */
25048
25049-static struct ata_port_operations hpt374_fn1_port_ops = {
25050+static const struct ata_port_operations hpt374_fn1_port_ops = {
25051 .inherits = &hpt372_port_ops,
25052 .prereset = hpt374_fn1_pre_reset,
25053 };
25054diff -urNp linux-2.6.32.43/drivers/ata/pata_hpt3x2n.c linux-2.6.32.43/drivers/ata/pata_hpt3x2n.c
25055--- linux-2.6.32.43/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:55:34.000000000 -0400
25056+++ linux-2.6.32.43/drivers/ata/pata_hpt3x2n.c 2011-06-25 12:56:37.000000000 -0400
25057@@ -337,7 +337,7 @@ static struct scsi_host_template hpt3x2n
25058 * Configuration for HPT3x2n.
25059 */
25060
25061-static struct ata_port_operations hpt3x2n_port_ops = {
25062+static const struct ata_port_operations hpt3x2n_port_ops = {
25063 .inherits = &ata_bmdma_port_ops,
25064
25065 .bmdma_stop = hpt3x2n_bmdma_stop,
25066diff -urNp linux-2.6.32.43/drivers/ata/pata_hpt3x3.c linux-2.6.32.43/drivers/ata/pata_hpt3x3.c
25067--- linux-2.6.32.43/drivers/ata/pata_hpt3x3.c 2011-03-27 14:31:47.000000000 -0400
25068+++ linux-2.6.32.43/drivers/ata/pata_hpt3x3.c 2011-04-17 15:56:46.000000000 -0400
25069@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_
25070 ATA_BMDMA_SHT(DRV_NAME),
25071 };
25072
25073-static struct ata_port_operations hpt3x3_port_ops = {
25074+static const struct ata_port_operations hpt3x3_port_ops = {
25075 .inherits = &ata_bmdma_port_ops,
25076 .cable_detect = ata_cable_40wire,
25077 .set_piomode = hpt3x3_set_piomode,
25078diff -urNp linux-2.6.32.43/drivers/ata/pata_icside.c linux-2.6.32.43/drivers/ata/pata_icside.c
25079--- linux-2.6.32.43/drivers/ata/pata_icside.c 2011-03-27 14:31:47.000000000 -0400
25080+++ linux-2.6.32.43/drivers/ata/pata_icside.c 2011-04-17 15:56:46.000000000 -0400
25081@@ -319,7 +319,7 @@ static void pata_icside_postreset(struct
25082 }
25083 }
25084
25085-static struct ata_port_operations pata_icside_port_ops = {
25086+static const struct ata_port_operations pata_icside_port_ops = {
25087 .inherits = &ata_sff_port_ops,
25088 /* no need to build any PRD tables for DMA */
25089 .qc_prep = ata_noop_qc_prep,
25090diff -urNp linux-2.6.32.43/drivers/ata/pata_isapnp.c linux-2.6.32.43/drivers/ata/pata_isapnp.c
25091--- linux-2.6.32.43/drivers/ata/pata_isapnp.c 2011-03-27 14:31:47.000000000 -0400
25092+++ linux-2.6.32.43/drivers/ata/pata_isapnp.c 2011-04-17 15:56:46.000000000 -0400
25093@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_
25094 ATA_PIO_SHT(DRV_NAME),
25095 };
25096
25097-static struct ata_port_operations isapnp_port_ops = {
25098+static const struct ata_port_operations isapnp_port_ops = {
25099 .inherits = &ata_sff_port_ops,
25100 .cable_detect = ata_cable_40wire,
25101 };
25102
25103-static struct ata_port_operations isapnp_noalt_port_ops = {
25104+static const struct ata_port_operations isapnp_noalt_port_ops = {
25105 .inherits = &ata_sff_port_ops,
25106 .cable_detect = ata_cable_40wire,
25107 /* No altstatus so we don't want to use the lost interrupt poll */
25108diff -urNp linux-2.6.32.43/drivers/ata/pata_it8213.c linux-2.6.32.43/drivers/ata/pata_it8213.c
25109--- linux-2.6.32.43/drivers/ata/pata_it8213.c 2011-03-27 14:31:47.000000000 -0400
25110+++ linux-2.6.32.43/drivers/ata/pata_it8213.c 2011-04-17 15:56:46.000000000 -0400
25111@@ -234,7 +234,7 @@ static struct scsi_host_template it8213_
25112 };
25113
25114
25115-static struct ata_port_operations it8213_ops = {
25116+static const struct ata_port_operations it8213_ops = {
25117 .inherits = &ata_bmdma_port_ops,
25118 .cable_detect = it8213_cable_detect,
25119 .set_piomode = it8213_set_piomode,
25120diff -urNp linux-2.6.32.43/drivers/ata/pata_it821x.c linux-2.6.32.43/drivers/ata/pata_it821x.c
25121--- linux-2.6.32.43/drivers/ata/pata_it821x.c 2011-03-27 14:31:47.000000000 -0400
25122+++ linux-2.6.32.43/drivers/ata/pata_it821x.c 2011-04-17 15:56:46.000000000 -0400
25123@@ -800,7 +800,7 @@ static struct scsi_host_template it821x_
25124 ATA_BMDMA_SHT(DRV_NAME),
25125 };
25126
25127-static struct ata_port_operations it821x_smart_port_ops = {
25128+static const struct ata_port_operations it821x_smart_port_ops = {
25129 .inherits = &ata_bmdma_port_ops,
25130
25131 .check_atapi_dma= it821x_check_atapi_dma,
25132@@ -814,7 +814,7 @@ static struct ata_port_operations it821x
25133 .port_start = it821x_port_start,
25134 };
25135
25136-static struct ata_port_operations it821x_passthru_port_ops = {
25137+static const struct ata_port_operations it821x_passthru_port_ops = {
25138 .inherits = &ata_bmdma_port_ops,
25139
25140 .check_atapi_dma= it821x_check_atapi_dma,
25141@@ -830,7 +830,7 @@ static struct ata_port_operations it821x
25142 .port_start = it821x_port_start,
25143 };
25144
25145-static struct ata_port_operations it821x_rdc_port_ops = {
25146+static const struct ata_port_operations it821x_rdc_port_ops = {
25147 .inherits = &ata_bmdma_port_ops,
25148
25149 .check_atapi_dma= it821x_check_atapi_dma,
25150diff -urNp linux-2.6.32.43/drivers/ata/pata_ixp4xx_cf.c linux-2.6.32.43/drivers/ata/pata_ixp4xx_cf.c
25151--- linux-2.6.32.43/drivers/ata/pata_ixp4xx_cf.c 2011-03-27 14:31:47.000000000 -0400
25152+++ linux-2.6.32.43/drivers/ata/pata_ixp4xx_cf.c 2011-04-17 15:56:46.000000000 -0400
25153@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_
25154 ATA_PIO_SHT(DRV_NAME),
25155 };
25156
25157-static struct ata_port_operations ixp4xx_port_ops = {
25158+static const struct ata_port_operations ixp4xx_port_ops = {
25159 .inherits = &ata_sff_port_ops,
25160 .sff_data_xfer = ixp4xx_mmio_data_xfer,
25161 .cable_detect = ata_cable_40wire,
25162diff -urNp linux-2.6.32.43/drivers/ata/pata_jmicron.c linux-2.6.32.43/drivers/ata/pata_jmicron.c
25163--- linux-2.6.32.43/drivers/ata/pata_jmicron.c 2011-03-27 14:31:47.000000000 -0400
25164+++ linux-2.6.32.43/drivers/ata/pata_jmicron.c 2011-04-17 15:56:46.000000000 -0400
25165@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron
25166 ATA_BMDMA_SHT(DRV_NAME),
25167 };
25168
25169-static struct ata_port_operations jmicron_ops = {
25170+static const struct ata_port_operations jmicron_ops = {
25171 .inherits = &ata_bmdma_port_ops,
25172 .prereset = jmicron_pre_reset,
25173 };
25174diff -urNp linux-2.6.32.43/drivers/ata/pata_legacy.c linux-2.6.32.43/drivers/ata/pata_legacy.c
25175--- linux-2.6.32.43/drivers/ata/pata_legacy.c 2011-03-27 14:31:47.000000000 -0400
25176+++ linux-2.6.32.43/drivers/ata/pata_legacy.c 2011-04-17 15:56:46.000000000 -0400
25177@@ -106,7 +106,7 @@ struct legacy_probe {
25178
25179 struct legacy_controller {
25180 const char *name;
25181- struct ata_port_operations *ops;
25182+ const struct ata_port_operations *ops;
25183 unsigned int pio_mask;
25184 unsigned int flags;
25185 unsigned int pflags;
25186@@ -223,12 +223,12 @@ static const struct ata_port_operations
25187 * pio_mask as well.
25188 */
25189
25190-static struct ata_port_operations simple_port_ops = {
25191+static const struct ata_port_operations simple_port_ops = {
25192 .inherits = &legacy_base_port_ops,
25193 .sff_data_xfer = ata_sff_data_xfer_noirq,
25194 };
25195
25196-static struct ata_port_operations legacy_port_ops = {
25197+static const struct ata_port_operations legacy_port_ops = {
25198 .inherits = &legacy_base_port_ops,
25199 .sff_data_xfer = ata_sff_data_xfer_noirq,
25200 .set_mode = legacy_set_mode,
25201@@ -324,7 +324,7 @@ static unsigned int pdc_data_xfer_vlb(st
25202 return buflen;
25203 }
25204
25205-static struct ata_port_operations pdc20230_port_ops = {
25206+static const struct ata_port_operations pdc20230_port_ops = {
25207 .inherits = &legacy_base_port_ops,
25208 .set_piomode = pdc20230_set_piomode,
25209 .sff_data_xfer = pdc_data_xfer_vlb,
25210@@ -357,7 +357,7 @@ static void ht6560a_set_piomode(struct a
25211 ioread8(ap->ioaddr.status_addr);
25212 }
25213
25214-static struct ata_port_operations ht6560a_port_ops = {
25215+static const struct ata_port_operations ht6560a_port_ops = {
25216 .inherits = &legacy_base_port_ops,
25217 .set_piomode = ht6560a_set_piomode,
25218 };
25219@@ -400,7 +400,7 @@ static void ht6560b_set_piomode(struct a
25220 ioread8(ap->ioaddr.status_addr);
25221 }
25222
25223-static struct ata_port_operations ht6560b_port_ops = {
25224+static const struct ata_port_operations ht6560b_port_ops = {
25225 .inherits = &legacy_base_port_ops,
25226 .set_piomode = ht6560b_set_piomode,
25227 };
25228@@ -499,7 +499,7 @@ static void opti82c611a_set_piomode(stru
25229 }
25230
25231
25232-static struct ata_port_operations opti82c611a_port_ops = {
25233+static const struct ata_port_operations opti82c611a_port_ops = {
25234 .inherits = &legacy_base_port_ops,
25235 .set_piomode = opti82c611a_set_piomode,
25236 };
25237@@ -609,7 +609,7 @@ static unsigned int opti82c46x_qc_issue(
25238 return ata_sff_qc_issue(qc);
25239 }
25240
25241-static struct ata_port_operations opti82c46x_port_ops = {
25242+static const struct ata_port_operations opti82c46x_port_ops = {
25243 .inherits = &legacy_base_port_ops,
25244 .set_piomode = opti82c46x_set_piomode,
25245 .qc_issue = opti82c46x_qc_issue,
25246@@ -771,20 +771,20 @@ static int qdi_port(struct platform_devi
25247 return 0;
25248 }
25249
25250-static struct ata_port_operations qdi6500_port_ops = {
25251+static const struct ata_port_operations qdi6500_port_ops = {
25252 .inherits = &legacy_base_port_ops,
25253 .set_piomode = qdi6500_set_piomode,
25254 .qc_issue = qdi_qc_issue,
25255 .sff_data_xfer = vlb32_data_xfer,
25256 };
25257
25258-static struct ata_port_operations qdi6580_port_ops = {
25259+static const struct ata_port_operations qdi6580_port_ops = {
25260 .inherits = &legacy_base_port_ops,
25261 .set_piomode = qdi6580_set_piomode,
25262 .sff_data_xfer = vlb32_data_xfer,
25263 };
25264
25265-static struct ata_port_operations qdi6580dp_port_ops = {
25266+static const struct ata_port_operations qdi6580dp_port_ops = {
25267 .inherits = &legacy_base_port_ops,
25268 .set_piomode = qdi6580dp_set_piomode,
25269 .sff_data_xfer = vlb32_data_xfer,
25270@@ -855,7 +855,7 @@ static int winbond_port(struct platform_
25271 return 0;
25272 }
25273
25274-static struct ata_port_operations winbond_port_ops = {
25275+static const struct ata_port_operations winbond_port_ops = {
25276 .inherits = &legacy_base_port_ops,
25277 .set_piomode = winbond_set_piomode,
25278 .sff_data_xfer = vlb32_data_xfer,
25279@@ -978,7 +978,7 @@ static __init int legacy_init_one(struct
25280 int pio_modes = controller->pio_mask;
25281 unsigned long io = probe->port;
25282 u32 mask = (1 << probe->slot);
25283- struct ata_port_operations *ops = controller->ops;
25284+ const struct ata_port_operations *ops = controller->ops;
25285 struct legacy_data *ld = &legacy_data[probe->slot];
25286 struct ata_host *host = NULL;
25287 struct ata_port *ap;
25288diff -urNp linux-2.6.32.43/drivers/ata/pata_marvell.c linux-2.6.32.43/drivers/ata/pata_marvell.c
25289--- linux-2.6.32.43/drivers/ata/pata_marvell.c 2011-03-27 14:31:47.000000000 -0400
25290+++ linux-2.6.32.43/drivers/ata/pata_marvell.c 2011-04-17 15:56:46.000000000 -0400
25291@@ -100,7 +100,7 @@ static struct scsi_host_template marvell
25292 ATA_BMDMA_SHT(DRV_NAME),
25293 };
25294
25295-static struct ata_port_operations marvell_ops = {
25296+static const struct ata_port_operations marvell_ops = {
25297 .inherits = &ata_bmdma_port_ops,
25298 .cable_detect = marvell_cable_detect,
25299 .prereset = marvell_pre_reset,
25300diff -urNp linux-2.6.32.43/drivers/ata/pata_mpc52xx.c linux-2.6.32.43/drivers/ata/pata_mpc52xx.c
25301--- linux-2.6.32.43/drivers/ata/pata_mpc52xx.c 2011-03-27 14:31:47.000000000 -0400
25302+++ linux-2.6.32.43/drivers/ata/pata_mpc52xx.c 2011-04-17 15:56:46.000000000 -0400
25303@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx
25304 ATA_PIO_SHT(DRV_NAME),
25305 };
25306
25307-static struct ata_port_operations mpc52xx_ata_port_ops = {
25308+static const struct ata_port_operations mpc52xx_ata_port_ops = {
25309 .inherits = &ata_bmdma_port_ops,
25310 .sff_dev_select = mpc52xx_ata_dev_select,
25311 .set_piomode = mpc52xx_ata_set_piomode,
25312diff -urNp linux-2.6.32.43/drivers/ata/pata_mpiix.c linux-2.6.32.43/drivers/ata/pata_mpiix.c
25313--- linux-2.6.32.43/drivers/ata/pata_mpiix.c 2011-03-27 14:31:47.000000000 -0400
25314+++ linux-2.6.32.43/drivers/ata/pata_mpiix.c 2011-04-17 15:56:46.000000000 -0400
25315@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s
25316 ATA_PIO_SHT(DRV_NAME),
25317 };
25318
25319-static struct ata_port_operations mpiix_port_ops = {
25320+static const struct ata_port_operations mpiix_port_ops = {
25321 .inherits = &ata_sff_port_ops,
25322 .qc_issue = mpiix_qc_issue,
25323 .cable_detect = ata_cable_40wire,
25324diff -urNp linux-2.6.32.43/drivers/ata/pata_netcell.c linux-2.6.32.43/drivers/ata/pata_netcell.c
25325--- linux-2.6.32.43/drivers/ata/pata_netcell.c 2011-03-27 14:31:47.000000000 -0400
25326+++ linux-2.6.32.43/drivers/ata/pata_netcell.c 2011-04-17 15:56:46.000000000 -0400
25327@@ -34,7 +34,7 @@ static struct scsi_host_template netcell
25328 ATA_BMDMA_SHT(DRV_NAME),
25329 };
25330
25331-static struct ata_port_operations netcell_ops = {
25332+static const struct ata_port_operations netcell_ops = {
25333 .inherits = &ata_bmdma_port_ops,
25334 .cable_detect = ata_cable_80wire,
25335 .read_id = netcell_read_id,
25336diff -urNp linux-2.6.32.43/drivers/ata/pata_ninja32.c linux-2.6.32.43/drivers/ata/pata_ninja32.c
25337--- linux-2.6.32.43/drivers/ata/pata_ninja32.c 2011-03-27 14:31:47.000000000 -0400
25338+++ linux-2.6.32.43/drivers/ata/pata_ninja32.c 2011-04-17 15:56:46.000000000 -0400
25339@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32
25340 ATA_BMDMA_SHT(DRV_NAME),
25341 };
25342
25343-static struct ata_port_operations ninja32_port_ops = {
25344+static const struct ata_port_operations ninja32_port_ops = {
25345 .inherits = &ata_bmdma_port_ops,
25346 .sff_dev_select = ninja32_dev_select,
25347 .cable_detect = ata_cable_40wire,
25348diff -urNp linux-2.6.32.43/drivers/ata/pata_ns87410.c linux-2.6.32.43/drivers/ata/pata_ns87410.c
25349--- linux-2.6.32.43/drivers/ata/pata_ns87410.c 2011-03-27 14:31:47.000000000 -0400
25350+++ linux-2.6.32.43/drivers/ata/pata_ns87410.c 2011-04-17 15:56:46.000000000 -0400
25351@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410
25352 ATA_PIO_SHT(DRV_NAME),
25353 };
25354
25355-static struct ata_port_operations ns87410_port_ops = {
25356+static const struct ata_port_operations ns87410_port_ops = {
25357 .inherits = &ata_sff_port_ops,
25358 .qc_issue = ns87410_qc_issue,
25359 .cable_detect = ata_cable_40wire,
25360diff -urNp linux-2.6.32.43/drivers/ata/pata_ns87415.c linux-2.6.32.43/drivers/ata/pata_ns87415.c
25361--- linux-2.6.32.43/drivers/ata/pata_ns87415.c 2011-03-27 14:31:47.000000000 -0400
25362+++ linux-2.6.32.43/drivers/ata/pata_ns87415.c 2011-04-17 15:56:46.000000000 -0400
25363@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at
25364 }
25365 #endif /* 87560 SuperIO Support */
25366
25367-static struct ata_port_operations ns87415_pata_ops = {
25368+static const struct ata_port_operations ns87415_pata_ops = {
25369 .inherits = &ata_bmdma_port_ops,
25370
25371 .check_atapi_dma = ns87415_check_atapi_dma,
25372@@ -313,7 +313,7 @@ static struct ata_port_operations ns8741
25373 };
25374
25375 #if defined(CONFIG_SUPERIO)
25376-static struct ata_port_operations ns87560_pata_ops = {
25377+static const struct ata_port_operations ns87560_pata_ops = {
25378 .inherits = &ns87415_pata_ops,
25379 .sff_tf_read = ns87560_tf_read,
25380 .sff_check_status = ns87560_check_status,
25381diff -urNp linux-2.6.32.43/drivers/ata/pata_octeon_cf.c linux-2.6.32.43/drivers/ata/pata_octeon_cf.c
25382--- linux-2.6.32.43/drivers/ata/pata_octeon_cf.c 2011-03-27 14:31:47.000000000 -0400
25383+++ linux-2.6.32.43/drivers/ata/pata_octeon_cf.c 2011-04-17 15:56:46.000000000 -0400
25384@@ -801,6 +801,7 @@ static unsigned int octeon_cf_qc_issue(s
25385 return 0;
25386 }
25387
25388+/* cannot be const */
25389 static struct ata_port_operations octeon_cf_ops = {
25390 .inherits = &ata_sff_port_ops,
25391 .check_atapi_dma = octeon_cf_check_atapi_dma,
25392diff -urNp linux-2.6.32.43/drivers/ata/pata_oldpiix.c linux-2.6.32.43/drivers/ata/pata_oldpiix.c
25393--- linux-2.6.32.43/drivers/ata/pata_oldpiix.c 2011-03-27 14:31:47.000000000 -0400
25394+++ linux-2.6.32.43/drivers/ata/pata_oldpiix.c 2011-04-17 15:56:46.000000000 -0400
25395@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix
25396 ATA_BMDMA_SHT(DRV_NAME),
25397 };
25398
25399-static struct ata_port_operations oldpiix_pata_ops = {
25400+static const struct ata_port_operations oldpiix_pata_ops = {
25401 .inherits = &ata_bmdma_port_ops,
25402 .qc_issue = oldpiix_qc_issue,
25403 .cable_detect = ata_cable_40wire,
25404diff -urNp linux-2.6.32.43/drivers/ata/pata_opti.c linux-2.6.32.43/drivers/ata/pata_opti.c
25405--- linux-2.6.32.43/drivers/ata/pata_opti.c 2011-03-27 14:31:47.000000000 -0400
25406+++ linux-2.6.32.43/drivers/ata/pata_opti.c 2011-04-17 15:56:46.000000000 -0400
25407@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh
25408 ATA_PIO_SHT(DRV_NAME),
25409 };
25410
25411-static struct ata_port_operations opti_port_ops = {
25412+static const struct ata_port_operations opti_port_ops = {
25413 .inherits = &ata_sff_port_ops,
25414 .cable_detect = ata_cable_40wire,
25415 .set_piomode = opti_set_piomode,
25416diff -urNp linux-2.6.32.43/drivers/ata/pata_optidma.c linux-2.6.32.43/drivers/ata/pata_optidma.c
25417--- linux-2.6.32.43/drivers/ata/pata_optidma.c 2011-03-27 14:31:47.000000000 -0400
25418+++ linux-2.6.32.43/drivers/ata/pata_optidma.c 2011-04-17 15:56:46.000000000 -0400
25419@@ -337,7 +337,7 @@ static struct scsi_host_template optidma
25420 ATA_BMDMA_SHT(DRV_NAME),
25421 };
25422
25423-static struct ata_port_operations optidma_port_ops = {
25424+static const struct ata_port_operations optidma_port_ops = {
25425 .inherits = &ata_bmdma_port_ops,
25426 .cable_detect = ata_cable_40wire,
25427 .set_piomode = optidma_set_pio_mode,
25428@@ -346,7 +346,7 @@ static struct ata_port_operations optidm
25429 .prereset = optidma_pre_reset,
25430 };
25431
25432-static struct ata_port_operations optiplus_port_ops = {
25433+static const struct ata_port_operations optiplus_port_ops = {
25434 .inherits = &optidma_port_ops,
25435 .set_piomode = optiplus_set_pio_mode,
25436 .set_dmamode = optiplus_set_dma_mode,
25437diff -urNp linux-2.6.32.43/drivers/ata/pata_palmld.c linux-2.6.32.43/drivers/ata/pata_palmld.c
25438--- linux-2.6.32.43/drivers/ata/pata_palmld.c 2011-03-27 14:31:47.000000000 -0400
25439+++ linux-2.6.32.43/drivers/ata/pata_palmld.c 2011-04-17 15:56:46.000000000 -0400
25440@@ -37,7 +37,7 @@ static struct scsi_host_template palmld_
25441 ATA_PIO_SHT(DRV_NAME),
25442 };
25443
25444-static struct ata_port_operations palmld_port_ops = {
25445+static const struct ata_port_operations palmld_port_ops = {
25446 .inherits = &ata_sff_port_ops,
25447 .sff_data_xfer = ata_sff_data_xfer_noirq,
25448 .cable_detect = ata_cable_40wire,
25449diff -urNp linux-2.6.32.43/drivers/ata/pata_pcmcia.c linux-2.6.32.43/drivers/ata/pata_pcmcia.c
25450--- linux-2.6.32.43/drivers/ata/pata_pcmcia.c 2011-03-27 14:31:47.000000000 -0400
25451+++ linux-2.6.32.43/drivers/ata/pata_pcmcia.c 2011-04-17 15:56:46.000000000 -0400
25452@@ -162,14 +162,14 @@ static struct scsi_host_template pcmcia_
25453 ATA_PIO_SHT(DRV_NAME),
25454 };
25455
25456-static struct ata_port_operations pcmcia_port_ops = {
25457+static const struct ata_port_operations pcmcia_port_ops = {
25458 .inherits = &ata_sff_port_ops,
25459 .sff_data_xfer = ata_sff_data_xfer_noirq,
25460 .cable_detect = ata_cable_40wire,
25461 .set_mode = pcmcia_set_mode,
25462 };
25463
25464-static struct ata_port_operations pcmcia_8bit_port_ops = {
25465+static const struct ata_port_operations pcmcia_8bit_port_ops = {
25466 .inherits = &ata_sff_port_ops,
25467 .sff_data_xfer = ata_data_xfer_8bit,
25468 .cable_detect = ata_cable_40wire,
25469@@ -256,7 +256,7 @@ static int pcmcia_init_one(struct pcmcia
25470 unsigned long io_base, ctl_base;
25471 void __iomem *io_addr, *ctl_addr;
25472 int n_ports = 1;
25473- struct ata_port_operations *ops = &pcmcia_port_ops;
25474+ const struct ata_port_operations *ops = &pcmcia_port_ops;
25475
25476 info = kzalloc(sizeof(*info), GFP_KERNEL);
25477 if (info == NULL)
25478diff -urNp linux-2.6.32.43/drivers/ata/pata_pdc2027x.c linux-2.6.32.43/drivers/ata/pata_pdc2027x.c
25479--- linux-2.6.32.43/drivers/ata/pata_pdc2027x.c 2011-03-27 14:31:47.000000000 -0400
25480+++ linux-2.6.32.43/drivers/ata/pata_pdc2027x.c 2011-04-17 15:56:46.000000000 -0400
25481@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027
25482 ATA_BMDMA_SHT(DRV_NAME),
25483 };
25484
25485-static struct ata_port_operations pdc2027x_pata100_ops = {
25486+static const struct ata_port_operations pdc2027x_pata100_ops = {
25487 .inherits = &ata_bmdma_port_ops,
25488 .check_atapi_dma = pdc2027x_check_atapi_dma,
25489 .cable_detect = pdc2027x_cable_detect,
25490 .prereset = pdc2027x_prereset,
25491 };
25492
25493-static struct ata_port_operations pdc2027x_pata133_ops = {
25494+static const struct ata_port_operations pdc2027x_pata133_ops = {
25495 .inherits = &pdc2027x_pata100_ops,
25496 .mode_filter = pdc2027x_mode_filter,
25497 .set_piomode = pdc2027x_set_piomode,
25498diff -urNp linux-2.6.32.43/drivers/ata/pata_pdc202xx_old.c linux-2.6.32.43/drivers/ata/pata_pdc202xx_old.c
25499--- linux-2.6.32.43/drivers/ata/pata_pdc202xx_old.c 2011-03-27 14:31:47.000000000 -0400
25500+++ linux-2.6.32.43/drivers/ata/pata_pdc202xx_old.c 2011-04-17 15:56:46.000000000 -0400
25501@@ -274,7 +274,7 @@ static struct scsi_host_template pdc202x
25502 ATA_BMDMA_SHT(DRV_NAME),
25503 };
25504
25505-static struct ata_port_operations pdc2024x_port_ops = {
25506+static const struct ata_port_operations pdc2024x_port_ops = {
25507 .inherits = &ata_bmdma_port_ops,
25508
25509 .cable_detect = ata_cable_40wire,
25510@@ -284,7 +284,7 @@ static struct ata_port_operations pdc202
25511 .sff_exec_command = pdc202xx_exec_command,
25512 };
25513
25514-static struct ata_port_operations pdc2026x_port_ops = {
25515+static const struct ata_port_operations pdc2026x_port_ops = {
25516 .inherits = &pdc2024x_port_ops,
25517
25518 .check_atapi_dma = pdc2026x_check_atapi_dma,
25519diff -urNp linux-2.6.32.43/drivers/ata/pata_platform.c linux-2.6.32.43/drivers/ata/pata_platform.c
25520--- linux-2.6.32.43/drivers/ata/pata_platform.c 2011-03-27 14:31:47.000000000 -0400
25521+++ linux-2.6.32.43/drivers/ata/pata_platform.c 2011-04-17 15:56:46.000000000 -0400
25522@@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl
25523 ATA_PIO_SHT(DRV_NAME),
25524 };
25525
25526-static struct ata_port_operations pata_platform_port_ops = {
25527+static const struct ata_port_operations pata_platform_port_ops = {
25528 .inherits = &ata_sff_port_ops,
25529 .sff_data_xfer = ata_sff_data_xfer_noirq,
25530 .cable_detect = ata_cable_unknown,
25531diff -urNp linux-2.6.32.43/drivers/ata/pata_qdi.c linux-2.6.32.43/drivers/ata/pata_qdi.c
25532--- linux-2.6.32.43/drivers/ata/pata_qdi.c 2011-03-27 14:31:47.000000000 -0400
25533+++ linux-2.6.32.43/drivers/ata/pata_qdi.c 2011-04-17 15:56:46.000000000 -0400
25534@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht
25535 ATA_PIO_SHT(DRV_NAME),
25536 };
25537
25538-static struct ata_port_operations qdi6500_port_ops = {
25539+static const struct ata_port_operations qdi6500_port_ops = {
25540 .inherits = &ata_sff_port_ops,
25541 .qc_issue = qdi_qc_issue,
25542 .sff_data_xfer = qdi_data_xfer,
25543@@ -165,7 +165,7 @@ static struct ata_port_operations qdi650
25544 .set_piomode = qdi6500_set_piomode,
25545 };
25546
25547-static struct ata_port_operations qdi6580_port_ops = {
25548+static const struct ata_port_operations qdi6580_port_ops = {
25549 .inherits = &qdi6500_port_ops,
25550 .set_piomode = qdi6580_set_piomode,
25551 };
25552diff -urNp linux-2.6.32.43/drivers/ata/pata_radisys.c linux-2.6.32.43/drivers/ata/pata_radisys.c
25553--- linux-2.6.32.43/drivers/ata/pata_radisys.c 2011-03-27 14:31:47.000000000 -0400
25554+++ linux-2.6.32.43/drivers/ata/pata_radisys.c 2011-04-17 15:56:46.000000000 -0400
25555@@ -187,7 +187,7 @@ static struct scsi_host_template radisys
25556 ATA_BMDMA_SHT(DRV_NAME),
25557 };
25558
25559-static struct ata_port_operations radisys_pata_ops = {
25560+static const struct ata_port_operations radisys_pata_ops = {
25561 .inherits = &ata_bmdma_port_ops,
25562 .qc_issue = radisys_qc_issue,
25563 .cable_detect = ata_cable_unknown,
25564diff -urNp linux-2.6.32.43/drivers/ata/pata_rb532_cf.c linux-2.6.32.43/drivers/ata/pata_rb532_cf.c
25565--- linux-2.6.32.43/drivers/ata/pata_rb532_cf.c 2011-03-27 14:31:47.000000000 -0400
25566+++ linux-2.6.32.43/drivers/ata/pata_rb532_cf.c 2011-04-17 15:56:46.000000000 -0400
25567@@ -68,7 +68,7 @@ static irqreturn_t rb532_pata_irq_handle
25568 return IRQ_HANDLED;
25569 }
25570
25571-static struct ata_port_operations rb532_pata_port_ops = {
25572+static const struct ata_port_operations rb532_pata_port_ops = {
25573 .inherits = &ata_sff_port_ops,
25574 .sff_data_xfer = ata_sff_data_xfer32,
25575 };
25576diff -urNp linux-2.6.32.43/drivers/ata/pata_rdc.c linux-2.6.32.43/drivers/ata/pata_rdc.c
25577--- linux-2.6.32.43/drivers/ata/pata_rdc.c 2011-03-27 14:31:47.000000000 -0400
25578+++ linux-2.6.32.43/drivers/ata/pata_rdc.c 2011-04-17 15:56:46.000000000 -0400
25579@@ -272,7 +272,7 @@ static void rdc_set_dmamode(struct ata_p
25580 pci_write_config_byte(dev, 0x48, udma_enable);
25581 }
25582
25583-static struct ata_port_operations rdc_pata_ops = {
25584+static const struct ata_port_operations rdc_pata_ops = {
25585 .inherits = &ata_bmdma32_port_ops,
25586 .cable_detect = rdc_pata_cable_detect,
25587 .set_piomode = rdc_set_piomode,
25588diff -urNp linux-2.6.32.43/drivers/ata/pata_rz1000.c linux-2.6.32.43/drivers/ata/pata_rz1000.c
25589--- linux-2.6.32.43/drivers/ata/pata_rz1000.c 2011-03-27 14:31:47.000000000 -0400
25590+++ linux-2.6.32.43/drivers/ata/pata_rz1000.c 2011-04-17 15:56:46.000000000 -0400
25591@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_
25592 ATA_PIO_SHT(DRV_NAME),
25593 };
25594
25595-static struct ata_port_operations rz1000_port_ops = {
25596+static const struct ata_port_operations rz1000_port_ops = {
25597 .inherits = &ata_sff_port_ops,
25598 .cable_detect = ata_cable_40wire,
25599 .set_mode = rz1000_set_mode,
25600diff -urNp linux-2.6.32.43/drivers/ata/pata_sc1200.c linux-2.6.32.43/drivers/ata/pata_sc1200.c
25601--- linux-2.6.32.43/drivers/ata/pata_sc1200.c 2011-03-27 14:31:47.000000000 -0400
25602+++ linux-2.6.32.43/drivers/ata/pata_sc1200.c 2011-04-17 15:56:46.000000000 -0400
25603@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_
25604 .sg_tablesize = LIBATA_DUMB_MAX_PRD,
25605 };
25606
25607-static struct ata_port_operations sc1200_port_ops = {
25608+static const struct ata_port_operations sc1200_port_ops = {
25609 .inherits = &ata_bmdma_port_ops,
25610 .qc_prep = ata_sff_dumb_qc_prep,
25611 .qc_issue = sc1200_qc_issue,
25612diff -urNp linux-2.6.32.43/drivers/ata/pata_scc.c linux-2.6.32.43/drivers/ata/pata_scc.c
25613--- linux-2.6.32.43/drivers/ata/pata_scc.c 2011-03-27 14:31:47.000000000 -0400
25614+++ linux-2.6.32.43/drivers/ata/pata_scc.c 2011-04-17 15:56:46.000000000 -0400
25615@@ -965,7 +965,7 @@ static struct scsi_host_template scc_sht
25616 ATA_BMDMA_SHT(DRV_NAME),
25617 };
25618
25619-static struct ata_port_operations scc_pata_ops = {
25620+static const struct ata_port_operations scc_pata_ops = {
25621 .inherits = &ata_bmdma_port_ops,
25622
25623 .set_piomode = scc_set_piomode,
25624diff -urNp linux-2.6.32.43/drivers/ata/pata_sch.c linux-2.6.32.43/drivers/ata/pata_sch.c
25625--- linux-2.6.32.43/drivers/ata/pata_sch.c 2011-03-27 14:31:47.000000000 -0400
25626+++ linux-2.6.32.43/drivers/ata/pata_sch.c 2011-04-17 15:56:46.000000000 -0400
25627@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht
25628 ATA_BMDMA_SHT(DRV_NAME),
25629 };
25630
25631-static struct ata_port_operations sch_pata_ops = {
25632+static const struct ata_port_operations sch_pata_ops = {
25633 .inherits = &ata_bmdma_port_ops,
25634 .cable_detect = ata_cable_unknown,
25635 .set_piomode = sch_set_piomode,
25636diff -urNp linux-2.6.32.43/drivers/ata/pata_serverworks.c linux-2.6.32.43/drivers/ata/pata_serverworks.c
25637--- linux-2.6.32.43/drivers/ata/pata_serverworks.c 2011-03-27 14:31:47.000000000 -0400
25638+++ linux-2.6.32.43/drivers/ata/pata_serverworks.c 2011-04-17 15:56:46.000000000 -0400
25639@@ -299,7 +299,7 @@ static struct scsi_host_template serverw
25640 ATA_BMDMA_SHT(DRV_NAME),
25641 };
25642
25643-static struct ata_port_operations serverworks_osb4_port_ops = {
25644+static const struct ata_port_operations serverworks_osb4_port_ops = {
25645 .inherits = &ata_bmdma_port_ops,
25646 .cable_detect = serverworks_cable_detect,
25647 .mode_filter = serverworks_osb4_filter,
25648@@ -307,7 +307,7 @@ static struct ata_port_operations server
25649 .set_dmamode = serverworks_set_dmamode,
25650 };
25651
25652-static struct ata_port_operations serverworks_csb_port_ops = {
25653+static const struct ata_port_operations serverworks_csb_port_ops = {
25654 .inherits = &serverworks_osb4_port_ops,
25655 .mode_filter = serverworks_csb_filter,
25656 };
25657diff -urNp linux-2.6.32.43/drivers/ata/pata_sil680.c linux-2.6.32.43/drivers/ata/pata_sil680.c
25658--- linux-2.6.32.43/drivers/ata/pata_sil680.c 2011-06-25 12:55:34.000000000 -0400
25659+++ linux-2.6.32.43/drivers/ata/pata_sil680.c 2011-06-25 12:56:37.000000000 -0400
25660@@ -194,7 +194,7 @@ static struct scsi_host_template sil680_
25661 ATA_BMDMA_SHT(DRV_NAME),
25662 };
25663
25664-static struct ata_port_operations sil680_port_ops = {
25665+static const struct ata_port_operations sil680_port_ops = {
25666 .inherits = &ata_bmdma32_port_ops,
25667 .cable_detect = sil680_cable_detect,
25668 .set_piomode = sil680_set_piomode,
25669diff -urNp linux-2.6.32.43/drivers/ata/pata_sis.c linux-2.6.32.43/drivers/ata/pata_sis.c
25670--- linux-2.6.32.43/drivers/ata/pata_sis.c 2011-03-27 14:31:47.000000000 -0400
25671+++ linux-2.6.32.43/drivers/ata/pata_sis.c 2011-04-17 15:56:46.000000000 -0400
25672@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht
25673 ATA_BMDMA_SHT(DRV_NAME),
25674 };
25675
25676-static struct ata_port_operations sis_133_for_sata_ops = {
25677+static const struct ata_port_operations sis_133_for_sata_ops = {
25678 .inherits = &ata_bmdma_port_ops,
25679 .set_piomode = sis_133_set_piomode,
25680 .set_dmamode = sis_133_set_dmamode,
25681 .cable_detect = sis_133_cable_detect,
25682 };
25683
25684-static struct ata_port_operations sis_base_ops = {
25685+static const struct ata_port_operations sis_base_ops = {
25686 .inherits = &ata_bmdma_port_ops,
25687 .prereset = sis_pre_reset,
25688 };
25689
25690-static struct ata_port_operations sis_133_ops = {
25691+static const struct ata_port_operations sis_133_ops = {
25692 .inherits = &sis_base_ops,
25693 .set_piomode = sis_133_set_piomode,
25694 .set_dmamode = sis_133_set_dmamode,
25695 .cable_detect = sis_133_cable_detect,
25696 };
25697
25698-static struct ata_port_operations sis_133_early_ops = {
25699+static const struct ata_port_operations sis_133_early_ops = {
25700 .inherits = &sis_base_ops,
25701 .set_piomode = sis_100_set_piomode,
25702 .set_dmamode = sis_133_early_set_dmamode,
25703 .cable_detect = sis_66_cable_detect,
25704 };
25705
25706-static struct ata_port_operations sis_100_ops = {
25707+static const struct ata_port_operations sis_100_ops = {
25708 .inherits = &sis_base_ops,
25709 .set_piomode = sis_100_set_piomode,
25710 .set_dmamode = sis_100_set_dmamode,
25711 .cable_detect = sis_66_cable_detect,
25712 };
25713
25714-static struct ata_port_operations sis_66_ops = {
25715+static const struct ata_port_operations sis_66_ops = {
25716 .inherits = &sis_base_ops,
25717 .set_piomode = sis_old_set_piomode,
25718 .set_dmamode = sis_66_set_dmamode,
25719 .cable_detect = sis_66_cable_detect,
25720 };
25721
25722-static struct ata_port_operations sis_old_ops = {
25723+static const struct ata_port_operations sis_old_ops = {
25724 .inherits = &sis_base_ops,
25725 .set_piomode = sis_old_set_piomode,
25726 .set_dmamode = sis_old_set_dmamode,
25727diff -urNp linux-2.6.32.43/drivers/ata/pata_sl82c105.c linux-2.6.32.43/drivers/ata/pata_sl82c105.c
25728--- linux-2.6.32.43/drivers/ata/pata_sl82c105.c 2011-03-27 14:31:47.000000000 -0400
25729+++ linux-2.6.32.43/drivers/ata/pata_sl82c105.c 2011-04-17 15:56:46.000000000 -0400
25730@@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10
25731 ATA_BMDMA_SHT(DRV_NAME),
25732 };
25733
25734-static struct ata_port_operations sl82c105_port_ops = {
25735+static const struct ata_port_operations sl82c105_port_ops = {
25736 .inherits = &ata_bmdma_port_ops,
25737 .qc_defer = sl82c105_qc_defer,
25738 .bmdma_start = sl82c105_bmdma_start,
25739diff -urNp linux-2.6.32.43/drivers/ata/pata_triflex.c linux-2.6.32.43/drivers/ata/pata_triflex.c
25740--- linux-2.6.32.43/drivers/ata/pata_triflex.c 2011-03-27 14:31:47.000000000 -0400
25741+++ linux-2.6.32.43/drivers/ata/pata_triflex.c 2011-04-17 15:56:46.000000000 -0400
25742@@ -178,7 +178,7 @@ static struct scsi_host_template triflex
25743 ATA_BMDMA_SHT(DRV_NAME),
25744 };
25745
25746-static struct ata_port_operations triflex_port_ops = {
25747+static const struct ata_port_operations triflex_port_ops = {
25748 .inherits = &ata_bmdma_port_ops,
25749 .bmdma_start = triflex_bmdma_start,
25750 .bmdma_stop = triflex_bmdma_stop,
25751diff -urNp linux-2.6.32.43/drivers/ata/pata_via.c linux-2.6.32.43/drivers/ata/pata_via.c
25752--- linux-2.6.32.43/drivers/ata/pata_via.c 2011-03-27 14:31:47.000000000 -0400
25753+++ linux-2.6.32.43/drivers/ata/pata_via.c 2011-04-17 15:56:46.000000000 -0400
25754@@ -419,7 +419,7 @@ static struct scsi_host_template via_sht
25755 ATA_BMDMA_SHT(DRV_NAME),
25756 };
25757
25758-static struct ata_port_operations via_port_ops = {
25759+static const struct ata_port_operations via_port_ops = {
25760 .inherits = &ata_bmdma_port_ops,
25761 .cable_detect = via_cable_detect,
25762 .set_piomode = via_set_piomode,
25763@@ -429,7 +429,7 @@ static struct ata_port_operations via_po
25764 .port_start = via_port_start,
25765 };
25766
25767-static struct ata_port_operations via_port_ops_noirq = {
25768+static const struct ata_port_operations via_port_ops_noirq = {
25769 .inherits = &via_port_ops,
25770 .sff_data_xfer = ata_sff_data_xfer_noirq,
25771 };
25772diff -urNp linux-2.6.32.43/drivers/ata/pata_winbond.c linux-2.6.32.43/drivers/ata/pata_winbond.c
25773--- linux-2.6.32.43/drivers/ata/pata_winbond.c 2011-03-27 14:31:47.000000000 -0400
25774+++ linux-2.6.32.43/drivers/ata/pata_winbond.c 2011-04-17 15:56:46.000000000 -0400
25775@@ -125,7 +125,7 @@ static struct scsi_host_template winbond
25776 ATA_PIO_SHT(DRV_NAME),
25777 };
25778
25779-static struct ata_port_operations winbond_port_ops = {
25780+static const struct ata_port_operations winbond_port_ops = {
25781 .inherits = &ata_sff_port_ops,
25782 .sff_data_xfer = winbond_data_xfer,
25783 .cable_detect = ata_cable_40wire,
25784diff -urNp linux-2.6.32.43/drivers/ata/pdc_adma.c linux-2.6.32.43/drivers/ata/pdc_adma.c
25785--- linux-2.6.32.43/drivers/ata/pdc_adma.c 2011-03-27 14:31:47.000000000 -0400
25786+++ linux-2.6.32.43/drivers/ata/pdc_adma.c 2011-04-17 15:56:46.000000000 -0400
25787@@ -145,7 +145,7 @@ static struct scsi_host_template adma_at
25788 .dma_boundary = ADMA_DMA_BOUNDARY,
25789 };
25790
25791-static struct ata_port_operations adma_ata_ops = {
25792+static const struct ata_port_operations adma_ata_ops = {
25793 .inherits = &ata_sff_port_ops,
25794
25795 .lost_interrupt = ATA_OP_NULL,
25796diff -urNp linux-2.6.32.43/drivers/ata/sata_fsl.c linux-2.6.32.43/drivers/ata/sata_fsl.c
25797--- linux-2.6.32.43/drivers/ata/sata_fsl.c 2011-03-27 14:31:47.000000000 -0400
25798+++ linux-2.6.32.43/drivers/ata/sata_fsl.c 2011-04-17 15:56:46.000000000 -0400
25799@@ -1258,7 +1258,7 @@ static struct scsi_host_template sata_fs
25800 .dma_boundary = ATA_DMA_BOUNDARY,
25801 };
25802
25803-static struct ata_port_operations sata_fsl_ops = {
25804+static const struct ata_port_operations sata_fsl_ops = {
25805 .inherits = &sata_pmp_port_ops,
25806
25807 .qc_defer = ata_std_qc_defer,
25808diff -urNp linux-2.6.32.43/drivers/ata/sata_inic162x.c linux-2.6.32.43/drivers/ata/sata_inic162x.c
25809--- linux-2.6.32.43/drivers/ata/sata_inic162x.c 2011-03-27 14:31:47.000000000 -0400
25810+++ linux-2.6.32.43/drivers/ata/sata_inic162x.c 2011-04-17 15:56:46.000000000 -0400
25811@@ -721,7 +721,7 @@ static int inic_port_start(struct ata_po
25812 return 0;
25813 }
25814
25815-static struct ata_port_operations inic_port_ops = {
25816+static const struct ata_port_operations inic_port_ops = {
25817 .inherits = &sata_port_ops,
25818
25819 .check_atapi_dma = inic_check_atapi_dma,
25820diff -urNp linux-2.6.32.43/drivers/ata/sata_mv.c linux-2.6.32.43/drivers/ata/sata_mv.c
25821--- linux-2.6.32.43/drivers/ata/sata_mv.c 2011-03-27 14:31:47.000000000 -0400
25822+++ linux-2.6.32.43/drivers/ata/sata_mv.c 2011-04-17 15:56:46.000000000 -0400
25823@@ -656,7 +656,7 @@ static struct scsi_host_template mv6_sht
25824 .dma_boundary = MV_DMA_BOUNDARY,
25825 };
25826
25827-static struct ata_port_operations mv5_ops = {
25828+static const struct ata_port_operations mv5_ops = {
25829 .inherits = &ata_sff_port_ops,
25830
25831 .lost_interrupt = ATA_OP_NULL,
25832@@ -678,7 +678,7 @@ static struct ata_port_operations mv5_op
25833 .port_stop = mv_port_stop,
25834 };
25835
25836-static struct ata_port_operations mv6_ops = {
25837+static const struct ata_port_operations mv6_ops = {
25838 .inherits = &mv5_ops,
25839 .dev_config = mv6_dev_config,
25840 .scr_read = mv_scr_read,
25841@@ -698,7 +698,7 @@ static struct ata_port_operations mv6_op
25842 .bmdma_status = mv_bmdma_status,
25843 };
25844
25845-static struct ata_port_operations mv_iie_ops = {
25846+static const struct ata_port_operations mv_iie_ops = {
25847 .inherits = &mv6_ops,
25848 .dev_config = ATA_OP_NULL,
25849 .qc_prep = mv_qc_prep_iie,
25850diff -urNp linux-2.6.32.43/drivers/ata/sata_nv.c linux-2.6.32.43/drivers/ata/sata_nv.c
25851--- linux-2.6.32.43/drivers/ata/sata_nv.c 2011-03-27 14:31:47.000000000 -0400
25852+++ linux-2.6.32.43/drivers/ata/sata_nv.c 2011-04-17 15:56:46.000000000 -0400
25853@@ -464,7 +464,7 @@ static struct scsi_host_template nv_swnc
25854 * cases. Define nv_hardreset() which only kicks in for post-boot
25855 * probing and use it for all variants.
25856 */
25857-static struct ata_port_operations nv_generic_ops = {
25858+static const struct ata_port_operations nv_generic_ops = {
25859 .inherits = &ata_bmdma_port_ops,
25860 .lost_interrupt = ATA_OP_NULL,
25861 .scr_read = nv_scr_read,
25862@@ -472,20 +472,20 @@ static struct ata_port_operations nv_gen
25863 .hardreset = nv_hardreset,
25864 };
25865
25866-static struct ata_port_operations nv_nf2_ops = {
25867+static const struct ata_port_operations nv_nf2_ops = {
25868 .inherits = &nv_generic_ops,
25869 .freeze = nv_nf2_freeze,
25870 .thaw = nv_nf2_thaw,
25871 };
25872
25873-static struct ata_port_operations nv_ck804_ops = {
25874+static const struct ata_port_operations nv_ck804_ops = {
25875 .inherits = &nv_generic_ops,
25876 .freeze = nv_ck804_freeze,
25877 .thaw = nv_ck804_thaw,
25878 .host_stop = nv_ck804_host_stop,
25879 };
25880
25881-static struct ata_port_operations nv_adma_ops = {
25882+static const struct ata_port_operations nv_adma_ops = {
25883 .inherits = &nv_ck804_ops,
25884
25885 .check_atapi_dma = nv_adma_check_atapi_dma,
25886@@ -509,7 +509,7 @@ static struct ata_port_operations nv_adm
25887 .host_stop = nv_adma_host_stop,
25888 };
25889
25890-static struct ata_port_operations nv_swncq_ops = {
25891+static const struct ata_port_operations nv_swncq_ops = {
25892 .inherits = &nv_generic_ops,
25893
25894 .qc_defer = ata_std_qc_defer,
25895diff -urNp linux-2.6.32.43/drivers/ata/sata_promise.c linux-2.6.32.43/drivers/ata/sata_promise.c
25896--- linux-2.6.32.43/drivers/ata/sata_promise.c 2011-03-27 14:31:47.000000000 -0400
25897+++ linux-2.6.32.43/drivers/ata/sata_promise.c 2011-04-17 15:56:46.000000000 -0400
25898@@ -195,7 +195,7 @@ static const struct ata_port_operations
25899 .error_handler = pdc_error_handler,
25900 };
25901
25902-static struct ata_port_operations pdc_sata_ops = {
25903+static const struct ata_port_operations pdc_sata_ops = {
25904 .inherits = &pdc_common_ops,
25905 .cable_detect = pdc_sata_cable_detect,
25906 .freeze = pdc_sata_freeze,
25907@@ -208,14 +208,14 @@ static struct ata_port_operations pdc_sa
25908
25909 /* First-generation chips need a more restrictive ->check_atapi_dma op,
25910 and ->freeze/thaw that ignore the hotplug controls. */
25911-static struct ata_port_operations pdc_old_sata_ops = {
25912+static const struct ata_port_operations pdc_old_sata_ops = {
25913 .inherits = &pdc_sata_ops,
25914 .freeze = pdc_freeze,
25915 .thaw = pdc_thaw,
25916 .check_atapi_dma = pdc_old_sata_check_atapi_dma,
25917 };
25918
25919-static struct ata_port_operations pdc_pata_ops = {
25920+static const struct ata_port_operations pdc_pata_ops = {
25921 .inherits = &pdc_common_ops,
25922 .cable_detect = pdc_pata_cable_detect,
25923 .freeze = pdc_freeze,
25924diff -urNp linux-2.6.32.43/drivers/ata/sata_qstor.c linux-2.6.32.43/drivers/ata/sata_qstor.c
25925--- linux-2.6.32.43/drivers/ata/sata_qstor.c 2011-03-27 14:31:47.000000000 -0400
25926+++ linux-2.6.32.43/drivers/ata/sata_qstor.c 2011-04-17 15:56:46.000000000 -0400
25927@@ -132,7 +132,7 @@ static struct scsi_host_template qs_ata_
25928 .dma_boundary = QS_DMA_BOUNDARY,
25929 };
25930
25931-static struct ata_port_operations qs_ata_ops = {
25932+static const struct ata_port_operations qs_ata_ops = {
25933 .inherits = &ata_sff_port_ops,
25934
25935 .check_atapi_dma = qs_check_atapi_dma,
25936diff -urNp linux-2.6.32.43/drivers/ata/sata_sil24.c linux-2.6.32.43/drivers/ata/sata_sil24.c
25937--- linux-2.6.32.43/drivers/ata/sata_sil24.c 2011-03-27 14:31:47.000000000 -0400
25938+++ linux-2.6.32.43/drivers/ata/sata_sil24.c 2011-04-17 15:56:46.000000000 -0400
25939@@ -388,7 +388,7 @@ static struct scsi_host_template sil24_s
25940 .dma_boundary = ATA_DMA_BOUNDARY,
25941 };
25942
25943-static struct ata_port_operations sil24_ops = {
25944+static const struct ata_port_operations sil24_ops = {
25945 .inherits = &sata_pmp_port_ops,
25946
25947 .qc_defer = sil24_qc_defer,
25948diff -urNp linux-2.6.32.43/drivers/ata/sata_sil.c linux-2.6.32.43/drivers/ata/sata_sil.c
25949--- linux-2.6.32.43/drivers/ata/sata_sil.c 2011-03-27 14:31:47.000000000 -0400
25950+++ linux-2.6.32.43/drivers/ata/sata_sil.c 2011-04-17 15:56:46.000000000 -0400
25951@@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht
25952 .sg_tablesize = ATA_MAX_PRD
25953 };
25954
25955-static struct ata_port_operations sil_ops = {
25956+static const struct ata_port_operations sil_ops = {
25957 .inherits = &ata_bmdma32_port_ops,
25958 .dev_config = sil_dev_config,
25959 .set_mode = sil_set_mode,
25960diff -urNp linux-2.6.32.43/drivers/ata/sata_sis.c linux-2.6.32.43/drivers/ata/sata_sis.c
25961--- linux-2.6.32.43/drivers/ata/sata_sis.c 2011-03-27 14:31:47.000000000 -0400
25962+++ linux-2.6.32.43/drivers/ata/sata_sis.c 2011-04-17 15:56:46.000000000 -0400
25963@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht
25964 ATA_BMDMA_SHT(DRV_NAME),
25965 };
25966
25967-static struct ata_port_operations sis_ops = {
25968+static const struct ata_port_operations sis_ops = {
25969 .inherits = &ata_bmdma_port_ops,
25970 .scr_read = sis_scr_read,
25971 .scr_write = sis_scr_write,
25972diff -urNp linux-2.6.32.43/drivers/ata/sata_svw.c linux-2.6.32.43/drivers/ata/sata_svw.c
25973--- linux-2.6.32.43/drivers/ata/sata_svw.c 2011-03-27 14:31:47.000000000 -0400
25974+++ linux-2.6.32.43/drivers/ata/sata_svw.c 2011-04-17 15:56:46.000000000 -0400
25975@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata
25976 };
25977
25978
25979-static struct ata_port_operations k2_sata_ops = {
25980+static const struct ata_port_operations k2_sata_ops = {
25981 .inherits = &ata_bmdma_port_ops,
25982 .sff_tf_load = k2_sata_tf_load,
25983 .sff_tf_read = k2_sata_tf_read,
25984diff -urNp linux-2.6.32.43/drivers/ata/sata_sx4.c linux-2.6.32.43/drivers/ata/sata_sx4.c
25985--- linux-2.6.32.43/drivers/ata/sata_sx4.c 2011-03-27 14:31:47.000000000 -0400
25986+++ linux-2.6.32.43/drivers/ata/sata_sx4.c 2011-04-17 15:56:46.000000000 -0400
25987@@ -248,7 +248,7 @@ static struct scsi_host_template pdc_sat
25988 };
25989
25990 /* TODO: inherit from base port_ops after converting to new EH */
25991-static struct ata_port_operations pdc_20621_ops = {
25992+static const struct ata_port_operations pdc_20621_ops = {
25993 .inherits = &ata_sff_port_ops,
25994
25995 .check_atapi_dma = pdc_check_atapi_dma,
25996diff -urNp linux-2.6.32.43/drivers/ata/sata_uli.c linux-2.6.32.43/drivers/ata/sata_uli.c
25997--- linux-2.6.32.43/drivers/ata/sata_uli.c 2011-03-27 14:31:47.000000000 -0400
25998+++ linux-2.6.32.43/drivers/ata/sata_uli.c 2011-04-17 15:56:46.000000000 -0400
25999@@ -79,7 +79,7 @@ static struct scsi_host_template uli_sht
26000 ATA_BMDMA_SHT(DRV_NAME),
26001 };
26002
26003-static struct ata_port_operations uli_ops = {
26004+static const struct ata_port_operations uli_ops = {
26005 .inherits = &ata_bmdma_port_ops,
26006 .scr_read = uli_scr_read,
26007 .scr_write = uli_scr_write,
26008diff -urNp linux-2.6.32.43/drivers/ata/sata_via.c linux-2.6.32.43/drivers/ata/sata_via.c
26009--- linux-2.6.32.43/drivers/ata/sata_via.c 2011-05-10 22:12:01.000000000 -0400
26010+++ linux-2.6.32.43/drivers/ata/sata_via.c 2011-05-10 22:15:08.000000000 -0400
26011@@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh
26012 ATA_BMDMA_SHT(DRV_NAME),
26013 };
26014
26015-static struct ata_port_operations svia_base_ops = {
26016+static const struct ata_port_operations svia_base_ops = {
26017 .inherits = &ata_bmdma_port_ops,
26018 .sff_tf_load = svia_tf_load,
26019 };
26020
26021-static struct ata_port_operations vt6420_sata_ops = {
26022+static const struct ata_port_operations vt6420_sata_ops = {
26023 .inherits = &svia_base_ops,
26024 .freeze = svia_noop_freeze,
26025 .prereset = vt6420_prereset,
26026 .bmdma_start = vt6420_bmdma_start,
26027 };
26028
26029-static struct ata_port_operations vt6421_pata_ops = {
26030+static const struct ata_port_operations vt6421_pata_ops = {
26031 .inherits = &svia_base_ops,
26032 .cable_detect = vt6421_pata_cable_detect,
26033 .set_piomode = vt6421_set_pio_mode,
26034 .set_dmamode = vt6421_set_dma_mode,
26035 };
26036
26037-static struct ata_port_operations vt6421_sata_ops = {
26038+static const struct ata_port_operations vt6421_sata_ops = {
26039 .inherits = &svia_base_ops,
26040 .scr_read = svia_scr_read,
26041 .scr_write = svia_scr_write,
26042 };
26043
26044-static struct ata_port_operations vt8251_ops = {
26045+static const struct ata_port_operations vt8251_ops = {
26046 .inherits = &svia_base_ops,
26047 .hardreset = sata_std_hardreset,
26048 .scr_read = vt8251_scr_read,
26049diff -urNp linux-2.6.32.43/drivers/ata/sata_vsc.c linux-2.6.32.43/drivers/ata/sata_vsc.c
26050--- linux-2.6.32.43/drivers/ata/sata_vsc.c 2011-03-27 14:31:47.000000000 -0400
26051+++ linux-2.6.32.43/drivers/ata/sata_vsc.c 2011-04-17 15:56:46.000000000 -0400
26052@@ -306,7 +306,7 @@ static struct scsi_host_template vsc_sat
26053 };
26054
26055
26056-static struct ata_port_operations vsc_sata_ops = {
26057+static const struct ata_port_operations vsc_sata_ops = {
26058 .inherits = &ata_bmdma_port_ops,
26059 /* The IRQ handling is not quite standard SFF behaviour so we
26060 cannot use the default lost interrupt handler */
26061diff -urNp linux-2.6.32.43/drivers/atm/adummy.c linux-2.6.32.43/drivers/atm/adummy.c
26062--- linux-2.6.32.43/drivers/atm/adummy.c 2011-03-27 14:31:47.000000000 -0400
26063+++ linux-2.6.32.43/drivers/atm/adummy.c 2011-04-17 15:56:46.000000000 -0400
26064@@ -77,7 +77,7 @@ adummy_send(struct atm_vcc *vcc, struct
26065 vcc->pop(vcc, skb);
26066 else
26067 dev_kfree_skb_any(skb);
26068- atomic_inc(&vcc->stats->tx);
26069+ atomic_inc_unchecked(&vcc->stats->tx);
26070
26071 return 0;
26072 }
26073diff -urNp linux-2.6.32.43/drivers/atm/ambassador.c linux-2.6.32.43/drivers/atm/ambassador.c
26074--- linux-2.6.32.43/drivers/atm/ambassador.c 2011-03-27 14:31:47.000000000 -0400
26075+++ linux-2.6.32.43/drivers/atm/ambassador.c 2011-04-17 15:56:46.000000000 -0400
26076@@ -453,7 +453,7 @@ static void tx_complete (amb_dev * dev,
26077 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
26078
26079 // VC layer stats
26080- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26081+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26082
26083 // free the descriptor
26084 kfree (tx_descr);
26085@@ -494,7 +494,7 @@ static void rx_complete (amb_dev * dev,
26086 dump_skb ("<<<", vc, skb);
26087
26088 // VC layer stats
26089- atomic_inc(&atm_vcc->stats->rx);
26090+ atomic_inc_unchecked(&atm_vcc->stats->rx);
26091 __net_timestamp(skb);
26092 // end of our responsability
26093 atm_vcc->push (atm_vcc, skb);
26094@@ -509,7 +509,7 @@ static void rx_complete (amb_dev * dev,
26095 } else {
26096 PRINTK (KERN_INFO, "dropped over-size frame");
26097 // should we count this?
26098- atomic_inc(&atm_vcc->stats->rx_drop);
26099+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26100 }
26101
26102 } else {
26103@@ -1341,7 +1341,7 @@ static int amb_send (struct atm_vcc * at
26104 }
26105
26106 if (check_area (skb->data, skb->len)) {
26107- atomic_inc(&atm_vcc->stats->tx_err);
26108+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
26109 return -ENOMEM; // ?
26110 }
26111
26112diff -urNp linux-2.6.32.43/drivers/atm/atmtcp.c linux-2.6.32.43/drivers/atm/atmtcp.c
26113--- linux-2.6.32.43/drivers/atm/atmtcp.c 2011-03-27 14:31:47.000000000 -0400
26114+++ linux-2.6.32.43/drivers/atm/atmtcp.c 2011-04-17 15:56:46.000000000 -0400
26115@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc
26116 if (vcc->pop) vcc->pop(vcc,skb);
26117 else dev_kfree_skb(skb);
26118 if (dev_data) return 0;
26119- atomic_inc(&vcc->stats->tx_err);
26120+ atomic_inc_unchecked(&vcc->stats->tx_err);
26121 return -ENOLINK;
26122 }
26123 size = skb->len+sizeof(struct atmtcp_hdr);
26124@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc
26125 if (!new_skb) {
26126 if (vcc->pop) vcc->pop(vcc,skb);
26127 else dev_kfree_skb(skb);
26128- atomic_inc(&vcc->stats->tx_err);
26129+ atomic_inc_unchecked(&vcc->stats->tx_err);
26130 return -ENOBUFS;
26131 }
26132 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
26133@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc
26134 if (vcc->pop) vcc->pop(vcc,skb);
26135 else dev_kfree_skb(skb);
26136 out_vcc->push(out_vcc,new_skb);
26137- atomic_inc(&vcc->stats->tx);
26138- atomic_inc(&out_vcc->stats->rx);
26139+ atomic_inc_unchecked(&vcc->stats->tx);
26140+ atomic_inc_unchecked(&out_vcc->stats->rx);
26141 return 0;
26142 }
26143
26144@@ -300,7 +300,7 @@ static int atmtcp_c_send(struct atm_vcc
26145 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
26146 read_unlock(&vcc_sklist_lock);
26147 if (!out_vcc) {
26148- atomic_inc(&vcc->stats->tx_err);
26149+ atomic_inc_unchecked(&vcc->stats->tx_err);
26150 goto done;
26151 }
26152 skb_pull(skb,sizeof(struct atmtcp_hdr));
26153@@ -312,8 +312,8 @@ static int atmtcp_c_send(struct atm_vcc
26154 __net_timestamp(new_skb);
26155 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
26156 out_vcc->push(out_vcc,new_skb);
26157- atomic_inc(&vcc->stats->tx);
26158- atomic_inc(&out_vcc->stats->rx);
26159+ atomic_inc_unchecked(&vcc->stats->tx);
26160+ atomic_inc_unchecked(&out_vcc->stats->rx);
26161 done:
26162 if (vcc->pop) vcc->pop(vcc,skb);
26163 else dev_kfree_skb(skb);
26164diff -urNp linux-2.6.32.43/drivers/atm/eni.c linux-2.6.32.43/drivers/atm/eni.c
26165--- linux-2.6.32.43/drivers/atm/eni.c 2011-03-27 14:31:47.000000000 -0400
26166+++ linux-2.6.32.43/drivers/atm/eni.c 2011-04-17 15:56:46.000000000 -0400
26167@@ -525,7 +525,7 @@ static int rx_aal0(struct atm_vcc *vcc)
26168 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
26169 vcc->dev->number);
26170 length = 0;
26171- atomic_inc(&vcc->stats->rx_err);
26172+ atomic_inc_unchecked(&vcc->stats->rx_err);
26173 }
26174 else {
26175 length = ATM_CELL_SIZE-1; /* no HEC */
26176@@ -580,7 +580,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26177 size);
26178 }
26179 eff = length = 0;
26180- atomic_inc(&vcc->stats->rx_err);
26181+ atomic_inc_unchecked(&vcc->stats->rx_err);
26182 }
26183 else {
26184 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
26185@@ -597,7 +597,7 @@ static int rx_aal5(struct atm_vcc *vcc)
26186 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
26187 vcc->dev->number,vcc->vci,length,size << 2,descr);
26188 length = eff = 0;
26189- atomic_inc(&vcc->stats->rx_err);
26190+ atomic_inc_unchecked(&vcc->stats->rx_err);
26191 }
26192 }
26193 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
26194@@ -770,7 +770,7 @@ rx_dequeued++;
26195 vcc->push(vcc,skb);
26196 pushed++;
26197 }
26198- atomic_inc(&vcc->stats->rx);
26199+ atomic_inc_unchecked(&vcc->stats->rx);
26200 }
26201 wake_up(&eni_dev->rx_wait);
26202 }
26203@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *d
26204 PCI_DMA_TODEVICE);
26205 if (vcc->pop) vcc->pop(vcc,skb);
26206 else dev_kfree_skb_irq(skb);
26207- atomic_inc(&vcc->stats->tx);
26208+ atomic_inc_unchecked(&vcc->stats->tx);
26209 wake_up(&eni_dev->tx_wait);
26210 dma_complete++;
26211 }
26212diff -urNp linux-2.6.32.43/drivers/atm/firestream.c linux-2.6.32.43/drivers/atm/firestream.c
26213--- linux-2.6.32.43/drivers/atm/firestream.c 2011-03-27 14:31:47.000000000 -0400
26214+++ linux-2.6.32.43/drivers/atm/firestream.c 2011-04-17 15:56:46.000000000 -0400
26215@@ -748,7 +748,7 @@ static void process_txdone_queue (struct
26216 }
26217 }
26218
26219- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26220+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26221
26222 fs_dprintk (FS_DEBUG_TXMEM, "i");
26223 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
26224@@ -815,7 +815,7 @@ static void process_incoming (struct fs_
26225 #endif
26226 skb_put (skb, qe->p1 & 0xffff);
26227 ATM_SKB(skb)->vcc = atm_vcc;
26228- atomic_inc(&atm_vcc->stats->rx);
26229+ atomic_inc_unchecked(&atm_vcc->stats->rx);
26230 __net_timestamp(skb);
26231 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
26232 atm_vcc->push (atm_vcc, skb);
26233@@ -836,12 +836,12 @@ static void process_incoming (struct fs_
26234 kfree (pe);
26235 }
26236 if (atm_vcc)
26237- atomic_inc(&atm_vcc->stats->rx_drop);
26238+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26239 break;
26240 case 0x1f: /* Reassembly abort: no buffers. */
26241 /* Silently increment error counter. */
26242 if (atm_vcc)
26243- atomic_inc(&atm_vcc->stats->rx_drop);
26244+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
26245 break;
26246 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
26247 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
26248diff -urNp linux-2.6.32.43/drivers/atm/fore200e.c linux-2.6.32.43/drivers/atm/fore200e.c
26249--- linux-2.6.32.43/drivers/atm/fore200e.c 2011-03-27 14:31:47.000000000 -0400
26250+++ linux-2.6.32.43/drivers/atm/fore200e.c 2011-04-17 15:56:46.000000000 -0400
26251@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200
26252 #endif
26253 /* check error condition */
26254 if (*entry->status & STATUS_ERROR)
26255- atomic_inc(&vcc->stats->tx_err);
26256+ atomic_inc_unchecked(&vcc->stats->tx_err);
26257 else
26258- atomic_inc(&vcc->stats->tx);
26259+ atomic_inc_unchecked(&vcc->stats->tx);
26260 }
26261 }
26262
26263@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore2
26264 if (skb == NULL) {
26265 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
26266
26267- atomic_inc(&vcc->stats->rx_drop);
26268+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26269 return -ENOMEM;
26270 }
26271
26272@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore2
26273
26274 dev_kfree_skb_any(skb);
26275
26276- atomic_inc(&vcc->stats->rx_drop);
26277+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26278 return -ENOMEM;
26279 }
26280
26281 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26282
26283 vcc->push(vcc, skb);
26284- atomic_inc(&vcc->stats->rx);
26285+ atomic_inc_unchecked(&vcc->stats->rx);
26286
26287 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
26288
26289@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200
26290 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
26291 fore200e->atm_dev->number,
26292 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
26293- atomic_inc(&vcc->stats->rx_err);
26294+ atomic_inc_unchecked(&vcc->stats->rx_err);
26295 }
26296 }
26297
26298@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struc
26299 goto retry_here;
26300 }
26301
26302- atomic_inc(&vcc->stats->tx_err);
26303+ atomic_inc_unchecked(&vcc->stats->tx_err);
26304
26305 fore200e->tx_sat++;
26306 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
26307diff -urNp linux-2.6.32.43/drivers/atm/he.c linux-2.6.32.43/drivers/atm/he.c
26308--- linux-2.6.32.43/drivers/atm/he.c 2011-03-27 14:31:47.000000000 -0400
26309+++ linux-2.6.32.43/drivers/atm/he.c 2011-04-17 15:56:46.000000000 -0400
26310@@ -1769,7 +1769,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26311
26312 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
26313 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
26314- atomic_inc(&vcc->stats->rx_drop);
26315+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26316 goto return_host_buffers;
26317 }
26318
26319@@ -1802,7 +1802,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26320 RBRQ_LEN_ERR(he_dev->rbrq_head)
26321 ? "LEN_ERR" : "",
26322 vcc->vpi, vcc->vci);
26323- atomic_inc(&vcc->stats->rx_err);
26324+ atomic_inc_unchecked(&vcc->stats->rx_err);
26325 goto return_host_buffers;
26326 }
26327
26328@@ -1861,7 +1861,7 @@ he_service_rbrq(struct he_dev *he_dev, i
26329 vcc->push(vcc, skb);
26330 spin_lock(&he_dev->global_lock);
26331
26332- atomic_inc(&vcc->stats->rx);
26333+ atomic_inc_unchecked(&vcc->stats->rx);
26334
26335 return_host_buffers:
26336 ++pdus_assembled;
26337@@ -2206,7 +2206,7 @@ __enqueue_tpd(struct he_dev *he_dev, str
26338 tpd->vcc->pop(tpd->vcc, tpd->skb);
26339 else
26340 dev_kfree_skb_any(tpd->skb);
26341- atomic_inc(&tpd->vcc->stats->tx_err);
26342+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
26343 }
26344 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
26345 return;
26346@@ -2618,7 +2618,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26347 vcc->pop(vcc, skb);
26348 else
26349 dev_kfree_skb_any(skb);
26350- atomic_inc(&vcc->stats->tx_err);
26351+ atomic_inc_unchecked(&vcc->stats->tx_err);
26352 return -EINVAL;
26353 }
26354
26355@@ -2629,7 +2629,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26356 vcc->pop(vcc, skb);
26357 else
26358 dev_kfree_skb_any(skb);
26359- atomic_inc(&vcc->stats->tx_err);
26360+ atomic_inc_unchecked(&vcc->stats->tx_err);
26361 return -EINVAL;
26362 }
26363 #endif
26364@@ -2641,7 +2641,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26365 vcc->pop(vcc, skb);
26366 else
26367 dev_kfree_skb_any(skb);
26368- atomic_inc(&vcc->stats->tx_err);
26369+ atomic_inc_unchecked(&vcc->stats->tx_err);
26370 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26371 return -ENOMEM;
26372 }
26373@@ -2683,7 +2683,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26374 vcc->pop(vcc, skb);
26375 else
26376 dev_kfree_skb_any(skb);
26377- atomic_inc(&vcc->stats->tx_err);
26378+ atomic_inc_unchecked(&vcc->stats->tx_err);
26379 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26380 return -ENOMEM;
26381 }
26382@@ -2714,7 +2714,7 @@ he_send(struct atm_vcc *vcc, struct sk_b
26383 __enqueue_tpd(he_dev, tpd, cid);
26384 spin_unlock_irqrestore(&he_dev->global_lock, flags);
26385
26386- atomic_inc(&vcc->stats->tx);
26387+ atomic_inc_unchecked(&vcc->stats->tx);
26388
26389 return 0;
26390 }
26391diff -urNp linux-2.6.32.43/drivers/atm/horizon.c linux-2.6.32.43/drivers/atm/horizon.c
26392--- linux-2.6.32.43/drivers/atm/horizon.c 2011-03-27 14:31:47.000000000 -0400
26393+++ linux-2.6.32.43/drivers/atm/horizon.c 2011-04-17 15:56:46.000000000 -0400
26394@@ -1033,7 +1033,7 @@ static void rx_schedule (hrz_dev * dev,
26395 {
26396 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
26397 // VC layer stats
26398- atomic_inc(&vcc->stats->rx);
26399+ atomic_inc_unchecked(&vcc->stats->rx);
26400 __net_timestamp(skb);
26401 // end of our responsability
26402 vcc->push (vcc, skb);
26403@@ -1185,7 +1185,7 @@ static void tx_schedule (hrz_dev * const
26404 dev->tx_iovec = NULL;
26405
26406 // VC layer stats
26407- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
26408+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
26409
26410 // free the skb
26411 hrz_kfree_skb (skb);
26412diff -urNp linux-2.6.32.43/drivers/atm/idt77252.c linux-2.6.32.43/drivers/atm/idt77252.c
26413--- linux-2.6.32.43/drivers/atm/idt77252.c 2011-03-27 14:31:47.000000000 -0400
26414+++ linux-2.6.32.43/drivers/atm/idt77252.c 2011-04-17 15:56:46.000000000 -0400
26415@@ -810,7 +810,7 @@ drain_scq(struct idt77252_dev *card, str
26416 else
26417 dev_kfree_skb(skb);
26418
26419- atomic_inc(&vcc->stats->tx);
26420+ atomic_inc_unchecked(&vcc->stats->tx);
26421 }
26422
26423 atomic_dec(&scq->used);
26424@@ -1073,13 +1073,13 @@ dequeue_rx(struct idt77252_dev *card, st
26425 if ((sb = dev_alloc_skb(64)) == NULL) {
26426 printk("%s: Can't allocate buffers for aal0.\n",
26427 card->name);
26428- atomic_add(i, &vcc->stats->rx_drop);
26429+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
26430 break;
26431 }
26432 if (!atm_charge(vcc, sb->truesize)) {
26433 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
26434 card->name);
26435- atomic_add(i - 1, &vcc->stats->rx_drop);
26436+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
26437 dev_kfree_skb(sb);
26438 break;
26439 }
26440@@ -1096,7 +1096,7 @@ dequeue_rx(struct idt77252_dev *card, st
26441 ATM_SKB(sb)->vcc = vcc;
26442 __net_timestamp(sb);
26443 vcc->push(vcc, sb);
26444- atomic_inc(&vcc->stats->rx);
26445+ atomic_inc_unchecked(&vcc->stats->rx);
26446
26447 cell += ATM_CELL_PAYLOAD;
26448 }
26449@@ -1133,13 +1133,13 @@ dequeue_rx(struct idt77252_dev *card, st
26450 "(CDC: %08x)\n",
26451 card->name, len, rpp->len, readl(SAR_REG_CDC));
26452 recycle_rx_pool_skb(card, rpp);
26453- atomic_inc(&vcc->stats->rx_err);
26454+ atomic_inc_unchecked(&vcc->stats->rx_err);
26455 return;
26456 }
26457 if (stat & SAR_RSQE_CRC) {
26458 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
26459 recycle_rx_pool_skb(card, rpp);
26460- atomic_inc(&vcc->stats->rx_err);
26461+ atomic_inc_unchecked(&vcc->stats->rx_err);
26462 return;
26463 }
26464 if (skb_queue_len(&rpp->queue) > 1) {
26465@@ -1150,7 +1150,7 @@ dequeue_rx(struct idt77252_dev *card, st
26466 RXPRINTK("%s: Can't alloc RX skb.\n",
26467 card->name);
26468 recycle_rx_pool_skb(card, rpp);
26469- atomic_inc(&vcc->stats->rx_err);
26470+ atomic_inc_unchecked(&vcc->stats->rx_err);
26471 return;
26472 }
26473 if (!atm_charge(vcc, skb->truesize)) {
26474@@ -1169,7 +1169,7 @@ dequeue_rx(struct idt77252_dev *card, st
26475 __net_timestamp(skb);
26476
26477 vcc->push(vcc, skb);
26478- atomic_inc(&vcc->stats->rx);
26479+ atomic_inc_unchecked(&vcc->stats->rx);
26480
26481 return;
26482 }
26483@@ -1191,7 +1191,7 @@ dequeue_rx(struct idt77252_dev *card, st
26484 __net_timestamp(skb);
26485
26486 vcc->push(vcc, skb);
26487- atomic_inc(&vcc->stats->rx);
26488+ atomic_inc_unchecked(&vcc->stats->rx);
26489
26490 if (skb->truesize > SAR_FB_SIZE_3)
26491 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
26492@@ -1303,14 +1303,14 @@ idt77252_rx_raw(struct idt77252_dev *car
26493 if (vcc->qos.aal != ATM_AAL0) {
26494 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
26495 card->name, vpi, vci);
26496- atomic_inc(&vcc->stats->rx_drop);
26497+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26498 goto drop;
26499 }
26500
26501 if ((sb = dev_alloc_skb(64)) == NULL) {
26502 printk("%s: Can't allocate buffers for AAL0.\n",
26503 card->name);
26504- atomic_inc(&vcc->stats->rx_err);
26505+ atomic_inc_unchecked(&vcc->stats->rx_err);
26506 goto drop;
26507 }
26508
26509@@ -1329,7 +1329,7 @@ idt77252_rx_raw(struct idt77252_dev *car
26510 ATM_SKB(sb)->vcc = vcc;
26511 __net_timestamp(sb);
26512 vcc->push(vcc, sb);
26513- atomic_inc(&vcc->stats->rx);
26514+ atomic_inc_unchecked(&vcc->stats->rx);
26515
26516 drop:
26517 skb_pull(queue, 64);
26518@@ -1954,13 +1954,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26519
26520 if (vc == NULL) {
26521 printk("%s: NULL connection in send().\n", card->name);
26522- atomic_inc(&vcc->stats->tx_err);
26523+ atomic_inc_unchecked(&vcc->stats->tx_err);
26524 dev_kfree_skb(skb);
26525 return -EINVAL;
26526 }
26527 if (!test_bit(VCF_TX, &vc->flags)) {
26528 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
26529- atomic_inc(&vcc->stats->tx_err);
26530+ atomic_inc_unchecked(&vcc->stats->tx_err);
26531 dev_kfree_skb(skb);
26532 return -EINVAL;
26533 }
26534@@ -1972,14 +1972,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26535 break;
26536 default:
26537 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
26538- atomic_inc(&vcc->stats->tx_err);
26539+ atomic_inc_unchecked(&vcc->stats->tx_err);
26540 dev_kfree_skb(skb);
26541 return -EINVAL;
26542 }
26543
26544 if (skb_shinfo(skb)->nr_frags != 0) {
26545 printk("%s: No scatter-gather yet.\n", card->name);
26546- atomic_inc(&vcc->stats->tx_err);
26547+ atomic_inc_unchecked(&vcc->stats->tx_err);
26548 dev_kfree_skb(skb);
26549 return -EINVAL;
26550 }
26551@@ -1987,7 +1987,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s
26552
26553 err = queue_skb(card, vc, skb, oam);
26554 if (err) {
26555- atomic_inc(&vcc->stats->tx_err);
26556+ atomic_inc_unchecked(&vcc->stats->tx_err);
26557 dev_kfree_skb(skb);
26558 return err;
26559 }
26560@@ -2010,7 +2010,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v
26561 skb = dev_alloc_skb(64);
26562 if (!skb) {
26563 printk("%s: Out of memory in send_oam().\n", card->name);
26564- atomic_inc(&vcc->stats->tx_err);
26565+ atomic_inc_unchecked(&vcc->stats->tx_err);
26566 return -ENOMEM;
26567 }
26568 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
26569diff -urNp linux-2.6.32.43/drivers/atm/iphase.c linux-2.6.32.43/drivers/atm/iphase.c
26570--- linux-2.6.32.43/drivers/atm/iphase.c 2011-03-27 14:31:47.000000000 -0400
26571+++ linux-2.6.32.43/drivers/atm/iphase.c 2011-04-17 15:56:46.000000000 -0400
26572@@ -1123,7 +1123,7 @@ static int rx_pkt(struct atm_dev *dev)
26573 status = (u_short) (buf_desc_ptr->desc_mode);
26574 if (status & (RX_CER | RX_PTE | RX_OFL))
26575 {
26576- atomic_inc(&vcc->stats->rx_err);
26577+ atomic_inc_unchecked(&vcc->stats->rx_err);
26578 IF_ERR(printk("IA: bad packet, dropping it");)
26579 if (status & RX_CER) {
26580 IF_ERR(printk(" cause: packet CRC error\n");)
26581@@ -1146,7 +1146,7 @@ static int rx_pkt(struct atm_dev *dev)
26582 len = dma_addr - buf_addr;
26583 if (len > iadev->rx_buf_sz) {
26584 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
26585- atomic_inc(&vcc->stats->rx_err);
26586+ atomic_inc_unchecked(&vcc->stats->rx_err);
26587 goto out_free_desc;
26588 }
26589
26590@@ -1296,7 +1296,7 @@ static void rx_dle_intr(struct atm_dev *
26591 ia_vcc = INPH_IA_VCC(vcc);
26592 if (ia_vcc == NULL)
26593 {
26594- atomic_inc(&vcc->stats->rx_err);
26595+ atomic_inc_unchecked(&vcc->stats->rx_err);
26596 dev_kfree_skb_any(skb);
26597 atm_return(vcc, atm_guess_pdu2truesize(len));
26598 goto INCR_DLE;
26599@@ -1308,7 +1308,7 @@ static void rx_dle_intr(struct atm_dev *
26600 if ((length > iadev->rx_buf_sz) || (length >
26601 (skb->len - sizeof(struct cpcs_trailer))))
26602 {
26603- atomic_inc(&vcc->stats->rx_err);
26604+ atomic_inc_unchecked(&vcc->stats->rx_err);
26605 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
26606 length, skb->len);)
26607 dev_kfree_skb_any(skb);
26608@@ -1324,7 +1324,7 @@ static void rx_dle_intr(struct atm_dev *
26609
26610 IF_RX(printk("rx_dle_intr: skb push");)
26611 vcc->push(vcc,skb);
26612- atomic_inc(&vcc->stats->rx);
26613+ atomic_inc_unchecked(&vcc->stats->rx);
26614 iadev->rx_pkt_cnt++;
26615 }
26616 INCR_DLE:
26617@@ -2806,15 +2806,15 @@ static int ia_ioctl(struct atm_dev *dev,
26618 {
26619 struct k_sonet_stats *stats;
26620 stats = &PRIV(_ia_dev[board])->sonet_stats;
26621- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
26622- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
26623- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
26624- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
26625- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
26626- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
26627- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
26628- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
26629- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
26630+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
26631+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
26632+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
26633+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
26634+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
26635+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
26636+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
26637+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
26638+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
26639 }
26640 ia_cmds.status = 0;
26641 break;
26642@@ -2919,7 +2919,7 @@ static int ia_pkt_tx (struct atm_vcc *vc
26643 if ((desc == 0) || (desc > iadev->num_tx_desc))
26644 {
26645 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
26646- atomic_inc(&vcc->stats->tx);
26647+ atomic_inc_unchecked(&vcc->stats->tx);
26648 if (vcc->pop)
26649 vcc->pop(vcc, skb);
26650 else
26651@@ -3024,14 +3024,14 @@ static int ia_pkt_tx (struct atm_vcc *vc
26652 ATM_DESC(skb) = vcc->vci;
26653 skb_queue_tail(&iadev->tx_dma_q, skb);
26654
26655- atomic_inc(&vcc->stats->tx);
26656+ atomic_inc_unchecked(&vcc->stats->tx);
26657 iadev->tx_pkt_cnt++;
26658 /* Increment transaction counter */
26659 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
26660
26661 #if 0
26662 /* add flow control logic */
26663- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
26664+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
26665 if (iavcc->vc_desc_cnt > 10) {
26666 vcc->tx_quota = vcc->tx_quota * 3 / 4;
26667 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
26668diff -urNp linux-2.6.32.43/drivers/atm/lanai.c linux-2.6.32.43/drivers/atm/lanai.c
26669--- linux-2.6.32.43/drivers/atm/lanai.c 2011-03-27 14:31:47.000000000 -0400
26670+++ linux-2.6.32.43/drivers/atm/lanai.c 2011-04-17 15:56:46.000000000 -0400
26671@@ -1305,7 +1305,7 @@ static void lanai_send_one_aal5(struct l
26672 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
26673 lanai_endtx(lanai, lvcc);
26674 lanai_free_skb(lvcc->tx.atmvcc, skb);
26675- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
26676+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
26677 }
26678
26679 /* Try to fill the buffer - don't call unless there is backlog */
26680@@ -1428,7 +1428,7 @@ static void vcc_rx_aal5(struct lanai_vcc
26681 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
26682 __net_timestamp(skb);
26683 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
26684- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
26685+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
26686 out:
26687 lvcc->rx.buf.ptr = end;
26688 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
26689@@ -1670,7 +1670,7 @@ static int handle_service(struct lanai_d
26690 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
26691 "vcc %d\n", lanai->number, (unsigned int) s, vci);
26692 lanai->stats.service_rxnotaal5++;
26693- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26694+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26695 return 0;
26696 }
26697 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
26698@@ -1682,7 +1682,7 @@ static int handle_service(struct lanai_d
26699 int bytes;
26700 read_unlock(&vcc_sklist_lock);
26701 DPRINTK("got trashed rx pdu on vci %d\n", vci);
26702- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26703+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26704 lvcc->stats.x.aal5.service_trash++;
26705 bytes = (SERVICE_GET_END(s) * 16) -
26706 (((unsigned long) lvcc->rx.buf.ptr) -
26707@@ -1694,7 +1694,7 @@ static int handle_service(struct lanai_d
26708 }
26709 if (s & SERVICE_STREAM) {
26710 read_unlock(&vcc_sklist_lock);
26711- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26712+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26713 lvcc->stats.x.aal5.service_stream++;
26714 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
26715 "PDU on VCI %d!\n", lanai->number, vci);
26716@@ -1702,7 +1702,7 @@ static int handle_service(struct lanai_d
26717 return 0;
26718 }
26719 DPRINTK("got rx crc error on vci %d\n", vci);
26720- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
26721+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
26722 lvcc->stats.x.aal5.service_rxcrc++;
26723 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
26724 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
26725diff -urNp linux-2.6.32.43/drivers/atm/nicstar.c linux-2.6.32.43/drivers/atm/nicstar.c
26726--- linux-2.6.32.43/drivers/atm/nicstar.c 2011-03-27 14:31:47.000000000 -0400
26727+++ linux-2.6.32.43/drivers/atm/nicstar.c 2011-04-17 15:56:46.000000000 -0400
26728@@ -1723,7 +1723,7 @@ static int ns_send(struct atm_vcc *vcc,
26729 if ((vc = (vc_map *) vcc->dev_data) == NULL)
26730 {
26731 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index);
26732- atomic_inc(&vcc->stats->tx_err);
26733+ atomic_inc_unchecked(&vcc->stats->tx_err);
26734 dev_kfree_skb_any(skb);
26735 return -EINVAL;
26736 }
26737@@ -1731,7 +1731,7 @@ static int ns_send(struct atm_vcc *vcc,
26738 if (!vc->tx)
26739 {
26740 printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index);
26741- atomic_inc(&vcc->stats->tx_err);
26742+ atomic_inc_unchecked(&vcc->stats->tx_err);
26743 dev_kfree_skb_any(skb);
26744 return -EINVAL;
26745 }
26746@@ -1739,7 +1739,7 @@ static int ns_send(struct atm_vcc *vcc,
26747 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0)
26748 {
26749 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index);
26750- atomic_inc(&vcc->stats->tx_err);
26751+ atomic_inc_unchecked(&vcc->stats->tx_err);
26752 dev_kfree_skb_any(skb);
26753 return -EINVAL;
26754 }
26755@@ -1747,7 +1747,7 @@ static int ns_send(struct atm_vcc *vcc,
26756 if (skb_shinfo(skb)->nr_frags != 0)
26757 {
26758 printk("nicstar%d: No scatter-gather yet.\n", card->index);
26759- atomic_inc(&vcc->stats->tx_err);
26760+ atomic_inc_unchecked(&vcc->stats->tx_err);
26761 dev_kfree_skb_any(skb);
26762 return -EINVAL;
26763 }
26764@@ -1792,11 +1792,11 @@ static int ns_send(struct atm_vcc *vcc,
26765
26766 if (push_scqe(card, vc, scq, &scqe, skb) != 0)
26767 {
26768- atomic_inc(&vcc->stats->tx_err);
26769+ atomic_inc_unchecked(&vcc->stats->tx_err);
26770 dev_kfree_skb_any(skb);
26771 return -EIO;
26772 }
26773- atomic_inc(&vcc->stats->tx);
26774+ atomic_inc_unchecked(&vcc->stats->tx);
26775
26776 return 0;
26777 }
26778@@ -2111,14 +2111,14 @@ static void dequeue_rx(ns_dev *card, ns_
26779 {
26780 printk("nicstar%d: Can't allocate buffers for aal0.\n",
26781 card->index);
26782- atomic_add(i,&vcc->stats->rx_drop);
26783+ atomic_add_unchecked(i,&vcc->stats->rx_drop);
26784 break;
26785 }
26786 if (!atm_charge(vcc, sb->truesize))
26787 {
26788 RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n",
26789 card->index);
26790- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26791+ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */
26792 dev_kfree_skb_any(sb);
26793 break;
26794 }
26795@@ -2133,7 +2133,7 @@ static void dequeue_rx(ns_dev *card, ns_
26796 ATM_SKB(sb)->vcc = vcc;
26797 __net_timestamp(sb);
26798 vcc->push(vcc, sb);
26799- atomic_inc(&vcc->stats->rx);
26800+ atomic_inc_unchecked(&vcc->stats->rx);
26801 cell += ATM_CELL_PAYLOAD;
26802 }
26803
26804@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev *card, ns_
26805 if (iovb == NULL)
26806 {
26807 printk("nicstar%d: Out of iovec buffers.\n", card->index);
26808- atomic_inc(&vcc->stats->rx_drop);
26809+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26810 recycle_rx_buf(card, skb);
26811 return;
26812 }
26813@@ -2182,7 +2182,7 @@ static void dequeue_rx(ns_dev *card, ns_
26814 else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS)
26815 {
26816 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
26817- atomic_inc(&vcc->stats->rx_err);
26818+ atomic_inc_unchecked(&vcc->stats->rx_err);
26819 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS);
26820 NS_SKB(iovb)->iovcnt = 0;
26821 iovb->len = 0;
26822@@ -2202,7 +2202,7 @@ static void dequeue_rx(ns_dev *card, ns_
26823 printk("nicstar%d: Expected a small buffer, and this is not one.\n",
26824 card->index);
26825 which_list(card, skb);
26826- atomic_inc(&vcc->stats->rx_err);
26827+ atomic_inc_unchecked(&vcc->stats->rx_err);
26828 recycle_rx_buf(card, skb);
26829 vc->rx_iov = NULL;
26830 recycle_iov_buf(card, iovb);
26831@@ -2216,7 +2216,7 @@ static void dequeue_rx(ns_dev *card, ns_
26832 printk("nicstar%d: Expected a large buffer, and this is not one.\n",
26833 card->index);
26834 which_list(card, skb);
26835- atomic_inc(&vcc->stats->rx_err);
26836+ atomic_inc_unchecked(&vcc->stats->rx_err);
26837 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26838 NS_SKB(iovb)->iovcnt);
26839 vc->rx_iov = NULL;
26840@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev *card, ns_
26841 printk(" - PDU size mismatch.\n");
26842 else
26843 printk(".\n");
26844- atomic_inc(&vcc->stats->rx_err);
26845+ atomic_inc_unchecked(&vcc->stats->rx_err);
26846 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26847 NS_SKB(iovb)->iovcnt);
26848 vc->rx_iov = NULL;
26849@@ -2256,7 +2256,7 @@ static void dequeue_rx(ns_dev *card, ns_
26850 if (!atm_charge(vcc, skb->truesize))
26851 {
26852 push_rxbufs(card, skb);
26853- atomic_inc(&vcc->stats->rx_drop);
26854+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26855 }
26856 else
26857 {
26858@@ -2268,7 +2268,7 @@ static void dequeue_rx(ns_dev *card, ns_
26859 ATM_SKB(skb)->vcc = vcc;
26860 __net_timestamp(skb);
26861 vcc->push(vcc, skb);
26862- atomic_inc(&vcc->stats->rx);
26863+ atomic_inc_unchecked(&vcc->stats->rx);
26864 }
26865 }
26866 else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */
26867@@ -2283,7 +2283,7 @@ static void dequeue_rx(ns_dev *card, ns_
26868 if (!atm_charge(vcc, sb->truesize))
26869 {
26870 push_rxbufs(card, sb);
26871- atomic_inc(&vcc->stats->rx_drop);
26872+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26873 }
26874 else
26875 {
26876@@ -2295,7 +2295,7 @@ static void dequeue_rx(ns_dev *card, ns_
26877 ATM_SKB(sb)->vcc = vcc;
26878 __net_timestamp(sb);
26879 vcc->push(vcc, sb);
26880- atomic_inc(&vcc->stats->rx);
26881+ atomic_inc_unchecked(&vcc->stats->rx);
26882 }
26883
26884 push_rxbufs(card, skb);
26885@@ -2306,7 +2306,7 @@ static void dequeue_rx(ns_dev *card, ns_
26886 if (!atm_charge(vcc, skb->truesize))
26887 {
26888 push_rxbufs(card, skb);
26889- atomic_inc(&vcc->stats->rx_drop);
26890+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26891 }
26892 else
26893 {
26894@@ -2320,7 +2320,7 @@ static void dequeue_rx(ns_dev *card, ns_
26895 ATM_SKB(skb)->vcc = vcc;
26896 __net_timestamp(skb);
26897 vcc->push(vcc, skb);
26898- atomic_inc(&vcc->stats->rx);
26899+ atomic_inc_unchecked(&vcc->stats->rx);
26900 }
26901
26902 push_rxbufs(card, sb);
26903@@ -2342,7 +2342,7 @@ static void dequeue_rx(ns_dev *card, ns_
26904 if (hb == NULL)
26905 {
26906 printk("nicstar%d: Out of huge buffers.\n", card->index);
26907- atomic_inc(&vcc->stats->rx_drop);
26908+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26909 recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data,
26910 NS_SKB(iovb)->iovcnt);
26911 vc->rx_iov = NULL;
26912@@ -2393,7 +2393,7 @@ static void dequeue_rx(ns_dev *card, ns_
26913 }
26914 else
26915 dev_kfree_skb_any(hb);
26916- atomic_inc(&vcc->stats->rx_drop);
26917+ atomic_inc_unchecked(&vcc->stats->rx_drop);
26918 }
26919 else
26920 {
26921@@ -2427,7 +2427,7 @@ static void dequeue_rx(ns_dev *card, ns_
26922 #endif /* NS_USE_DESTRUCTORS */
26923 __net_timestamp(hb);
26924 vcc->push(vcc, hb);
26925- atomic_inc(&vcc->stats->rx);
26926+ atomic_inc_unchecked(&vcc->stats->rx);
26927 }
26928 }
26929
26930diff -urNp linux-2.6.32.43/drivers/atm/solos-pci.c linux-2.6.32.43/drivers/atm/solos-pci.c
26931--- linux-2.6.32.43/drivers/atm/solos-pci.c 2011-04-17 17:00:52.000000000 -0400
26932+++ linux-2.6.32.43/drivers/atm/solos-pci.c 2011-05-16 21:46:57.000000000 -0400
26933@@ -708,7 +708,7 @@ void solos_bh(unsigned long card_arg)
26934 }
26935 atm_charge(vcc, skb->truesize);
26936 vcc->push(vcc, skb);
26937- atomic_inc(&vcc->stats->rx);
26938+ atomic_inc_unchecked(&vcc->stats->rx);
26939 break;
26940
26941 case PKT_STATUS:
26942@@ -914,6 +914,8 @@ static int print_buffer(struct sk_buff *
26943 char msg[500];
26944 char item[10];
26945
26946+ pax_track_stack();
26947+
26948 len = buf->len;
26949 for (i = 0; i < len; i++){
26950 if(i % 8 == 0)
26951@@ -1023,7 +1025,7 @@ static uint32_t fpga_tx(struct solos_car
26952 vcc = SKB_CB(oldskb)->vcc;
26953
26954 if (vcc) {
26955- atomic_inc(&vcc->stats->tx);
26956+ atomic_inc_unchecked(&vcc->stats->tx);
26957 solos_pop(vcc, oldskb);
26958 } else
26959 dev_kfree_skb_irq(oldskb);
26960diff -urNp linux-2.6.32.43/drivers/atm/suni.c linux-2.6.32.43/drivers/atm/suni.c
26961--- linux-2.6.32.43/drivers/atm/suni.c 2011-03-27 14:31:47.000000000 -0400
26962+++ linux-2.6.32.43/drivers/atm/suni.c 2011-04-17 15:56:46.000000000 -0400
26963@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
26964
26965
26966 #define ADD_LIMITED(s,v) \
26967- atomic_add((v),&stats->s); \
26968- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
26969+ atomic_add_unchecked((v),&stats->s); \
26970+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
26971
26972
26973 static void suni_hz(unsigned long from_timer)
26974diff -urNp linux-2.6.32.43/drivers/atm/uPD98402.c linux-2.6.32.43/drivers/atm/uPD98402.c
26975--- linux-2.6.32.43/drivers/atm/uPD98402.c 2011-03-27 14:31:47.000000000 -0400
26976+++ linux-2.6.32.43/drivers/atm/uPD98402.c 2011-04-17 15:56:46.000000000 -0400
26977@@ -41,7 +41,7 @@ static int fetch_stats(struct atm_dev *d
26978 struct sonet_stats tmp;
26979 int error = 0;
26980
26981- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26982+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
26983 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
26984 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
26985 if (zero && !error) {
26986@@ -160,9 +160,9 @@ static int uPD98402_ioctl(struct atm_dev
26987
26988
26989 #define ADD_LIMITED(s,v) \
26990- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
26991- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
26992- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26993+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
26994+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
26995+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
26996
26997
26998 static void stat_event(struct atm_dev *dev)
26999@@ -193,7 +193,7 @@ static void uPD98402_int(struct atm_dev
27000 if (reason & uPD98402_INT_PFM) stat_event(dev);
27001 if (reason & uPD98402_INT_PCO) {
27002 (void) GET(PCOCR); /* clear interrupt cause */
27003- atomic_add(GET(HECCT),
27004+ atomic_add_unchecked(GET(HECCT),
27005 &PRIV(dev)->sonet_stats.uncorr_hcs);
27006 }
27007 if ((reason & uPD98402_INT_RFO) &&
27008@@ -221,9 +221,9 @@ static int uPD98402_start(struct atm_dev
27009 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
27010 uPD98402_INT_LOS),PIMR); /* enable them */
27011 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
27012- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
27013- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
27014- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
27015+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
27016+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
27017+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
27018 return 0;
27019 }
27020
27021diff -urNp linux-2.6.32.43/drivers/atm/zatm.c linux-2.6.32.43/drivers/atm/zatm.c
27022--- linux-2.6.32.43/drivers/atm/zatm.c 2011-03-27 14:31:47.000000000 -0400
27023+++ linux-2.6.32.43/drivers/atm/zatm.c 2011-04-17 15:56:46.000000000 -0400
27024@@ -458,7 +458,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
27025 }
27026 if (!size) {
27027 dev_kfree_skb_irq(skb);
27028- if (vcc) atomic_inc(&vcc->stats->rx_err);
27029+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
27030 continue;
27031 }
27032 if (!atm_charge(vcc,skb->truesize)) {
27033@@ -468,7 +468,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy
27034 skb->len = size;
27035 ATM_SKB(skb)->vcc = vcc;
27036 vcc->push(vcc,skb);
27037- atomic_inc(&vcc->stats->rx);
27038+ atomic_inc_unchecked(&vcc->stats->rx);
27039 }
27040 zout(pos & 0xffff,MTA(mbx));
27041 #if 0 /* probably a stupid idea */
27042@@ -732,7 +732,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD
27043 skb_queue_head(&zatm_vcc->backlog,skb);
27044 break;
27045 }
27046- atomic_inc(&vcc->stats->tx);
27047+ atomic_inc_unchecked(&vcc->stats->tx);
27048 wake_up(&zatm_vcc->tx_wait);
27049 }
27050
27051diff -urNp linux-2.6.32.43/drivers/base/bus.c linux-2.6.32.43/drivers/base/bus.c
27052--- linux-2.6.32.43/drivers/base/bus.c 2011-03-27 14:31:47.000000000 -0400
27053+++ linux-2.6.32.43/drivers/base/bus.c 2011-04-17 15:56:46.000000000 -0400
27054@@ -70,7 +70,7 @@ static ssize_t drv_attr_store(struct kob
27055 return ret;
27056 }
27057
27058-static struct sysfs_ops driver_sysfs_ops = {
27059+static const struct sysfs_ops driver_sysfs_ops = {
27060 .show = drv_attr_show,
27061 .store = drv_attr_store,
27062 };
27063@@ -115,7 +115,7 @@ static ssize_t bus_attr_store(struct kob
27064 return ret;
27065 }
27066
27067-static struct sysfs_ops bus_sysfs_ops = {
27068+static const struct sysfs_ops bus_sysfs_ops = {
27069 .show = bus_attr_show,
27070 .store = bus_attr_store,
27071 };
27072@@ -154,7 +154,7 @@ static int bus_uevent_filter(struct kset
27073 return 0;
27074 }
27075
27076-static struct kset_uevent_ops bus_uevent_ops = {
27077+static const struct kset_uevent_ops bus_uevent_ops = {
27078 .filter = bus_uevent_filter,
27079 };
27080
27081diff -urNp linux-2.6.32.43/drivers/base/class.c linux-2.6.32.43/drivers/base/class.c
27082--- linux-2.6.32.43/drivers/base/class.c 2011-03-27 14:31:47.000000000 -0400
27083+++ linux-2.6.32.43/drivers/base/class.c 2011-04-17 15:56:46.000000000 -0400
27084@@ -63,7 +63,7 @@ static void class_release(struct kobject
27085 kfree(cp);
27086 }
27087
27088-static struct sysfs_ops class_sysfs_ops = {
27089+static const struct sysfs_ops class_sysfs_ops = {
27090 .show = class_attr_show,
27091 .store = class_attr_store,
27092 };
27093diff -urNp linux-2.6.32.43/drivers/base/core.c linux-2.6.32.43/drivers/base/core.c
27094--- linux-2.6.32.43/drivers/base/core.c 2011-03-27 14:31:47.000000000 -0400
27095+++ linux-2.6.32.43/drivers/base/core.c 2011-04-17 15:56:46.000000000 -0400
27096@@ -100,7 +100,7 @@ static ssize_t dev_attr_store(struct kob
27097 return ret;
27098 }
27099
27100-static struct sysfs_ops dev_sysfs_ops = {
27101+static const struct sysfs_ops dev_sysfs_ops = {
27102 .show = dev_attr_show,
27103 .store = dev_attr_store,
27104 };
27105@@ -252,7 +252,7 @@ static int dev_uevent(struct kset *kset,
27106 return retval;
27107 }
27108
27109-static struct kset_uevent_ops device_uevent_ops = {
27110+static const struct kset_uevent_ops device_uevent_ops = {
27111 .filter = dev_uevent_filter,
27112 .name = dev_uevent_name,
27113 .uevent = dev_uevent,
27114diff -urNp linux-2.6.32.43/drivers/base/memory.c linux-2.6.32.43/drivers/base/memory.c
27115--- linux-2.6.32.43/drivers/base/memory.c 2011-03-27 14:31:47.000000000 -0400
27116+++ linux-2.6.32.43/drivers/base/memory.c 2011-04-17 15:56:46.000000000 -0400
27117@@ -44,7 +44,7 @@ static int memory_uevent(struct kset *ks
27118 return retval;
27119 }
27120
27121-static struct kset_uevent_ops memory_uevent_ops = {
27122+static const struct kset_uevent_ops memory_uevent_ops = {
27123 .name = memory_uevent_name,
27124 .uevent = memory_uevent,
27125 };
27126diff -urNp linux-2.6.32.43/drivers/base/sys.c linux-2.6.32.43/drivers/base/sys.c
27127--- linux-2.6.32.43/drivers/base/sys.c 2011-03-27 14:31:47.000000000 -0400
27128+++ linux-2.6.32.43/drivers/base/sys.c 2011-04-17 15:56:46.000000000 -0400
27129@@ -54,7 +54,7 @@ sysdev_store(struct kobject *kobj, struc
27130 return -EIO;
27131 }
27132
27133-static struct sysfs_ops sysfs_ops = {
27134+static const struct sysfs_ops sysfs_ops = {
27135 .show = sysdev_show,
27136 .store = sysdev_store,
27137 };
27138@@ -104,7 +104,7 @@ static ssize_t sysdev_class_store(struct
27139 return -EIO;
27140 }
27141
27142-static struct sysfs_ops sysfs_class_ops = {
27143+static const struct sysfs_ops sysfs_class_ops = {
27144 .show = sysdev_class_show,
27145 .store = sysdev_class_store,
27146 };
27147diff -urNp linux-2.6.32.43/drivers/block/cciss.c linux-2.6.32.43/drivers/block/cciss.c
27148--- linux-2.6.32.43/drivers/block/cciss.c 2011-03-27 14:31:47.000000000 -0400
27149+++ linux-2.6.32.43/drivers/block/cciss.c 2011-08-05 20:33:55.000000000 -0400
27150@@ -1011,6 +1011,8 @@ static int cciss_ioctl32_passthru(struct
27151 int err;
27152 u32 cp;
27153
27154+ memset(&arg64, 0, sizeof(arg64));
27155+
27156 err = 0;
27157 err |=
27158 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
27159@@ -2852,7 +2854,7 @@ static unsigned long pollcomplete(int ct
27160 /* Wait (up to 20 seconds) for a command to complete */
27161
27162 for (i = 20 * HZ; i > 0; i--) {
27163- done = hba[ctlr]->access.command_completed(hba[ctlr]);
27164+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
27165 if (done == FIFO_EMPTY)
27166 schedule_timeout_uninterruptible(1);
27167 else
27168@@ -2876,7 +2878,7 @@ static int sendcmd_core(ctlr_info_t *h,
27169 resend_cmd1:
27170
27171 /* Disable interrupt on the board. */
27172- h->access.set_intr_mask(h, CCISS_INTR_OFF);
27173+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
27174
27175 /* Make sure there is room in the command FIFO */
27176 /* Actually it should be completely empty at this time */
27177@@ -2884,13 +2886,13 @@ resend_cmd1:
27178 /* tape side of the driver. */
27179 for (i = 200000; i > 0; i--) {
27180 /* if fifo isn't full go */
27181- if (!(h->access.fifo_full(h)))
27182+ if (!(h->access->fifo_full(h)))
27183 break;
27184 udelay(10);
27185 printk(KERN_WARNING "cciss cciss%d: SendCmd FIFO full,"
27186 " waiting!\n", h->ctlr);
27187 }
27188- h->access.submit_command(h, c); /* Send the cmd */
27189+ h->access->submit_command(h, c); /* Send the cmd */
27190 do {
27191 complete = pollcomplete(h->ctlr);
27192
27193@@ -3023,7 +3025,7 @@ static void start_io(ctlr_info_t *h)
27194 while (!hlist_empty(&h->reqQ)) {
27195 c = hlist_entry(h->reqQ.first, CommandList_struct, list);
27196 /* can't do anything if fifo is full */
27197- if ((h->access.fifo_full(h))) {
27198+ if ((h->access->fifo_full(h))) {
27199 printk(KERN_WARNING "cciss: fifo full\n");
27200 break;
27201 }
27202@@ -3033,7 +3035,7 @@ static void start_io(ctlr_info_t *h)
27203 h->Qdepth--;
27204
27205 /* Tell the controller execute command */
27206- h->access.submit_command(h, c);
27207+ h->access->submit_command(h, c);
27208
27209 /* Put job onto the completed Q */
27210 addQ(&h->cmpQ, c);
27211@@ -3393,17 +3395,17 @@ startio:
27212
27213 static inline unsigned long get_next_completion(ctlr_info_t *h)
27214 {
27215- return h->access.command_completed(h);
27216+ return h->access->command_completed(h);
27217 }
27218
27219 static inline int interrupt_pending(ctlr_info_t *h)
27220 {
27221- return h->access.intr_pending(h);
27222+ return h->access->intr_pending(h);
27223 }
27224
27225 static inline long interrupt_not_for_us(ctlr_info_t *h)
27226 {
27227- return (((h->access.intr_pending(h) == 0) ||
27228+ return (((h->access->intr_pending(h) == 0) ||
27229 (h->interrupts_enabled == 0)));
27230 }
27231
27232@@ -3892,7 +3894,7 @@ static int __devinit cciss_pci_init(ctlr
27233 */
27234 c->max_commands = readl(&(c->cfgtable->CmdsOutMax));
27235 c->product_name = products[prod_index].product_name;
27236- c->access = *(products[prod_index].access);
27237+ c->access = products[prod_index].access;
27238 c->nr_cmds = c->max_commands - 4;
27239 if ((readb(&c->cfgtable->Signature[0]) != 'C') ||
27240 (readb(&c->cfgtable->Signature[1]) != 'I') ||
27241@@ -4291,7 +4293,7 @@ static int __devinit cciss_init_one(stru
27242 }
27243
27244 /* make sure the board interrupts are off */
27245- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_OFF);
27246+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_OFF);
27247 if (request_irq(hba[i]->intr[SIMPLE_MODE_INT], do_cciss_intr,
27248 IRQF_DISABLED | IRQF_SHARED, hba[i]->devname, hba[i])) {
27249 printk(KERN_ERR "cciss: Unable to get irq %d for %s\n",
27250@@ -4341,7 +4343,7 @@ static int __devinit cciss_init_one(stru
27251 cciss_scsi_setup(i);
27252
27253 /* Turn the interrupts on so we can service requests */
27254- hba[i]->access.set_intr_mask(hba[i], CCISS_INTR_ON);
27255+ hba[i]->access->set_intr_mask(hba[i], CCISS_INTR_ON);
27256
27257 /* Get the firmware version */
27258 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
27259diff -urNp linux-2.6.32.43/drivers/block/cciss.h linux-2.6.32.43/drivers/block/cciss.h
27260--- linux-2.6.32.43/drivers/block/cciss.h 2011-04-17 17:00:52.000000000 -0400
27261+++ linux-2.6.32.43/drivers/block/cciss.h 2011-08-05 20:33:55.000000000 -0400
27262@@ -90,7 +90,7 @@ struct ctlr_info
27263 // information about each logical volume
27264 drive_info_struct *drv[CISS_MAX_LUN];
27265
27266- struct access_method access;
27267+ struct access_method *access;
27268
27269 /* queue and queue Info */
27270 struct hlist_head reqQ;
27271diff -urNp linux-2.6.32.43/drivers/block/cpqarray.c linux-2.6.32.43/drivers/block/cpqarray.c
27272--- linux-2.6.32.43/drivers/block/cpqarray.c 2011-03-27 14:31:47.000000000 -0400
27273+++ linux-2.6.32.43/drivers/block/cpqarray.c 2011-08-05 20:33:55.000000000 -0400
27274@@ -402,7 +402,7 @@ static int __init cpqarray_register_ctlr
27275 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
27276 goto Enomem4;
27277 }
27278- hba[i]->access.set_intr_mask(hba[i], 0);
27279+ hba[i]->access->set_intr_mask(hba[i], 0);
27280 if (request_irq(hba[i]->intr, do_ida_intr,
27281 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
27282 {
27283@@ -460,7 +460,7 @@ static int __init cpqarray_register_ctlr
27284 add_timer(&hba[i]->timer);
27285
27286 /* Enable IRQ now that spinlock and rate limit timer are set up */
27287- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27288+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
27289
27290 for(j=0; j<NWD; j++) {
27291 struct gendisk *disk = ida_gendisk[i][j];
27292@@ -695,7 +695,7 @@ DBGINFO(
27293 for(i=0; i<NR_PRODUCTS; i++) {
27294 if (board_id == products[i].board_id) {
27295 c->product_name = products[i].product_name;
27296- c->access = *(products[i].access);
27297+ c->access = products[i].access;
27298 break;
27299 }
27300 }
27301@@ -793,7 +793,7 @@ static int __init cpqarray_eisa_detect(v
27302 hba[ctlr]->intr = intr;
27303 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
27304 hba[ctlr]->product_name = products[j].product_name;
27305- hba[ctlr]->access = *(products[j].access);
27306+ hba[ctlr]->access = products[j].access;
27307 hba[ctlr]->ctlr = ctlr;
27308 hba[ctlr]->board_id = board_id;
27309 hba[ctlr]->pci_dev = NULL; /* not PCI */
27310@@ -896,6 +896,8 @@ static void do_ida_request(struct reques
27311 struct scatterlist tmp_sg[SG_MAX];
27312 int i, dir, seg;
27313
27314+ pax_track_stack();
27315+
27316 if (blk_queue_plugged(q))
27317 goto startio;
27318
27319@@ -968,7 +970,7 @@ static void start_io(ctlr_info_t *h)
27320
27321 while((c = h->reqQ) != NULL) {
27322 /* Can't do anything if we're busy */
27323- if (h->access.fifo_full(h) == 0)
27324+ if (h->access->fifo_full(h) == 0)
27325 return;
27326
27327 /* Get the first entry from the request Q */
27328@@ -976,7 +978,7 @@ static void start_io(ctlr_info_t *h)
27329 h->Qdepth--;
27330
27331 /* Tell the controller to do our bidding */
27332- h->access.submit_command(h, c);
27333+ h->access->submit_command(h, c);
27334
27335 /* Get onto the completion Q */
27336 addQ(&h->cmpQ, c);
27337@@ -1038,7 +1040,7 @@ static irqreturn_t do_ida_intr(int irq,
27338 unsigned long flags;
27339 __u32 a,a1;
27340
27341- istat = h->access.intr_pending(h);
27342+ istat = h->access->intr_pending(h);
27343 /* Is this interrupt for us? */
27344 if (istat == 0)
27345 return IRQ_NONE;
27346@@ -1049,7 +1051,7 @@ static irqreturn_t do_ida_intr(int irq,
27347 */
27348 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
27349 if (istat & FIFO_NOT_EMPTY) {
27350- while((a = h->access.command_completed(h))) {
27351+ while((a = h->access->command_completed(h))) {
27352 a1 = a; a &= ~3;
27353 if ((c = h->cmpQ) == NULL)
27354 {
27355@@ -1434,11 +1436,11 @@ static int sendcmd(
27356 /*
27357 * Disable interrupt
27358 */
27359- info_p->access.set_intr_mask(info_p, 0);
27360+ info_p->access->set_intr_mask(info_p, 0);
27361 /* Make sure there is room in the command FIFO */
27362 /* Actually it should be completely empty at this time. */
27363 for (i = 200000; i > 0; i--) {
27364- temp = info_p->access.fifo_full(info_p);
27365+ temp = info_p->access->fifo_full(info_p);
27366 if (temp != 0) {
27367 break;
27368 }
27369@@ -1451,7 +1453,7 @@ DBG(
27370 /*
27371 * Send the cmd
27372 */
27373- info_p->access.submit_command(info_p, c);
27374+ info_p->access->submit_command(info_p, c);
27375 complete = pollcomplete(ctlr);
27376
27377 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
27378@@ -1534,9 +1536,9 @@ static int revalidate_allvol(ctlr_info_t
27379 * we check the new geometry. Then turn interrupts back on when
27380 * we're done.
27381 */
27382- host->access.set_intr_mask(host, 0);
27383+ host->access->set_intr_mask(host, 0);
27384 getgeometry(ctlr);
27385- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
27386+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
27387
27388 for(i=0; i<NWD; i++) {
27389 struct gendisk *disk = ida_gendisk[ctlr][i];
27390@@ -1576,7 +1578,7 @@ static int pollcomplete(int ctlr)
27391 /* Wait (up to 2 seconds) for a command to complete */
27392
27393 for (i = 200000; i > 0; i--) {
27394- done = hba[ctlr]->access.command_completed(hba[ctlr]);
27395+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
27396 if (done == 0) {
27397 udelay(10); /* a short fixed delay */
27398 } else
27399diff -urNp linux-2.6.32.43/drivers/block/cpqarray.h linux-2.6.32.43/drivers/block/cpqarray.h
27400--- linux-2.6.32.43/drivers/block/cpqarray.h 2011-03-27 14:31:47.000000000 -0400
27401+++ linux-2.6.32.43/drivers/block/cpqarray.h 2011-08-05 20:33:55.000000000 -0400
27402@@ -99,7 +99,7 @@ struct ctlr_info {
27403 drv_info_t drv[NWD];
27404 struct proc_dir_entry *proc;
27405
27406- struct access_method access;
27407+ struct access_method *access;
27408
27409 cmdlist_t *reqQ;
27410 cmdlist_t *cmpQ;
27411diff -urNp linux-2.6.32.43/drivers/block/DAC960.c linux-2.6.32.43/drivers/block/DAC960.c
27412--- linux-2.6.32.43/drivers/block/DAC960.c 2011-03-27 14:31:47.000000000 -0400
27413+++ linux-2.6.32.43/drivers/block/DAC960.c 2011-05-16 21:46:57.000000000 -0400
27414@@ -1973,6 +1973,8 @@ static bool DAC960_V1_ReadDeviceConfigur
27415 unsigned long flags;
27416 int Channel, TargetID;
27417
27418+ pax_track_stack();
27419+
27420 if (!init_dma_loaf(Controller->PCIDevice, &local_dma,
27421 DAC960_V1_MaxChannels*(sizeof(DAC960_V1_DCDB_T) +
27422 sizeof(DAC960_SCSI_Inquiry_T) +
27423diff -urNp linux-2.6.32.43/drivers/block/nbd.c linux-2.6.32.43/drivers/block/nbd.c
27424--- linux-2.6.32.43/drivers/block/nbd.c 2011-06-25 12:55:34.000000000 -0400
27425+++ linux-2.6.32.43/drivers/block/nbd.c 2011-06-25 12:56:37.000000000 -0400
27426@@ -155,6 +155,8 @@ static int sock_xmit(struct nbd_device *
27427 struct kvec iov;
27428 sigset_t blocked, oldset;
27429
27430+ pax_track_stack();
27431+
27432 if (unlikely(!sock)) {
27433 printk(KERN_ERR "%s: Attempted %s on closed socket in sock_xmit\n",
27434 lo->disk->disk_name, (send ? "send" : "recv"));
27435@@ -569,6 +571,8 @@ static void do_nbd_request(struct reques
27436 static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo,
27437 unsigned int cmd, unsigned long arg)
27438 {
27439+ pax_track_stack();
27440+
27441 switch (cmd) {
27442 case NBD_DISCONNECT: {
27443 struct request sreq;
27444diff -urNp linux-2.6.32.43/drivers/block/pktcdvd.c linux-2.6.32.43/drivers/block/pktcdvd.c
27445--- linux-2.6.32.43/drivers/block/pktcdvd.c 2011-03-27 14:31:47.000000000 -0400
27446+++ linux-2.6.32.43/drivers/block/pktcdvd.c 2011-04-17 15:56:46.000000000 -0400
27447@@ -284,7 +284,7 @@ static ssize_t kobj_pkt_store(struct kob
27448 return len;
27449 }
27450
27451-static struct sysfs_ops kobj_pkt_ops = {
27452+static const struct sysfs_ops kobj_pkt_ops = {
27453 .show = kobj_pkt_show,
27454 .store = kobj_pkt_store
27455 };
27456diff -urNp linux-2.6.32.43/drivers/char/agp/frontend.c linux-2.6.32.43/drivers/char/agp/frontend.c
27457--- linux-2.6.32.43/drivers/char/agp/frontend.c 2011-03-27 14:31:47.000000000 -0400
27458+++ linux-2.6.32.43/drivers/char/agp/frontend.c 2011-04-17 15:56:46.000000000 -0400
27459@@ -824,7 +824,7 @@ static int agpioc_reserve_wrap(struct ag
27460 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
27461 return -EFAULT;
27462
27463- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
27464+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
27465 return -EFAULT;
27466
27467 client = agp_find_client_by_pid(reserve.pid);
27468diff -urNp linux-2.6.32.43/drivers/char/briq_panel.c linux-2.6.32.43/drivers/char/briq_panel.c
27469--- linux-2.6.32.43/drivers/char/briq_panel.c 2011-03-27 14:31:47.000000000 -0400
27470+++ linux-2.6.32.43/drivers/char/briq_panel.c 2011-04-18 19:48:57.000000000 -0400
27471@@ -10,6 +10,7 @@
27472 #include <linux/types.h>
27473 #include <linux/errno.h>
27474 #include <linux/tty.h>
27475+#include <linux/mutex.h>
27476 #include <linux/timer.h>
27477 #include <linux/kernel.h>
27478 #include <linux/wait.h>
27479@@ -36,6 +37,7 @@ static int vfd_is_open;
27480 static unsigned char vfd[40];
27481 static int vfd_cursor;
27482 static unsigned char ledpb, led;
27483+static DEFINE_MUTEX(vfd_mutex);
27484
27485 static void update_vfd(void)
27486 {
27487@@ -142,12 +144,15 @@ static ssize_t briq_panel_write(struct f
27488 if (!vfd_is_open)
27489 return -EBUSY;
27490
27491+ mutex_lock(&vfd_mutex);
27492 for (;;) {
27493 char c;
27494 if (!indx)
27495 break;
27496- if (get_user(c, buf))
27497+ if (get_user(c, buf)) {
27498+ mutex_unlock(&vfd_mutex);
27499 return -EFAULT;
27500+ }
27501 if (esc) {
27502 set_led(c);
27503 esc = 0;
27504@@ -177,6 +182,7 @@ static ssize_t briq_panel_write(struct f
27505 buf++;
27506 }
27507 update_vfd();
27508+ mutex_unlock(&vfd_mutex);
27509
27510 return len;
27511 }
27512diff -urNp linux-2.6.32.43/drivers/char/genrtc.c linux-2.6.32.43/drivers/char/genrtc.c
27513--- linux-2.6.32.43/drivers/char/genrtc.c 2011-03-27 14:31:47.000000000 -0400
27514+++ linux-2.6.32.43/drivers/char/genrtc.c 2011-04-18 19:45:42.000000000 -0400
27515@@ -272,6 +272,7 @@ static int gen_rtc_ioctl(struct inode *i
27516 switch (cmd) {
27517
27518 case RTC_PLL_GET:
27519+ memset(&pll, 0, sizeof(pll));
27520 if (get_rtc_pll(&pll))
27521 return -EINVAL;
27522 else
27523diff -urNp linux-2.6.32.43/drivers/char/hpet.c linux-2.6.32.43/drivers/char/hpet.c
27524--- linux-2.6.32.43/drivers/char/hpet.c 2011-03-27 14:31:47.000000000 -0400
27525+++ linux-2.6.32.43/drivers/char/hpet.c 2011-04-23 12:56:11.000000000 -0400
27526@@ -430,7 +430,7 @@ static int hpet_release(struct inode *in
27527 return 0;
27528 }
27529
27530-static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int);
27531+static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int);
27532
27533 static int
27534 hpet_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
27535@@ -565,7 +565,7 @@ static inline unsigned long hpet_time_di
27536 }
27537
27538 static int
27539-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel)
27540+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel)
27541 {
27542 struct hpet_timer __iomem *timer;
27543 struct hpet __iomem *hpet;
27544@@ -608,11 +608,11 @@ hpet_ioctl_common(struct hpet_dev *devp,
27545 {
27546 struct hpet_info info;
27547
27548+ memset(&info, 0, sizeof(info));
27549+
27550 if (devp->hd_ireqfreq)
27551 info.hi_ireqfreq =
27552 hpet_time_div(hpetp, devp->hd_ireqfreq);
27553- else
27554- info.hi_ireqfreq = 0;
27555 info.hi_flags =
27556 readq(&timer->hpet_config) & Tn_PER_INT_CAP_MASK;
27557 info.hi_hpet = hpetp->hp_which;
27558diff -urNp linux-2.6.32.43/drivers/char/hvc_beat.c linux-2.6.32.43/drivers/char/hvc_beat.c
27559--- linux-2.6.32.43/drivers/char/hvc_beat.c 2011-03-27 14:31:47.000000000 -0400
27560+++ linux-2.6.32.43/drivers/char/hvc_beat.c 2011-04-17 15:56:46.000000000 -0400
27561@@ -84,7 +84,7 @@ static int hvc_beat_put_chars(uint32_t v
27562 return cnt;
27563 }
27564
27565-static struct hv_ops hvc_beat_get_put_ops = {
27566+static const struct hv_ops hvc_beat_get_put_ops = {
27567 .get_chars = hvc_beat_get_chars,
27568 .put_chars = hvc_beat_put_chars,
27569 };
27570diff -urNp linux-2.6.32.43/drivers/char/hvc_console.c linux-2.6.32.43/drivers/char/hvc_console.c
27571--- linux-2.6.32.43/drivers/char/hvc_console.c 2011-03-27 14:31:47.000000000 -0400
27572+++ linux-2.6.32.43/drivers/char/hvc_console.c 2011-04-17 15:56:46.000000000 -0400
27573@@ -125,7 +125,7 @@ static struct hvc_struct *hvc_get_by_ind
27574 * console interfaces but can still be used as a tty device. This has to be
27575 * static because kmalloc will not work during early console init.
27576 */
27577-static struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
27578+static const struct hv_ops *cons_ops[MAX_NR_HVC_CONSOLES];
27579 static uint32_t vtermnos[MAX_NR_HVC_CONSOLES] =
27580 {[0 ... MAX_NR_HVC_CONSOLES - 1] = -1};
27581
27582@@ -247,7 +247,7 @@ static void destroy_hvc_struct(struct kr
27583 * vty adapters do NOT get an hvc_instantiate() callback since they
27584 * appear after early console init.
27585 */
27586-int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops)
27587+int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops)
27588 {
27589 struct hvc_struct *hp;
27590
27591@@ -756,7 +756,7 @@ static const struct tty_operations hvc_o
27592 };
27593
27594 struct hvc_struct __devinit *hvc_alloc(uint32_t vtermno, int data,
27595- struct hv_ops *ops, int outbuf_size)
27596+ const struct hv_ops *ops, int outbuf_size)
27597 {
27598 struct hvc_struct *hp;
27599 int i;
27600diff -urNp linux-2.6.32.43/drivers/char/hvc_console.h linux-2.6.32.43/drivers/char/hvc_console.h
27601--- linux-2.6.32.43/drivers/char/hvc_console.h 2011-03-27 14:31:47.000000000 -0400
27602+++ linux-2.6.32.43/drivers/char/hvc_console.h 2011-04-17 15:56:46.000000000 -0400
27603@@ -55,7 +55,7 @@ struct hvc_struct {
27604 int outbuf_size;
27605 int n_outbuf;
27606 uint32_t vtermno;
27607- struct hv_ops *ops;
27608+ const struct hv_ops *ops;
27609 int irq_requested;
27610 int data;
27611 struct winsize ws;
27612@@ -76,11 +76,11 @@ struct hv_ops {
27613 };
27614
27615 /* Register a vterm and a slot index for use as a console (console_init) */
27616-extern int hvc_instantiate(uint32_t vtermno, int index, struct hv_ops *ops);
27617+extern int hvc_instantiate(uint32_t vtermno, int index, const struct hv_ops *ops);
27618
27619 /* register a vterm for hvc tty operation (module_init or hotplug add) */
27620 extern struct hvc_struct * __devinit hvc_alloc(uint32_t vtermno, int data,
27621- struct hv_ops *ops, int outbuf_size);
27622+ const struct hv_ops *ops, int outbuf_size);
27623 /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */
27624 extern int hvc_remove(struct hvc_struct *hp);
27625
27626diff -urNp linux-2.6.32.43/drivers/char/hvc_iseries.c linux-2.6.32.43/drivers/char/hvc_iseries.c
27627--- linux-2.6.32.43/drivers/char/hvc_iseries.c 2011-03-27 14:31:47.000000000 -0400
27628+++ linux-2.6.32.43/drivers/char/hvc_iseries.c 2011-04-17 15:56:46.000000000 -0400
27629@@ -197,7 +197,7 @@ done:
27630 return sent;
27631 }
27632
27633-static struct hv_ops hvc_get_put_ops = {
27634+static const struct hv_ops hvc_get_put_ops = {
27635 .get_chars = get_chars,
27636 .put_chars = put_chars,
27637 .notifier_add = notifier_add_irq,
27638diff -urNp linux-2.6.32.43/drivers/char/hvc_iucv.c linux-2.6.32.43/drivers/char/hvc_iucv.c
27639--- linux-2.6.32.43/drivers/char/hvc_iucv.c 2011-03-27 14:31:47.000000000 -0400
27640+++ linux-2.6.32.43/drivers/char/hvc_iucv.c 2011-04-17 15:56:46.000000000 -0400
27641@@ -924,7 +924,7 @@ static int hvc_iucv_pm_restore_thaw(stru
27642
27643
27644 /* HVC operations */
27645-static struct hv_ops hvc_iucv_ops = {
27646+static const struct hv_ops hvc_iucv_ops = {
27647 .get_chars = hvc_iucv_get_chars,
27648 .put_chars = hvc_iucv_put_chars,
27649 .notifier_add = hvc_iucv_notifier_add,
27650diff -urNp linux-2.6.32.43/drivers/char/hvc_rtas.c linux-2.6.32.43/drivers/char/hvc_rtas.c
27651--- linux-2.6.32.43/drivers/char/hvc_rtas.c 2011-03-27 14:31:47.000000000 -0400
27652+++ linux-2.6.32.43/drivers/char/hvc_rtas.c 2011-04-17 15:56:46.000000000 -0400
27653@@ -71,7 +71,7 @@ static int hvc_rtas_read_console(uint32_
27654 return i;
27655 }
27656
27657-static struct hv_ops hvc_rtas_get_put_ops = {
27658+static const struct hv_ops hvc_rtas_get_put_ops = {
27659 .get_chars = hvc_rtas_read_console,
27660 .put_chars = hvc_rtas_write_console,
27661 };
27662diff -urNp linux-2.6.32.43/drivers/char/hvcs.c linux-2.6.32.43/drivers/char/hvcs.c
27663--- linux-2.6.32.43/drivers/char/hvcs.c 2011-03-27 14:31:47.000000000 -0400
27664+++ linux-2.6.32.43/drivers/char/hvcs.c 2011-04-17 15:56:46.000000000 -0400
27665@@ -82,6 +82,7 @@
27666 #include <asm/hvcserver.h>
27667 #include <asm/uaccess.h>
27668 #include <asm/vio.h>
27669+#include <asm/local.h>
27670
27671 /*
27672 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
27673@@ -269,7 +270,7 @@ struct hvcs_struct {
27674 unsigned int index;
27675
27676 struct tty_struct *tty;
27677- int open_count;
27678+ local_t open_count;
27679
27680 /*
27681 * Used to tell the driver kernel_thread what operations need to take
27682@@ -419,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st
27683
27684 spin_lock_irqsave(&hvcsd->lock, flags);
27685
27686- if (hvcsd->open_count > 0) {
27687+ if (local_read(&hvcsd->open_count) > 0) {
27688 spin_unlock_irqrestore(&hvcsd->lock, flags);
27689 printk(KERN_INFO "HVCS: vterm state unchanged. "
27690 "The hvcs device node is still in use.\n");
27691@@ -1135,7 +1136,7 @@ static int hvcs_open(struct tty_struct *
27692 if ((retval = hvcs_partner_connect(hvcsd)))
27693 goto error_release;
27694
27695- hvcsd->open_count = 1;
27696+ local_set(&hvcsd->open_count, 1);
27697 hvcsd->tty = tty;
27698 tty->driver_data = hvcsd;
27699
27700@@ -1169,7 +1170,7 @@ fast_open:
27701
27702 spin_lock_irqsave(&hvcsd->lock, flags);
27703 kref_get(&hvcsd->kref);
27704- hvcsd->open_count++;
27705+ local_inc(&hvcsd->open_count);
27706 hvcsd->todo_mask |= HVCS_SCHED_READ;
27707 spin_unlock_irqrestore(&hvcsd->lock, flags);
27708
27709@@ -1213,7 +1214,7 @@ static void hvcs_close(struct tty_struct
27710 hvcsd = tty->driver_data;
27711
27712 spin_lock_irqsave(&hvcsd->lock, flags);
27713- if (--hvcsd->open_count == 0) {
27714+ if (local_dec_and_test(&hvcsd->open_count)) {
27715
27716 vio_disable_interrupts(hvcsd->vdev);
27717
27718@@ -1239,10 +1240,10 @@ static void hvcs_close(struct tty_struct
27719 free_irq(irq, hvcsd);
27720 kref_put(&hvcsd->kref, destroy_hvcs_struct);
27721 return;
27722- } else if (hvcsd->open_count < 0) {
27723+ } else if (local_read(&hvcsd->open_count) < 0) {
27724 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
27725 " is missmanaged.\n",
27726- hvcsd->vdev->unit_address, hvcsd->open_count);
27727+ hvcsd->vdev->unit_address, local_read(&hvcsd->open_count));
27728 }
27729
27730 spin_unlock_irqrestore(&hvcsd->lock, flags);
27731@@ -1258,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc
27732
27733 spin_lock_irqsave(&hvcsd->lock, flags);
27734 /* Preserve this so that we know how many kref refs to put */
27735- temp_open_count = hvcsd->open_count;
27736+ temp_open_count = local_read(&hvcsd->open_count);
27737
27738 /*
27739 * Don't kref put inside the spinlock because the destruction
27740@@ -1273,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc
27741 hvcsd->tty->driver_data = NULL;
27742 hvcsd->tty = NULL;
27743
27744- hvcsd->open_count = 0;
27745+ local_set(&hvcsd->open_count, 0);
27746
27747 /* This will drop any buffered data on the floor which is OK in a hangup
27748 * scenario. */
27749@@ -1344,7 +1345,7 @@ static int hvcs_write(struct tty_struct
27750 * the middle of a write operation? This is a crummy place to do this
27751 * but we want to keep it all in the spinlock.
27752 */
27753- if (hvcsd->open_count <= 0) {
27754+ if (local_read(&hvcsd->open_count) <= 0) {
27755 spin_unlock_irqrestore(&hvcsd->lock, flags);
27756 return -ENODEV;
27757 }
27758@@ -1418,7 +1419,7 @@ static int hvcs_write_room(struct tty_st
27759 {
27760 struct hvcs_struct *hvcsd = tty->driver_data;
27761
27762- if (!hvcsd || hvcsd->open_count <= 0)
27763+ if (!hvcsd || local_read(&hvcsd->open_count) <= 0)
27764 return 0;
27765
27766 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
27767diff -urNp linux-2.6.32.43/drivers/char/hvc_udbg.c linux-2.6.32.43/drivers/char/hvc_udbg.c
27768--- linux-2.6.32.43/drivers/char/hvc_udbg.c 2011-03-27 14:31:47.000000000 -0400
27769+++ linux-2.6.32.43/drivers/char/hvc_udbg.c 2011-04-17 15:56:46.000000000 -0400
27770@@ -58,7 +58,7 @@ static int hvc_udbg_get(uint32_t vtermno
27771 return i;
27772 }
27773
27774-static struct hv_ops hvc_udbg_ops = {
27775+static const struct hv_ops hvc_udbg_ops = {
27776 .get_chars = hvc_udbg_get,
27777 .put_chars = hvc_udbg_put,
27778 };
27779diff -urNp linux-2.6.32.43/drivers/char/hvc_vio.c linux-2.6.32.43/drivers/char/hvc_vio.c
27780--- linux-2.6.32.43/drivers/char/hvc_vio.c 2011-03-27 14:31:47.000000000 -0400
27781+++ linux-2.6.32.43/drivers/char/hvc_vio.c 2011-04-17 15:56:46.000000000 -0400
27782@@ -77,7 +77,7 @@ static int filtered_get_chars(uint32_t v
27783 return got;
27784 }
27785
27786-static struct hv_ops hvc_get_put_ops = {
27787+static const struct hv_ops hvc_get_put_ops = {
27788 .get_chars = filtered_get_chars,
27789 .put_chars = hvc_put_chars,
27790 .notifier_add = notifier_add_irq,
27791diff -urNp linux-2.6.32.43/drivers/char/hvc_xen.c linux-2.6.32.43/drivers/char/hvc_xen.c
27792--- linux-2.6.32.43/drivers/char/hvc_xen.c 2011-03-27 14:31:47.000000000 -0400
27793+++ linux-2.6.32.43/drivers/char/hvc_xen.c 2011-04-17 15:56:46.000000000 -0400
27794@@ -120,7 +120,7 @@ static int read_console(uint32_t vtermno
27795 return recv;
27796 }
27797
27798-static struct hv_ops hvc_ops = {
27799+static const struct hv_ops hvc_ops = {
27800 .get_chars = read_console,
27801 .put_chars = write_console,
27802 .notifier_add = notifier_add_irq,
27803diff -urNp linux-2.6.32.43/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.32.43/drivers/char/ipmi/ipmi_msghandler.c
27804--- linux-2.6.32.43/drivers/char/ipmi/ipmi_msghandler.c 2011-03-27 14:31:47.000000000 -0400
27805+++ linux-2.6.32.43/drivers/char/ipmi/ipmi_msghandler.c 2011-05-16 21:46:57.000000000 -0400
27806@@ -414,7 +414,7 @@ struct ipmi_smi {
27807 struct proc_dir_entry *proc_dir;
27808 char proc_dir_name[10];
27809
27810- atomic_t stats[IPMI_NUM_STATS];
27811+ atomic_unchecked_t stats[IPMI_NUM_STATS];
27812
27813 /*
27814 * run_to_completion duplicate of smb_info, smi_info
27815@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
27816
27817
27818 #define ipmi_inc_stat(intf, stat) \
27819- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
27820+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
27821 #define ipmi_get_stat(intf, stat) \
27822- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
27823+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
27824
27825 static int is_lan_addr(struct ipmi_addr *addr)
27826 {
27827@@ -2808,7 +2808,7 @@ int ipmi_register_smi(struct ipmi_smi_ha
27828 INIT_LIST_HEAD(&intf->cmd_rcvrs);
27829 init_waitqueue_head(&intf->waitq);
27830 for (i = 0; i < IPMI_NUM_STATS; i++)
27831- atomic_set(&intf->stats[i], 0);
27832+ atomic_set_unchecked(&intf->stats[i], 0);
27833
27834 intf->proc_dir = NULL;
27835
27836@@ -4160,6 +4160,8 @@ static void send_panic_events(char *str)
27837 struct ipmi_smi_msg smi_msg;
27838 struct ipmi_recv_msg recv_msg;
27839
27840+ pax_track_stack();
27841+
27842 si = (struct ipmi_system_interface_addr *) &addr;
27843 si->addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE;
27844 si->channel = IPMI_BMC_CHANNEL;
27845diff -urNp linux-2.6.32.43/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.32.43/drivers/char/ipmi/ipmi_si_intf.c
27846--- linux-2.6.32.43/drivers/char/ipmi/ipmi_si_intf.c 2011-03-27 14:31:47.000000000 -0400
27847+++ linux-2.6.32.43/drivers/char/ipmi/ipmi_si_intf.c 2011-04-17 15:56:46.000000000 -0400
27848@@ -277,7 +277,7 @@ struct smi_info {
27849 unsigned char slave_addr;
27850
27851 /* Counters and things for the proc filesystem. */
27852- atomic_t stats[SI_NUM_STATS];
27853+ atomic_unchecked_t stats[SI_NUM_STATS];
27854
27855 struct task_struct *thread;
27856
27857@@ -285,9 +285,9 @@ struct smi_info {
27858 };
27859
27860 #define smi_inc_stat(smi, stat) \
27861- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
27862+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
27863 #define smi_get_stat(smi, stat) \
27864- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
27865+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
27866
27867 #define SI_MAX_PARMS 4
27868
27869@@ -2931,7 +2931,7 @@ static int try_smi_init(struct smi_info
27870 atomic_set(&new_smi->req_events, 0);
27871 new_smi->run_to_completion = 0;
27872 for (i = 0; i < SI_NUM_STATS; i++)
27873- atomic_set(&new_smi->stats[i], 0);
27874+ atomic_set_unchecked(&new_smi->stats[i], 0);
27875
27876 new_smi->interrupt_disabled = 0;
27877 atomic_set(&new_smi->stop_operation, 0);
27878diff -urNp linux-2.6.32.43/drivers/char/istallion.c linux-2.6.32.43/drivers/char/istallion.c
27879--- linux-2.6.32.43/drivers/char/istallion.c 2011-03-27 14:31:47.000000000 -0400
27880+++ linux-2.6.32.43/drivers/char/istallion.c 2011-05-16 21:46:57.000000000 -0400
27881@@ -187,7 +187,6 @@ static struct ktermios stli_deftermios
27882 * re-used for each stats call.
27883 */
27884 static comstats_t stli_comstats;
27885-static combrd_t stli_brdstats;
27886 static struct asystats stli_cdkstats;
27887
27888 /*****************************************************************************/
27889@@ -4058,6 +4057,7 @@ static int stli_getbrdstats(combrd_t __u
27890 {
27891 struct stlibrd *brdp;
27892 unsigned int i;
27893+ combrd_t stli_brdstats;
27894
27895 if (copy_from_user(&stli_brdstats, bp, sizeof(combrd_t)))
27896 return -EFAULT;
27897@@ -4269,6 +4269,8 @@ static int stli_getportstruct(struct stl
27898 struct stliport stli_dummyport;
27899 struct stliport *portp;
27900
27901+ pax_track_stack();
27902+
27903 if (copy_from_user(&stli_dummyport, arg, sizeof(struct stliport)))
27904 return -EFAULT;
27905 portp = stli_getport(stli_dummyport.brdnr, stli_dummyport.panelnr,
27906@@ -4291,6 +4293,8 @@ static int stli_getbrdstruct(struct stli
27907 struct stlibrd stli_dummybrd;
27908 struct stlibrd *brdp;
27909
27910+ pax_track_stack();
27911+
27912 if (copy_from_user(&stli_dummybrd, arg, sizeof(struct stlibrd)))
27913 return -EFAULT;
27914 if (stli_dummybrd.brdnr >= STL_MAXBRDS)
27915diff -urNp linux-2.6.32.43/drivers/char/Kconfig linux-2.6.32.43/drivers/char/Kconfig
27916--- linux-2.6.32.43/drivers/char/Kconfig 2011-03-27 14:31:47.000000000 -0400
27917+++ linux-2.6.32.43/drivers/char/Kconfig 2011-04-18 19:20:15.000000000 -0400
27918@@ -90,7 +90,8 @@ config VT_HW_CONSOLE_BINDING
27919
27920 config DEVKMEM
27921 bool "/dev/kmem virtual device support"
27922- default y
27923+ default n
27924+ depends on !GRKERNSEC_KMEM
27925 help
27926 Say Y here if you want to support the /dev/kmem device. The
27927 /dev/kmem device is rarely used, but can be used for certain
27928@@ -1114,6 +1115,7 @@ config DEVPORT
27929 bool
27930 depends on !M68K
27931 depends on ISA || PCI
27932+ depends on !GRKERNSEC_KMEM
27933 default y
27934
27935 source "drivers/s390/char/Kconfig"
27936diff -urNp linux-2.6.32.43/drivers/char/keyboard.c linux-2.6.32.43/drivers/char/keyboard.c
27937--- linux-2.6.32.43/drivers/char/keyboard.c 2011-03-27 14:31:47.000000000 -0400
27938+++ linux-2.6.32.43/drivers/char/keyboard.c 2011-04-17 15:56:46.000000000 -0400
27939@@ -635,6 +635,16 @@ static void k_spec(struct vc_data *vc, u
27940 kbd->kbdmode == VC_MEDIUMRAW) &&
27941 value != KVAL(K_SAK))
27942 return; /* SAK is allowed even in raw mode */
27943+
27944+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
27945+ {
27946+ void *func = fn_handler[value];
27947+ if (func == fn_show_state || func == fn_show_ptregs ||
27948+ func == fn_show_mem)
27949+ return;
27950+ }
27951+#endif
27952+
27953 fn_handler[value](vc);
27954 }
27955
27956@@ -1386,7 +1396,7 @@ static const struct input_device_id kbd_
27957 .evbit = { BIT_MASK(EV_SND) },
27958 },
27959
27960- { }, /* Terminating entry */
27961+ { 0 }, /* Terminating entry */
27962 };
27963
27964 MODULE_DEVICE_TABLE(input, kbd_ids);
27965diff -urNp linux-2.6.32.43/drivers/char/mem.c linux-2.6.32.43/drivers/char/mem.c
27966--- linux-2.6.32.43/drivers/char/mem.c 2011-03-27 14:31:47.000000000 -0400
27967+++ linux-2.6.32.43/drivers/char/mem.c 2011-04-17 15:56:46.000000000 -0400
27968@@ -18,6 +18,7 @@
27969 #include <linux/raw.h>
27970 #include <linux/tty.h>
27971 #include <linux/capability.h>
27972+#include <linux/security.h>
27973 #include <linux/ptrace.h>
27974 #include <linux/device.h>
27975 #include <linux/highmem.h>
27976@@ -35,6 +36,10 @@
27977 # include <linux/efi.h>
27978 #endif
27979
27980+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
27981+extern struct file_operations grsec_fops;
27982+#endif
27983+
27984 static inline unsigned long size_inside_page(unsigned long start,
27985 unsigned long size)
27986 {
27987@@ -102,9 +107,13 @@ static inline int range_is_allowed(unsig
27988
27989 while (cursor < to) {
27990 if (!devmem_is_allowed(pfn)) {
27991+#ifdef CONFIG_GRKERNSEC_KMEM
27992+ gr_handle_mem_readwrite(from, to);
27993+#else
27994 printk(KERN_INFO
27995 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
27996 current->comm, from, to);
27997+#endif
27998 return 0;
27999 }
28000 cursor += PAGE_SIZE;
28001@@ -112,6 +121,11 @@ static inline int range_is_allowed(unsig
28002 }
28003 return 1;
28004 }
28005+#elif defined(CONFIG_GRKERNSEC_KMEM)
28006+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28007+{
28008+ return 0;
28009+}
28010 #else
28011 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
28012 {
28013@@ -155,6 +169,8 @@ static ssize_t read_mem(struct file * fi
28014 #endif
28015
28016 while (count > 0) {
28017+ char *temp;
28018+
28019 /*
28020 * Handle first page in case it's not aligned
28021 */
28022@@ -177,11 +193,31 @@ static ssize_t read_mem(struct file * fi
28023 if (!ptr)
28024 return -EFAULT;
28025
28026- if (copy_to_user(buf, ptr, sz)) {
28027+#ifdef CONFIG_PAX_USERCOPY
28028+ temp = kmalloc(sz, GFP_KERNEL);
28029+ if (!temp) {
28030+ unxlate_dev_mem_ptr(p, ptr);
28031+ return -ENOMEM;
28032+ }
28033+ memcpy(temp, ptr, sz);
28034+#else
28035+ temp = ptr;
28036+#endif
28037+
28038+ if (copy_to_user(buf, temp, sz)) {
28039+
28040+#ifdef CONFIG_PAX_USERCOPY
28041+ kfree(temp);
28042+#endif
28043+
28044 unxlate_dev_mem_ptr(p, ptr);
28045 return -EFAULT;
28046 }
28047
28048+#ifdef CONFIG_PAX_USERCOPY
28049+ kfree(temp);
28050+#endif
28051+
28052 unxlate_dev_mem_ptr(p, ptr);
28053
28054 buf += sz;
28055@@ -419,9 +455,8 @@ static ssize_t read_kmem(struct file *fi
28056 size_t count, loff_t *ppos)
28057 {
28058 unsigned long p = *ppos;
28059- ssize_t low_count, read, sz;
28060+ ssize_t low_count, read, sz, err = 0;
28061 char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
28062- int err = 0;
28063
28064 read = 0;
28065 if (p < (unsigned long) high_memory) {
28066@@ -444,6 +479,8 @@ static ssize_t read_kmem(struct file *fi
28067 }
28068 #endif
28069 while (low_count > 0) {
28070+ char *temp;
28071+
28072 sz = size_inside_page(p, low_count);
28073
28074 /*
28075@@ -453,7 +490,22 @@ static ssize_t read_kmem(struct file *fi
28076 */
28077 kbuf = xlate_dev_kmem_ptr((char *)p);
28078
28079- if (copy_to_user(buf, kbuf, sz))
28080+#ifdef CONFIG_PAX_USERCOPY
28081+ temp = kmalloc(sz, GFP_KERNEL);
28082+ if (!temp)
28083+ return -ENOMEM;
28084+ memcpy(temp, kbuf, sz);
28085+#else
28086+ temp = kbuf;
28087+#endif
28088+
28089+ err = copy_to_user(buf, temp, sz);
28090+
28091+#ifdef CONFIG_PAX_USERCOPY
28092+ kfree(temp);
28093+#endif
28094+
28095+ if (err)
28096 return -EFAULT;
28097 buf += sz;
28098 p += sz;
28099@@ -889,6 +941,9 @@ static const struct memdev {
28100 #ifdef CONFIG_CRASH_DUMP
28101 [12] = { "oldmem", 0, &oldmem_fops, NULL },
28102 #endif
28103+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
28104+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
28105+#endif
28106 };
28107
28108 static int memory_open(struct inode *inode, struct file *filp)
28109diff -urNp linux-2.6.32.43/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.32.43/drivers/char/pcmcia/ipwireless/tty.c
28110--- linux-2.6.32.43/drivers/char/pcmcia/ipwireless/tty.c 2011-03-27 14:31:47.000000000 -0400
28111+++ linux-2.6.32.43/drivers/char/pcmcia/ipwireless/tty.c 2011-04-17 15:56:46.000000000 -0400
28112@@ -29,6 +29,7 @@
28113 #include <linux/tty_driver.h>
28114 #include <linux/tty_flip.h>
28115 #include <linux/uaccess.h>
28116+#include <asm/local.h>
28117
28118 #include "tty.h"
28119 #include "network.h"
28120@@ -51,7 +52,7 @@ struct ipw_tty {
28121 int tty_type;
28122 struct ipw_network *network;
28123 struct tty_struct *linux_tty;
28124- int open_count;
28125+ local_t open_count;
28126 unsigned int control_lines;
28127 struct mutex ipw_tty_mutex;
28128 int tx_bytes_queued;
28129@@ -127,10 +128,10 @@ static int ipw_open(struct tty_struct *l
28130 mutex_unlock(&tty->ipw_tty_mutex);
28131 return -ENODEV;
28132 }
28133- if (tty->open_count == 0)
28134+ if (local_read(&tty->open_count) == 0)
28135 tty->tx_bytes_queued = 0;
28136
28137- tty->open_count++;
28138+ local_inc(&tty->open_count);
28139
28140 tty->linux_tty = linux_tty;
28141 linux_tty->driver_data = tty;
28142@@ -146,9 +147,7 @@ static int ipw_open(struct tty_struct *l
28143
28144 static void do_ipw_close(struct ipw_tty *tty)
28145 {
28146- tty->open_count--;
28147-
28148- if (tty->open_count == 0) {
28149+ if (local_dec_return(&tty->open_count) == 0) {
28150 struct tty_struct *linux_tty = tty->linux_tty;
28151
28152 if (linux_tty != NULL) {
28153@@ -169,7 +168,7 @@ static void ipw_hangup(struct tty_struct
28154 return;
28155
28156 mutex_lock(&tty->ipw_tty_mutex);
28157- if (tty->open_count == 0) {
28158+ if (local_read(&tty->open_count) == 0) {
28159 mutex_unlock(&tty->ipw_tty_mutex);
28160 return;
28161 }
28162@@ -198,7 +197,7 @@ void ipwireless_tty_received(struct ipw_
28163 return;
28164 }
28165
28166- if (!tty->open_count) {
28167+ if (!local_read(&tty->open_count)) {
28168 mutex_unlock(&tty->ipw_tty_mutex);
28169 return;
28170 }
28171@@ -240,7 +239,7 @@ static int ipw_write(struct tty_struct *
28172 return -ENODEV;
28173
28174 mutex_lock(&tty->ipw_tty_mutex);
28175- if (!tty->open_count) {
28176+ if (!local_read(&tty->open_count)) {
28177 mutex_unlock(&tty->ipw_tty_mutex);
28178 return -EINVAL;
28179 }
28180@@ -280,7 +279,7 @@ static int ipw_write_room(struct tty_str
28181 if (!tty)
28182 return -ENODEV;
28183
28184- if (!tty->open_count)
28185+ if (!local_read(&tty->open_count))
28186 return -EINVAL;
28187
28188 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
28189@@ -322,7 +321,7 @@ static int ipw_chars_in_buffer(struct tt
28190 if (!tty)
28191 return 0;
28192
28193- if (!tty->open_count)
28194+ if (!local_read(&tty->open_count))
28195 return 0;
28196
28197 return tty->tx_bytes_queued;
28198@@ -403,7 +402,7 @@ static int ipw_tiocmget(struct tty_struc
28199 if (!tty)
28200 return -ENODEV;
28201
28202- if (!tty->open_count)
28203+ if (!local_read(&tty->open_count))
28204 return -EINVAL;
28205
28206 return get_control_lines(tty);
28207@@ -419,7 +418,7 @@ ipw_tiocmset(struct tty_struct *linux_tt
28208 if (!tty)
28209 return -ENODEV;
28210
28211- if (!tty->open_count)
28212+ if (!local_read(&tty->open_count))
28213 return -EINVAL;
28214
28215 return set_control_lines(tty, set, clear);
28216@@ -433,7 +432,7 @@ static int ipw_ioctl(struct tty_struct *
28217 if (!tty)
28218 return -ENODEV;
28219
28220- if (!tty->open_count)
28221+ if (!local_read(&tty->open_count))
28222 return -EINVAL;
28223
28224 /* FIXME: Exactly how is the tty object locked here .. */
28225@@ -591,7 +590,7 @@ void ipwireless_tty_free(struct ipw_tty
28226 against a parallel ioctl etc */
28227 mutex_lock(&ttyj->ipw_tty_mutex);
28228 }
28229- while (ttyj->open_count)
28230+ while (local_read(&ttyj->open_count))
28231 do_ipw_close(ttyj);
28232 ipwireless_disassociate_network_ttys(network,
28233 ttyj->channel_idx);
28234diff -urNp linux-2.6.32.43/drivers/char/pty.c linux-2.6.32.43/drivers/char/pty.c
28235--- linux-2.6.32.43/drivers/char/pty.c 2011-03-27 14:31:47.000000000 -0400
28236+++ linux-2.6.32.43/drivers/char/pty.c 2011-08-05 20:33:55.000000000 -0400
28237@@ -736,8 +736,10 @@ static void __init unix98_pty_init(void)
28238 register_sysctl_table(pty_root_table);
28239
28240 /* Now create the /dev/ptmx special device */
28241+ pax_open_kernel();
28242 tty_default_fops(&ptmx_fops);
28243- ptmx_fops.open = ptmx_open;
28244+ *(void **)&ptmx_fops.open = ptmx_open;
28245+ pax_close_kernel();
28246
28247 cdev_init(&ptmx_cdev, &ptmx_fops);
28248 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
28249diff -urNp linux-2.6.32.43/drivers/char/random.c linux-2.6.32.43/drivers/char/random.c
28250--- linux-2.6.32.43/drivers/char/random.c 2011-03-27 14:31:47.000000000 -0400
28251+++ linux-2.6.32.43/drivers/char/random.c 2011-08-07 19:48:09.000000000 -0400
28252@@ -254,8 +254,13 @@
28253 /*
28254 * Configuration information
28255 */
28256+#ifdef CONFIG_GRKERNSEC_RANDNET
28257+#define INPUT_POOL_WORDS 512
28258+#define OUTPUT_POOL_WORDS 128
28259+#else
28260 #define INPUT_POOL_WORDS 128
28261 #define OUTPUT_POOL_WORDS 32
28262+#endif
28263 #define SEC_XFER_SIZE 512
28264
28265 /*
28266@@ -292,10 +297,17 @@ static struct poolinfo {
28267 int poolwords;
28268 int tap1, tap2, tap3, tap4, tap5;
28269 } poolinfo_table[] = {
28270+#ifdef CONFIG_GRKERNSEC_RANDNET
28271+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
28272+ { 512, 411, 308, 208, 104, 1 },
28273+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
28274+ { 128, 103, 76, 51, 25, 1 },
28275+#else
28276 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
28277 { 128, 103, 76, 51, 25, 1 },
28278 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
28279 { 32, 26, 20, 14, 7, 1 },
28280+#endif
28281 #if 0
28282 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
28283 { 2048, 1638, 1231, 819, 411, 1 },
28284@@ -1209,7 +1221,7 @@ EXPORT_SYMBOL(generate_random_uuid);
28285 #include <linux/sysctl.h>
28286
28287 static int min_read_thresh = 8, min_write_thresh;
28288-static int max_read_thresh = INPUT_POOL_WORDS * 32;
28289+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
28290 static int max_write_thresh = INPUT_POOL_WORDS * 32;
28291 static char sysctl_bootid[16];
28292
28293@@ -1339,330 +1351,14 @@ ctl_table random_table[] = {
28294 };
28295 #endif /* CONFIG_SYSCTL */
28296
28297-/********************************************************************
28298- *
28299- * Random funtions for networking
28300- *
28301- ********************************************************************/
28302-
28303-/*
28304- * TCP initial sequence number picking. This uses the random number
28305- * generator to pick an initial secret value. This value is hashed
28306- * along with the TCP endpoint information to provide a unique
28307- * starting point for each pair of TCP endpoints. This defeats
28308- * attacks which rely on guessing the initial TCP sequence number.
28309- * This algorithm was suggested by Steve Bellovin.
28310- *
28311- * Using a very strong hash was taking an appreciable amount of the total
28312- * TCP connection establishment time, so this is a weaker hash,
28313- * compensated for by changing the secret periodically.
28314- */
28315-
28316-/* F, G and H are basic MD4 functions: selection, majority, parity */
28317-#define F(x, y, z) ((z) ^ ((x) & ((y) ^ (z))))
28318-#define G(x, y, z) (((x) & (y)) + (((x) ^ (y)) & (z)))
28319-#define H(x, y, z) ((x) ^ (y) ^ (z))
28320-
28321-/*
28322- * The generic round function. The application is so specific that
28323- * we don't bother protecting all the arguments with parens, as is generally
28324- * good macro practice, in favor of extra legibility.
28325- * Rotation is separate from addition to prevent recomputation
28326- */
28327-#define ROUND(f, a, b, c, d, x, s) \
28328- (a += f(b, c, d) + x, a = (a << s) | (a >> (32 - s)))
28329-#define K1 0
28330-#define K2 013240474631UL
28331-#define K3 015666365641UL
28332-
28333-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
28334-
28335-static __u32 twothirdsMD4Transform(__u32 const buf[4], __u32 const in[12])
28336-{
28337- __u32 a = buf[0], b = buf[1], c = buf[2], d = buf[3];
28338-
28339- /* Round 1 */
28340- ROUND(F, a, b, c, d, in[ 0] + K1, 3);
28341- ROUND(F, d, a, b, c, in[ 1] + K1, 7);
28342- ROUND(F, c, d, a, b, in[ 2] + K1, 11);
28343- ROUND(F, b, c, d, a, in[ 3] + K1, 19);
28344- ROUND(F, a, b, c, d, in[ 4] + K1, 3);
28345- ROUND(F, d, a, b, c, in[ 5] + K1, 7);
28346- ROUND(F, c, d, a, b, in[ 6] + K1, 11);
28347- ROUND(F, b, c, d, a, in[ 7] + K1, 19);
28348- ROUND(F, a, b, c, d, in[ 8] + K1, 3);
28349- ROUND(F, d, a, b, c, in[ 9] + K1, 7);
28350- ROUND(F, c, d, a, b, in[10] + K1, 11);
28351- ROUND(F, b, c, d, a, in[11] + K1, 19);
28352-
28353- /* Round 2 */
28354- ROUND(G, a, b, c, d, in[ 1] + K2, 3);
28355- ROUND(G, d, a, b, c, in[ 3] + K2, 5);
28356- ROUND(G, c, d, a, b, in[ 5] + K2, 9);
28357- ROUND(G, b, c, d, a, in[ 7] + K2, 13);
28358- ROUND(G, a, b, c, d, in[ 9] + K2, 3);
28359- ROUND(G, d, a, b, c, in[11] + K2, 5);
28360- ROUND(G, c, d, a, b, in[ 0] + K2, 9);
28361- ROUND(G, b, c, d, a, in[ 2] + K2, 13);
28362- ROUND(G, a, b, c, d, in[ 4] + K2, 3);
28363- ROUND(G, d, a, b, c, in[ 6] + K2, 5);
28364- ROUND(G, c, d, a, b, in[ 8] + K2, 9);
28365- ROUND(G, b, c, d, a, in[10] + K2, 13);
28366-
28367- /* Round 3 */
28368- ROUND(H, a, b, c, d, in[ 3] + K3, 3);
28369- ROUND(H, d, a, b, c, in[ 7] + K3, 9);
28370- ROUND(H, c, d, a, b, in[11] + K3, 11);
28371- ROUND(H, b, c, d, a, in[ 2] + K3, 15);
28372- ROUND(H, a, b, c, d, in[ 6] + K3, 3);
28373- ROUND(H, d, a, b, c, in[10] + K3, 9);
28374- ROUND(H, c, d, a, b, in[ 1] + K3, 11);
28375- ROUND(H, b, c, d, a, in[ 5] + K3, 15);
28376- ROUND(H, a, b, c, d, in[ 9] + K3, 3);
28377- ROUND(H, d, a, b, c, in[ 0] + K3, 9);
28378- ROUND(H, c, d, a, b, in[ 4] + K3, 11);
28379- ROUND(H, b, c, d, a, in[ 8] + K3, 15);
28380-
28381- return buf[1] + b; /* "most hashed" word */
28382- /* Alternative: return sum of all words? */
28383-}
28384-#endif
28385-
28386-#undef ROUND
28387-#undef F
28388-#undef G
28389-#undef H
28390-#undef K1
28391-#undef K2
28392-#undef K3
28393-
28394-/* This should not be decreased so low that ISNs wrap too fast. */
28395-#define REKEY_INTERVAL (300 * HZ)
28396-/*
28397- * Bit layout of the tcp sequence numbers (before adding current time):
28398- * bit 24-31: increased after every key exchange
28399- * bit 0-23: hash(source,dest)
28400- *
28401- * The implementation is similar to the algorithm described
28402- * in the Appendix of RFC 1185, except that
28403- * - it uses a 1 MHz clock instead of a 250 kHz clock
28404- * - it performs a rekey every 5 minutes, which is equivalent
28405- * to a (source,dest) tulple dependent forward jump of the
28406- * clock by 0..2^(HASH_BITS+1)
28407- *
28408- * Thus the average ISN wraparound time is 68 minutes instead of
28409- * 4.55 hours.
28410- *
28411- * SMP cleanup and lock avoidance with poor man's RCU.
28412- * Manfred Spraul <manfred@colorfullife.com>
28413- *
28414- */
28415-#define COUNT_BITS 8
28416-#define COUNT_MASK ((1 << COUNT_BITS) - 1)
28417-#define HASH_BITS 24
28418-#define HASH_MASK ((1 << HASH_BITS) - 1)
28419-
28420-static struct keydata {
28421- __u32 count; /* already shifted to the final position */
28422- __u32 secret[12];
28423-} ____cacheline_aligned ip_keydata[2];
28424-
28425-static unsigned int ip_cnt;
28426-
28427-static void rekey_seq_generator(struct work_struct *work);
28428-
28429-static DECLARE_DELAYED_WORK(rekey_work, rekey_seq_generator);
28430-
28431-/*
28432- * Lock avoidance:
28433- * The ISN generation runs lockless - it's just a hash over random data.
28434- * State changes happen every 5 minutes when the random key is replaced.
28435- * Synchronization is performed by having two copies of the hash function
28436- * state and rekey_seq_generator always updates the inactive copy.
28437- * The copy is then activated by updating ip_cnt.
28438- * The implementation breaks down if someone blocks the thread
28439- * that processes SYN requests for more than 5 minutes. Should never
28440- * happen, and even if that happens only a not perfectly compliant
28441- * ISN is generated, nothing fatal.
28442- */
28443-static void rekey_seq_generator(struct work_struct *work)
28444-{
28445- struct keydata *keyptr = &ip_keydata[1 ^ (ip_cnt & 1)];
28446-
28447- get_random_bytes(keyptr->secret, sizeof(keyptr->secret));
28448- keyptr->count = (ip_cnt & COUNT_MASK) << HASH_BITS;
28449- smp_wmb();
28450- ip_cnt++;
28451- schedule_delayed_work(&rekey_work,
28452- round_jiffies_relative(REKEY_INTERVAL));
28453-}
28454-
28455-static inline struct keydata *get_keyptr(void)
28456-{
28457- struct keydata *keyptr = &ip_keydata[ip_cnt & 1];
28458-
28459- smp_rmb();
28460-
28461- return keyptr;
28462-}
28463-
28464-static __init int seqgen_init(void)
28465+static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
28466+static int __init random_int_secret_init(void)
28467 {
28468- rekey_seq_generator(NULL);
28469+ get_random_bytes(random_int_secret, sizeof(random_int_secret));
28470 return 0;
28471 }
28472-late_initcall(seqgen_init);
28473-
28474-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
28475-__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
28476- __be16 sport, __be16 dport)
28477-{
28478- __u32 seq;
28479- __u32 hash[12];
28480- struct keydata *keyptr = get_keyptr();
28481-
28482- /* The procedure is the same as for IPv4, but addresses are longer.
28483- * Thus we must use twothirdsMD4Transform.
28484- */
28485-
28486- memcpy(hash, saddr, 16);
28487- hash[4] = ((__force u16)sport << 16) + (__force u16)dport;
28488- memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
28489-
28490- seq = twothirdsMD4Transform((const __u32 *)daddr, hash) & HASH_MASK;
28491- seq += keyptr->count;
28492-
28493- seq += ktime_to_ns(ktime_get_real());
28494-
28495- return seq;
28496-}
28497-EXPORT_SYMBOL(secure_tcpv6_sequence_number);
28498-#endif
28499-
28500-/* The code below is shamelessly stolen from secure_tcp_sequence_number().
28501- * All blames to Andrey V. Savochkin <saw@msu.ru>.
28502- */
28503-__u32 secure_ip_id(__be32 daddr)
28504-{
28505- struct keydata *keyptr;
28506- __u32 hash[4];
28507-
28508- keyptr = get_keyptr();
28509-
28510- /*
28511- * Pick a unique starting offset for each IP destination.
28512- * The dest ip address is placed in the starting vector,
28513- * which is then hashed with random data.
28514- */
28515- hash[0] = (__force __u32)daddr;
28516- hash[1] = keyptr->secret[9];
28517- hash[2] = keyptr->secret[10];
28518- hash[3] = keyptr->secret[11];
28519-
28520- return half_md4_transform(hash, keyptr->secret);
28521-}
28522-
28523-#ifdef CONFIG_INET
28524-
28525-__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
28526- __be16 sport, __be16 dport)
28527-{
28528- __u32 seq;
28529- __u32 hash[4];
28530- struct keydata *keyptr = get_keyptr();
28531-
28532- /*
28533- * Pick a unique starting offset for each TCP connection endpoints
28534- * (saddr, daddr, sport, dport).
28535- * Note that the words are placed into the starting vector, which is
28536- * then mixed with a partial MD4 over random data.
28537- */
28538- hash[0] = (__force u32)saddr;
28539- hash[1] = (__force u32)daddr;
28540- hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
28541- hash[3] = keyptr->secret[11];
28542-
28543- seq = half_md4_transform(hash, keyptr->secret) & HASH_MASK;
28544- seq += keyptr->count;
28545- /*
28546- * As close as possible to RFC 793, which
28547- * suggests using a 250 kHz clock.
28548- * Further reading shows this assumes 2 Mb/s networks.
28549- * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
28550- * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but
28551- * we also need to limit the resolution so that the u32 seq
28552- * overlaps less than one time per MSL (2 minutes).
28553- * Choosing a clock of 64 ns period is OK. (period of 274 s)
28554- */
28555- seq += ktime_to_ns(ktime_get_real()) >> 6;
28556-
28557- return seq;
28558-}
28559-
28560-/* Generate secure starting point for ephemeral IPV4 transport port search */
28561-u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
28562-{
28563- struct keydata *keyptr = get_keyptr();
28564- u32 hash[4];
28565-
28566- /*
28567- * Pick a unique starting offset for each ephemeral port search
28568- * (saddr, daddr, dport) and 48bits of random data.
28569- */
28570- hash[0] = (__force u32)saddr;
28571- hash[1] = (__force u32)daddr;
28572- hash[2] = (__force u32)dport ^ keyptr->secret[10];
28573- hash[3] = keyptr->secret[11];
28574-
28575- return half_md4_transform(hash, keyptr->secret);
28576-}
28577-EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
28578-
28579-#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
28580-u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
28581- __be16 dport)
28582-{
28583- struct keydata *keyptr = get_keyptr();
28584- u32 hash[12];
28585-
28586- memcpy(hash, saddr, 16);
28587- hash[4] = (__force u32)dport;
28588- memcpy(&hash[5], keyptr->secret, sizeof(__u32) * 7);
28589-
28590- return twothirdsMD4Transform((const __u32 *)daddr, hash);
28591-}
28592-#endif
28593-
28594-#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
28595-/* Similar to secure_tcp_sequence_number but generate a 48 bit value
28596- * bit's 32-47 increase every key exchange
28597- * 0-31 hash(source, dest)
28598- */
28599-u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
28600- __be16 sport, __be16 dport)
28601-{
28602- u64 seq;
28603- __u32 hash[4];
28604- struct keydata *keyptr = get_keyptr();
28605-
28606- hash[0] = (__force u32)saddr;
28607- hash[1] = (__force u32)daddr;
28608- hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
28609- hash[3] = keyptr->secret[11];
28610-
28611- seq = half_md4_transform(hash, keyptr->secret);
28612- seq |= ((u64)keyptr->count) << (32 - HASH_BITS);
28613-
28614- seq += ktime_to_ns(ktime_get_real());
28615- seq &= (1ull << 48) - 1;
28616-
28617- return seq;
28618-}
28619-EXPORT_SYMBOL(secure_dccp_sequence_number);
28620-#endif
28621-
28622-#endif /* CONFIG_INET */
28623
28624+late_initcall(random_int_secret_init);
28625
28626 /*
28627 * Get a random word for internal kernel use only. Similar to urandom but
28628@@ -1670,17 +1366,16 @@ EXPORT_SYMBOL(secure_dccp_sequence_numbe
28629 * value is not cryptographically secure but for several uses the cost of
28630 * depleting entropy is too high
28631 */
28632-DEFINE_PER_CPU(__u32 [4], get_random_int_hash);
28633+DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
28634 unsigned int get_random_int(void)
28635 {
28636- struct keydata *keyptr;
28637 __u32 *hash = get_cpu_var(get_random_int_hash);
28638- int ret;
28639+ unsigned int ret;
28640
28641- keyptr = get_keyptr();
28642 hash[0] += current->pid + jiffies + get_cycles();
28643
28644- ret = half_md4_transform(hash, keyptr->secret);
28645+ md5_transform(hash, random_int_secret);
28646+ ret = hash[0];
28647 put_cpu_var(get_random_int_hash);
28648
28649 return ret;
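
The get_random_int() rework above drops the old keydata/half_md4 machinery in favour of a single per-boot secret (random_int_secret) mixed into a per-CPU hash with md5_transform() on every call. Below is a rough userspace model of that shape; mix32() is only a stand-in for the kernel's MD5 round, and the seeding here is purely for demonstration, not how the kernel obtains its secret.

/* Userspace model of the reworked get_random_int(): one secret filled at
 * startup, mixed with cheap per-call inputs (pid, time).  Not cryptographic
 * advice; mix32() stands in for md5_transform(). */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <unistd.h>

static uint32_t secret[4];	/* plays the role of random_int_secret[] */
static uint32_t hash[4];	/* per-CPU state in the kernel version */

static uint32_t mix32(uint32_t x)
{
	/* xorshift-multiply mixer as a placeholder for a real hash round */
	x ^= x >> 16; x *= 0x7feb352dU;
	x ^= x >> 15; x *= 0x846ca68bU;
	x ^= x >> 16;
	return x;
}

static void secret_init(void)
{
	/* The kernel uses get_random_bytes(); time/pid seeding is demo-only. */
	srandom((unsigned)time(NULL) ^ (unsigned)getpid());
	for (int i = 0; i < 4; i++)
		secret[i] = (uint32_t)random();
}

static unsigned int get_random_int_model(void)
{
	hash[0] += (uint32_t)getpid() + (uint32_t)clock();
	for (int i = 0; i < 4; i++)
		hash[i] = mix32(hash[i] ^ secret[i]);
	return hash[0];
}

int main(void)
{
	secret_init();
	for (int i = 0; i < 4; i++)
		printf("%08x\n", get_random_int_model());
	return 0;
}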
28650diff -urNp linux-2.6.32.43/drivers/char/rocket.c linux-2.6.32.43/drivers/char/rocket.c
28651--- linux-2.6.32.43/drivers/char/rocket.c 2011-03-27 14:31:47.000000000 -0400
28652+++ linux-2.6.32.43/drivers/char/rocket.c 2011-05-16 21:46:57.000000000 -0400
28653@@ -1266,6 +1266,8 @@ static int get_ports(struct r_port *info
28654 struct rocket_ports tmp;
28655 int board;
28656
28657+ pax_track_stack();
28658+
28659 if (!retports)
28660 return -EFAULT;
28661 memset(&tmp, 0, sizeof (tmp));
28662diff -urNp linux-2.6.32.43/drivers/char/sonypi.c linux-2.6.32.43/drivers/char/sonypi.c
28663--- linux-2.6.32.43/drivers/char/sonypi.c 2011-03-27 14:31:47.000000000 -0400
28664+++ linux-2.6.32.43/drivers/char/sonypi.c 2011-04-17 15:56:46.000000000 -0400
28665@@ -55,6 +55,7 @@
28666 #include <asm/uaccess.h>
28667 #include <asm/io.h>
28668 #include <asm/system.h>
28669+#include <asm/local.h>
28670
28671 #include <linux/sonypi.h>
28672
28673@@ -491,7 +492,7 @@ static struct sonypi_device {
28674 spinlock_t fifo_lock;
28675 wait_queue_head_t fifo_proc_list;
28676 struct fasync_struct *fifo_async;
28677- int open_count;
28678+ local_t open_count;
28679 int model;
28680 struct input_dev *input_jog_dev;
28681 struct input_dev *input_key_dev;
28682@@ -895,7 +896,7 @@ static int sonypi_misc_fasync(int fd, st
28683 static int sonypi_misc_release(struct inode *inode, struct file *file)
28684 {
28685 mutex_lock(&sonypi_device.lock);
28686- sonypi_device.open_count--;
28687+ local_dec(&sonypi_device.open_count);
28688 mutex_unlock(&sonypi_device.lock);
28689 return 0;
28690 }
28691@@ -905,9 +906,9 @@ static int sonypi_misc_open(struct inode
28692 lock_kernel();
28693 mutex_lock(&sonypi_device.lock);
28694 /* Flush input queue on first open */
28695- if (!sonypi_device.open_count)
28696+ if (!local_read(&sonypi_device.open_count))
28697 kfifo_reset(sonypi_device.fifo);
28698- sonypi_device.open_count++;
28699+ local_inc(&sonypi_device.open_count);
28700 mutex_unlock(&sonypi_device.lock);
28701 unlock_kernel();
28702 return 0;
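
As in the ipwireless hunk at the start of this chunk and the drm_fops hunk further down, the sonypi open_count above is converted from a plain int to local_t and only touched through local_read()/local_inc()/local_dec(). The userspace analogue below uses C11 atomics to show the same discipline; it models the idea, not the kernel's local_t implementation.

/* Userspace analogue of the open_count conversion: the counter is only
 * accessed through explicit atomic accessors instead of ++/-- on an int. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int open_count;	/* was: int open_count; */

static void device_open(void)
{
	if (atomic_load(&open_count) == 0)
		puts("first open: reset FIFO");	/* like kfifo_reset() on first open */
	atomic_fetch_add(&open_count, 1);
}

static void device_release(void)
{
	atomic_fetch_sub(&open_count, 1);
}

int main(void)
{
	device_open();
	device_open();
	device_release();
	printf("open_count = %d\n", atomic_load(&open_count));
	return 0;
}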
28703diff -urNp linux-2.6.32.43/drivers/char/stallion.c linux-2.6.32.43/drivers/char/stallion.c
28704--- linux-2.6.32.43/drivers/char/stallion.c 2011-03-27 14:31:47.000000000 -0400
28705+++ linux-2.6.32.43/drivers/char/stallion.c 2011-05-16 21:46:57.000000000 -0400
28706@@ -2448,6 +2448,8 @@ static int stl_getportstruct(struct stlp
28707 struct stlport stl_dummyport;
28708 struct stlport *portp;
28709
28710+ pax_track_stack();
28711+
28712 if (copy_from_user(&stl_dummyport, arg, sizeof(struct stlport)))
28713 return -EFAULT;
28714 portp = stl_getport(stl_dummyport.brdnr, stl_dummyport.panelnr,
28715diff -urNp linux-2.6.32.43/drivers/char/tpm/tpm_bios.c linux-2.6.32.43/drivers/char/tpm/tpm_bios.c
28716--- linux-2.6.32.43/drivers/char/tpm/tpm_bios.c 2011-03-27 14:31:47.000000000 -0400
28717+++ linux-2.6.32.43/drivers/char/tpm/tpm_bios.c 2011-04-17 15:56:46.000000000 -0400
28718@@ -172,7 +172,7 @@ static void *tpm_bios_measurements_start
28719 event = addr;
28720
28721 if ((event->event_type == 0 && event->event_size == 0) ||
28722- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
28723+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
28724 return NULL;
28725
28726 return addr;
28727@@ -197,7 +197,7 @@ static void *tpm_bios_measurements_next(
28728 return NULL;
28729
28730 if ((event->event_type == 0 && event->event_size == 0) ||
28731- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
28732+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
28733 return NULL;
28734
28735 (*pos)++;
28736@@ -290,7 +290,8 @@ static int tpm_binary_bios_measurements_
28737 int i;
28738
28739 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
28740- seq_putc(m, data[i]);
28741+ if (!seq_putc(m, data[i]))
28742+ return -EFAULT;
28743
28744 return 0;
28745 }
28746@@ -409,6 +410,11 @@ static int read_log(struct tpm_bios_log
28747 log->bios_event_log_end = log->bios_event_log + len;
28748
28749 virt = acpi_os_map_memory(start, len);
28750+ if (!virt) {
28751+ kfree(log->bios_event_log);
28752+ log->bios_event_log = NULL;
28753+ return -EFAULT;
28754+ }
28755
28756 memcpy(log->bios_event_log, virt, len);
28757
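
The tpm_bios.c hunk above rewrites the event-log bounds checks because `addr + sizeof(struct tcpa_event) + event_size` can wrap around when event_size is attacker-controlled and slip under `limit`; comparing `event_size >= limit - addr - sizeof(...)` keeps the arithmetic on the trusted side. The small standalone demo below shows the difference with made-up values.

/* Why the bounds check is rearranged: adding an untrusted size can wrap,
 * subtracting trusted quantities cannot (while addr + HDR_SIZE <= limit). */
#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

#define HDR_SIZE 32u	/* stands in for sizeof(struct tcpa_event) */

static int check_old(uintptr_t addr, uintptr_t limit, size_t event_size)
{
	/* addr + HDR_SIZE + event_size may wrap past 0 and compare "< limit" */
	return addr + HDR_SIZE + event_size >= limit;	/* 1 = reject */
}

static int check_new(uintptr_t addr, uintptr_t limit, size_t event_size)
{
	/* limit - addr - HDR_SIZE stays well-defined for in-range addr */
	return event_size >= limit - addr - HDR_SIZE;	/* 1 = reject */
}

int main(void)
{
	uintptr_t addr  = 0x1000;
	uintptr_t limit = 0x2000;
	size_t huge = SIZE_MAX - 16;	/* attacker-chosen event_size */

	printf("old check rejects huge size: %d\n", check_old(addr, limit, huge));
	printf("new check rejects huge size: %d\n", check_new(addr, limit, huge));
	return 0;
}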
28758diff -urNp linux-2.6.32.43/drivers/char/tpm/tpm.c linux-2.6.32.43/drivers/char/tpm/tpm.c
28759--- linux-2.6.32.43/drivers/char/tpm/tpm.c 2011-04-17 17:00:52.000000000 -0400
28760+++ linux-2.6.32.43/drivers/char/tpm/tpm.c 2011-05-16 21:46:57.000000000 -0400
28761@@ -402,7 +402,7 @@ static ssize_t tpm_transmit(struct tpm_c
28762 chip->vendor.req_complete_val)
28763 goto out_recv;
28764
28765- if ((status == chip->vendor.req_canceled)) {
28766+ if (status == chip->vendor.req_canceled) {
28767 dev_err(chip->dev, "Operation Canceled\n");
28768 rc = -ECANCELED;
28769 goto out;
28770@@ -821,6 +821,8 @@ ssize_t tpm_show_pubek(struct device *de
28771
28772 struct tpm_chip *chip = dev_get_drvdata(dev);
28773
28774+ pax_track_stack();
28775+
28776 tpm_cmd.header.in = tpm_readpubek_header;
28777 err = transmit_cmd(chip, &tpm_cmd, READ_PUBEK_RESULT_SIZE,
28778 "attempting to read the PUBEK");
28779diff -urNp linux-2.6.32.43/drivers/char/tty_io.c linux-2.6.32.43/drivers/char/tty_io.c
28780--- linux-2.6.32.43/drivers/char/tty_io.c 2011-03-27 14:31:47.000000000 -0400
28781+++ linux-2.6.32.43/drivers/char/tty_io.c 2011-08-05 20:33:55.000000000 -0400
28782@@ -2582,8 +2582,10 @@ long tty_ioctl(struct file *file, unsign
28783 return retval;
28784 }
28785
28786+EXPORT_SYMBOL(tty_ioctl);
28787+
28788 #ifdef CONFIG_COMPAT
28789-static long tty_compat_ioctl(struct file *file, unsigned int cmd,
28790+long tty_compat_ioctl(struct file *file, unsigned int cmd,
28791 unsigned long arg)
28792 {
28793 struct inode *inode = file->f_dentry->d_inode;
28794@@ -2607,6 +2609,8 @@ static long tty_compat_ioctl(struct file
28795
28796 return retval;
28797 }
28798+
28799+EXPORT_SYMBOL(tty_compat_ioctl);
28800 #endif
28801
28802 /*
28803@@ -3052,7 +3056,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
28804
28805 void tty_default_fops(struct file_operations *fops)
28806 {
28807- *fops = tty_fops;
28808+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
28809 }
28810
28811 /*
28812diff -urNp linux-2.6.32.43/drivers/char/tty_ldisc.c linux-2.6.32.43/drivers/char/tty_ldisc.c
28813--- linux-2.6.32.43/drivers/char/tty_ldisc.c 2011-07-13 17:23:04.000000000 -0400
28814+++ linux-2.6.32.43/drivers/char/tty_ldisc.c 2011-07-13 17:23:18.000000000 -0400
28815@@ -74,7 +74,7 @@ static void put_ldisc(struct tty_ldisc *
28816 if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) {
28817 struct tty_ldisc_ops *ldo = ld->ops;
28818
28819- ldo->refcount--;
28820+ atomic_dec(&ldo->refcount);
28821 module_put(ldo->owner);
28822 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28823
28824@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct
28825 spin_lock_irqsave(&tty_ldisc_lock, flags);
28826 tty_ldiscs[disc] = new_ldisc;
28827 new_ldisc->num = disc;
28828- new_ldisc->refcount = 0;
28829+ atomic_set(&new_ldisc->refcount, 0);
28830 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28831
28832 return ret;
28833@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc)
28834 return -EINVAL;
28835
28836 spin_lock_irqsave(&tty_ldisc_lock, flags);
28837- if (tty_ldiscs[disc]->refcount)
28838+ if (atomic_read(&tty_ldiscs[disc]->refcount))
28839 ret = -EBUSY;
28840 else
28841 tty_ldiscs[disc] = NULL;
28842@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i
28843 if (ldops) {
28844 ret = ERR_PTR(-EAGAIN);
28845 if (try_module_get(ldops->owner)) {
28846- ldops->refcount++;
28847+ atomic_inc(&ldops->refcount);
28848 ret = ldops;
28849 }
28850 }
28851@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o
28852 unsigned long flags;
28853
28854 spin_lock_irqsave(&tty_ldisc_lock, flags);
28855- ldops->refcount--;
28856+ atomic_dec(&ldops->refcount);
28857 module_put(ldops->owner);
28858 spin_unlock_irqrestore(&tty_ldisc_lock, flags);
28859 }
28860diff -urNp linux-2.6.32.43/drivers/char/virtio_console.c linux-2.6.32.43/drivers/char/virtio_console.c
28861--- linux-2.6.32.43/drivers/char/virtio_console.c 2011-03-27 14:31:47.000000000 -0400
28862+++ linux-2.6.32.43/drivers/char/virtio_console.c 2011-08-05 20:33:55.000000000 -0400
28863@@ -133,7 +133,9 @@ static int get_chars(u32 vtermno, char *
28864 * virtqueue, so we let the drivers do some boutique early-output thing. */
28865 int __init virtio_cons_early_init(int (*put_chars)(u32, const char *, int))
28866 {
28867- virtio_cons.put_chars = put_chars;
28868+ pax_open_kernel();
28869+ *(void **)&virtio_cons.put_chars = put_chars;
28870+ pax_close_kernel();
28871 return hvc_instantiate(0, 0, &virtio_cons);
28872 }
28873
28874@@ -213,11 +215,13 @@ static int __devinit virtcons_probe(stru
28875 out_vq = vqs[1];
28876
28877 /* Start using the new console output. */
28878- virtio_cons.get_chars = get_chars;
28879- virtio_cons.put_chars = put_chars;
28880- virtio_cons.notifier_add = notifier_add_vio;
28881- virtio_cons.notifier_del = notifier_del_vio;
28882- virtio_cons.notifier_hangup = notifier_del_vio;
28883+ pax_open_kernel();
28884+ *(void **)&virtio_cons.get_chars = get_chars;
28885+ *(void **)&virtio_cons.put_chars = put_chars;
28886+ *(void **)&virtio_cons.notifier_add = notifier_add_vio;
28887+ *(void **)&virtio_cons.notifier_del = notifier_del_vio;
28888+ *(void **)&virtio_cons.notifier_hangup = notifier_del_vio;
28889+ pax_close_kernel();
28890
28891 /* The first argument of hvc_alloc() is the virtual console number, so
28892 * we use zero. The second argument is the parameter for the
28893diff -urNp linux-2.6.32.43/drivers/char/vt.c linux-2.6.32.43/drivers/char/vt.c
28894--- linux-2.6.32.43/drivers/char/vt.c 2011-03-27 14:31:47.000000000 -0400
28895+++ linux-2.6.32.43/drivers/char/vt.c 2011-04-17 15:56:46.000000000 -0400
28896@@ -243,7 +243,7 @@ EXPORT_SYMBOL_GPL(unregister_vt_notifier
28897
28898 static void notify_write(struct vc_data *vc, unsigned int unicode)
28899 {
28900- struct vt_notifier_param param = { .vc = vc, unicode = unicode };
28901+ struct vt_notifier_param param = { .vc = vc, .c = unicode };
28902 atomic_notifier_call_chain(&vt_notifier_list, VT_WRITE, &param);
28903 }
28904
28905diff -urNp linux-2.6.32.43/drivers/char/vt_ioctl.c linux-2.6.32.43/drivers/char/vt_ioctl.c
28906--- linux-2.6.32.43/drivers/char/vt_ioctl.c 2011-03-27 14:31:47.000000000 -0400
28907+++ linux-2.6.32.43/drivers/char/vt_ioctl.c 2011-04-17 15:56:46.000000000 -0400
28908@@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28909 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
28910 return -EFAULT;
28911
28912- if (!capable(CAP_SYS_TTY_CONFIG))
28913- perm = 0;
28914-
28915 switch (cmd) {
28916 case KDGKBENT:
28917 key_map = key_maps[s];
28918@@ -224,8 +221,12 @@ do_kdsk_ioctl(int cmd, struct kbentry __
28919 val = (i ? K_HOLE : K_NOSUCHMAP);
28920 return put_user(val, &user_kbe->kb_value);
28921 case KDSKBENT:
28922+ if (!capable(CAP_SYS_TTY_CONFIG))
28923+ perm = 0;
28924+
28925 if (!perm)
28926 return -EPERM;
28927+
28928 if (!i && v == K_NOSUCHMAP) {
28929 /* deallocate map */
28930 key_map = key_maps[s];
28931@@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28932 int i, j, k;
28933 int ret;
28934
28935- if (!capable(CAP_SYS_TTY_CONFIG))
28936- perm = 0;
28937-
28938 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
28939 if (!kbs) {
28940 ret = -ENOMEM;
28941@@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry
28942 kfree(kbs);
28943 return ((p && *p) ? -EOVERFLOW : 0);
28944 case KDSKBSENT:
28945+ if (!capable(CAP_SYS_TTY_CONFIG))
28946+ perm = 0;
28947+
28948 if (!perm) {
28949 ret = -EPERM;
28950 goto reterr;
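
The vt_ioctl.c hunk above moves the CAP_SYS_TTY_CONFIG test from the top of do_kdsk_ioctl()/do_kdgkb_ioctl() into the KDSKBENT/KDSKBSENT branches, so the capability is consulted only where a keymap entry is actually written. A minimal userspace model of checking privilege only on the mutating branch, with hypothetical names:

/* Check-on-the-mutating-branch pattern: reads never hit the privilege
 * test, writes always do.  Purely illustrative. */
#include <stdio.h>
#include <string.h>
#include <errno.h>

static int table[8];		/* stands in for the keymap */
static int caller_is_admin;	/* stands in for capable(CAP_SYS_TTY_CONFIG) */

static int kbd_ioctl(const char *cmd, int idx, int value)
{
	if (idx < 0 || idx >= 8)
		return -EINVAL;

	if (strcmp(cmd, "get") == 0)
		return table[idx];		/* no privilege needed to read */

	if (strcmp(cmd, "set") == 0) {
		if (!caller_is_admin)		/* checked only where it matters */
			return -EPERM;
		table[idx] = value;
		return 0;
	}
	return -EINVAL;
}

int main(void)
{
	printf("get -> %d\n", kbd_ioctl("get", 3, 0));
	printf("set (unprivileged) -> %d\n", kbd_ioctl("set", 3, 7));
	caller_is_admin = 1;
	printf("set (admin) -> %d\n", kbd_ioctl("set", 3, 7));
	printf("get -> %d\n", kbd_ioctl("get", 3, 0));
	return 0;
}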
28951diff -urNp linux-2.6.32.43/drivers/cpufreq/cpufreq.c linux-2.6.32.43/drivers/cpufreq/cpufreq.c
28952--- linux-2.6.32.43/drivers/cpufreq/cpufreq.c 2011-06-25 12:55:34.000000000 -0400
28953+++ linux-2.6.32.43/drivers/cpufreq/cpufreq.c 2011-06-25 12:56:37.000000000 -0400
28954@@ -750,7 +750,7 @@ static void cpufreq_sysfs_release(struct
28955 complete(&policy->kobj_unregister);
28956 }
28957
28958-static struct sysfs_ops sysfs_ops = {
28959+static const struct sysfs_ops sysfs_ops = {
28960 .show = show,
28961 .store = store,
28962 };
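
This cpufreq hunk and the cpuidle, ioat, EDAC, and firmware hunks that follow all apply the same change: the show/store function-pointer tables become `static const struct sysfs_ops`, so they land in a read-only section and cannot be re-pointed at runtime. A plain-C illustration of the idiom, outside the sysfs API:

/* Constification idiom: a const table of function pointers ends up in
 * .rodata and cannot be retargeted through a pointer to it. */
#include <stdio.h>

struct ops {
	int (*show)(const char *name);
	int (*store)(const char *name, int value);
};

static int my_show(const char *name)         { printf("show %s\n", name); return 0; }
static int my_store(const char *name, int v) { printf("store %s=%d\n", name, v); return 0; }

/* was: static struct ops my_ops = { ... };  -- writable at runtime */
static const struct ops my_ops = {
	.show  = my_show,
	.store = my_store,
};

static void use(const struct ops *o)
{
	o->show("freq");
	o->store("freq", 1200);
	/* o->show = NULL;  would not compile: the table is const */
}

int main(void)
{
	use(&my_ops);
	return 0;
}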
28963diff -urNp linux-2.6.32.43/drivers/cpuidle/sysfs.c linux-2.6.32.43/drivers/cpuidle/sysfs.c
28964--- linux-2.6.32.43/drivers/cpuidle/sysfs.c 2011-03-27 14:31:47.000000000 -0400
28965+++ linux-2.6.32.43/drivers/cpuidle/sysfs.c 2011-04-17 15:56:46.000000000 -0400
28966@@ -191,7 +191,7 @@ static ssize_t cpuidle_store(struct kobj
28967 return ret;
28968 }
28969
28970-static struct sysfs_ops cpuidle_sysfs_ops = {
28971+static const struct sysfs_ops cpuidle_sysfs_ops = {
28972 .show = cpuidle_show,
28973 .store = cpuidle_store,
28974 };
28975@@ -277,7 +277,7 @@ static ssize_t cpuidle_state_show(struct
28976 return ret;
28977 }
28978
28979-static struct sysfs_ops cpuidle_state_sysfs_ops = {
28980+static const struct sysfs_ops cpuidle_state_sysfs_ops = {
28981 .show = cpuidle_state_show,
28982 };
28983
28984@@ -294,7 +294,7 @@ static struct kobj_type ktype_state_cpui
28985 .release = cpuidle_state_sysfs_release,
28986 };
28987
28988-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28989+static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i)
28990 {
28991 kobject_put(&device->kobjs[i]->kobj);
28992 wait_for_completion(&device->kobjs[i]->kobj_unregister);
28993diff -urNp linux-2.6.32.43/drivers/crypto/hifn_795x.c linux-2.6.32.43/drivers/crypto/hifn_795x.c
28994--- linux-2.6.32.43/drivers/crypto/hifn_795x.c 2011-03-27 14:31:47.000000000 -0400
28995+++ linux-2.6.32.43/drivers/crypto/hifn_795x.c 2011-05-16 21:46:57.000000000 -0400
28996@@ -1655,6 +1655,8 @@ static int hifn_test(struct hifn_device
28997 0xCA, 0x34, 0x2B, 0x2E};
28998 struct scatterlist sg;
28999
29000+ pax_track_stack();
29001+
29002 memset(src, 0, sizeof(src));
29003 memset(ctx.key, 0, sizeof(ctx.key));
29004
29005diff -urNp linux-2.6.32.43/drivers/crypto/padlock-aes.c linux-2.6.32.43/drivers/crypto/padlock-aes.c
29006--- linux-2.6.32.43/drivers/crypto/padlock-aes.c 2011-03-27 14:31:47.000000000 -0400
29007+++ linux-2.6.32.43/drivers/crypto/padlock-aes.c 2011-05-16 21:46:57.000000000 -0400
29008@@ -108,6 +108,8 @@ static int aes_set_key(struct crypto_tfm
29009 struct crypto_aes_ctx gen_aes;
29010 int cpu;
29011
29012+ pax_track_stack();
29013+
29014 if (key_len % 8) {
29015 *flags |= CRYPTO_TFM_RES_BAD_KEY_LEN;
29016 return -EINVAL;
29017diff -urNp linux-2.6.32.43/drivers/dma/ioat/dma.c linux-2.6.32.43/drivers/dma/ioat/dma.c
29018--- linux-2.6.32.43/drivers/dma/ioat/dma.c 2011-03-27 14:31:47.000000000 -0400
29019+++ linux-2.6.32.43/drivers/dma/ioat/dma.c 2011-04-17 15:56:46.000000000 -0400
29020@@ -1146,7 +1146,7 @@ ioat_attr_show(struct kobject *kobj, str
29021 return entry->show(&chan->common, page);
29022 }
29023
29024-struct sysfs_ops ioat_sysfs_ops = {
29025+const struct sysfs_ops ioat_sysfs_ops = {
29026 .show = ioat_attr_show,
29027 };
29028
29029diff -urNp linux-2.6.32.43/drivers/dma/ioat/dma.h linux-2.6.32.43/drivers/dma/ioat/dma.h
29030--- linux-2.6.32.43/drivers/dma/ioat/dma.h 2011-03-27 14:31:47.000000000 -0400
29031+++ linux-2.6.32.43/drivers/dma/ioat/dma.h 2011-04-17 15:56:46.000000000 -0400
29032@@ -347,7 +347,7 @@ bool ioat_cleanup_preamble(struct ioat_c
29033 unsigned long *phys_complete);
29034 void ioat_kobject_add(struct ioatdma_device *device, struct kobj_type *type);
29035 void ioat_kobject_del(struct ioatdma_device *device);
29036-extern struct sysfs_ops ioat_sysfs_ops;
29037+extern const struct sysfs_ops ioat_sysfs_ops;
29038 extern struct ioat_sysfs_entry ioat_version_attr;
29039 extern struct ioat_sysfs_entry ioat_cap_attr;
29040 #endif /* IOATDMA_H */
29041diff -urNp linux-2.6.32.43/drivers/edac/edac_device_sysfs.c linux-2.6.32.43/drivers/edac/edac_device_sysfs.c
29042--- linux-2.6.32.43/drivers/edac/edac_device_sysfs.c 2011-03-27 14:31:47.000000000 -0400
29043+++ linux-2.6.32.43/drivers/edac/edac_device_sysfs.c 2011-04-17 15:56:46.000000000 -0400
29044@@ -137,7 +137,7 @@ static ssize_t edac_dev_ctl_info_store(s
29045 }
29046
29047 /* edac_dev file operations for an 'ctl_info' */
29048-static struct sysfs_ops device_ctl_info_ops = {
29049+static const struct sysfs_ops device_ctl_info_ops = {
29050 .show = edac_dev_ctl_info_show,
29051 .store = edac_dev_ctl_info_store
29052 };
29053@@ -373,7 +373,7 @@ static ssize_t edac_dev_instance_store(s
29054 }
29055
29056 /* edac_dev file operations for an 'instance' */
29057-static struct sysfs_ops device_instance_ops = {
29058+static const struct sysfs_ops device_instance_ops = {
29059 .show = edac_dev_instance_show,
29060 .store = edac_dev_instance_store
29061 };
29062@@ -476,7 +476,7 @@ static ssize_t edac_dev_block_store(stru
29063 }
29064
29065 /* edac_dev file operations for a 'block' */
29066-static struct sysfs_ops device_block_ops = {
29067+static const struct sysfs_ops device_block_ops = {
29068 .show = edac_dev_block_show,
29069 .store = edac_dev_block_store
29070 };
29071diff -urNp linux-2.6.32.43/drivers/edac/edac_mc_sysfs.c linux-2.6.32.43/drivers/edac/edac_mc_sysfs.c
29072--- linux-2.6.32.43/drivers/edac/edac_mc_sysfs.c 2011-03-27 14:31:47.000000000 -0400
29073+++ linux-2.6.32.43/drivers/edac/edac_mc_sysfs.c 2011-04-17 15:56:46.000000000 -0400
29074@@ -245,7 +245,7 @@ static ssize_t csrowdev_store(struct kob
29075 return -EIO;
29076 }
29077
29078-static struct sysfs_ops csrowfs_ops = {
29079+static const struct sysfs_ops csrowfs_ops = {
29080 .show = csrowdev_show,
29081 .store = csrowdev_store
29082 };
29083@@ -575,7 +575,7 @@ static ssize_t mcidev_store(struct kobje
29084 }
29085
29086 /* Intermediate show/store table */
29087-static struct sysfs_ops mci_ops = {
29088+static const struct sysfs_ops mci_ops = {
29089 .show = mcidev_show,
29090 .store = mcidev_store
29091 };
29092diff -urNp linux-2.6.32.43/drivers/edac/edac_pci_sysfs.c linux-2.6.32.43/drivers/edac/edac_pci_sysfs.c
29093--- linux-2.6.32.43/drivers/edac/edac_pci_sysfs.c 2011-03-27 14:31:47.000000000 -0400
29094+++ linux-2.6.32.43/drivers/edac/edac_pci_sysfs.c 2011-05-04 17:56:20.000000000 -0400
29095@@ -25,8 +25,8 @@ static int edac_pci_log_pe = 1; /* log
29096 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
29097 static int edac_pci_poll_msec = 1000; /* one second workq period */
29098
29099-static atomic_t pci_parity_count = ATOMIC_INIT(0);
29100-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
29101+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
29102+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
29103
29104 static struct kobject *edac_pci_top_main_kobj;
29105 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
29106@@ -121,7 +121,7 @@ static ssize_t edac_pci_instance_store(s
29107 }
29108
29109 /* fs_ops table */
29110-static struct sysfs_ops pci_instance_ops = {
29111+static const struct sysfs_ops pci_instance_ops = {
29112 .show = edac_pci_instance_show,
29113 .store = edac_pci_instance_store
29114 };
29115@@ -261,7 +261,7 @@ static ssize_t edac_pci_dev_store(struct
29116 return -EIO;
29117 }
29118
29119-static struct sysfs_ops edac_pci_sysfs_ops = {
29120+static const struct sysfs_ops edac_pci_sysfs_ops = {
29121 .show = edac_pci_dev_show,
29122 .store = edac_pci_dev_store
29123 };
29124@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(str
29125 edac_printk(KERN_CRIT, EDAC_PCI,
29126 "Signaled System Error on %s\n",
29127 pci_name(dev));
29128- atomic_inc(&pci_nonparity_count);
29129+ atomic_inc_unchecked(&pci_nonparity_count);
29130 }
29131
29132 if (status & (PCI_STATUS_PARITY)) {
29133@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(str
29134 "Master Data Parity Error on %s\n",
29135 pci_name(dev));
29136
29137- atomic_inc(&pci_parity_count);
29138+ atomic_inc_unchecked(&pci_parity_count);
29139 }
29140
29141 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29142@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(str
29143 "Detected Parity Error on %s\n",
29144 pci_name(dev));
29145
29146- atomic_inc(&pci_parity_count);
29147+ atomic_inc_unchecked(&pci_parity_count);
29148 }
29149 }
29150
29151@@ -616,7 +616,7 @@ static void edac_pci_dev_parity_test(str
29152 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
29153 "Signaled System Error on %s\n",
29154 pci_name(dev));
29155- atomic_inc(&pci_nonparity_count);
29156+ atomic_inc_unchecked(&pci_nonparity_count);
29157 }
29158
29159 if (status & (PCI_STATUS_PARITY)) {
29160@@ -624,7 +624,7 @@ static void edac_pci_dev_parity_test(str
29161 "Master Data Parity Error on "
29162 "%s\n", pci_name(dev));
29163
29164- atomic_inc(&pci_parity_count);
29165+ atomic_inc_unchecked(&pci_parity_count);
29166 }
29167
29168 if (status & (PCI_STATUS_DETECTED_PARITY)) {
29169@@ -632,7 +632,7 @@ static void edac_pci_dev_parity_test(str
29170 "Detected Parity Error on %s\n",
29171 pci_name(dev));
29172
29173- atomic_inc(&pci_parity_count);
29174+ atomic_inc_unchecked(&pci_parity_count);
29175 }
29176 }
29177 }
29178@@ -674,7 +674,7 @@ void edac_pci_do_parity_check(void)
29179 if (!check_pci_errors)
29180 return;
29181
29182- before_count = atomic_read(&pci_parity_count);
29183+ before_count = atomic_read_unchecked(&pci_parity_count);
29184
29185 /* scan all PCI devices looking for a Parity Error on devices and
29186 * bridges.
29187@@ -686,7 +686,7 @@ void edac_pci_do_parity_check(void)
29188 /* Only if operator has selected panic on PCI Error */
29189 if (edac_pci_get_panic_on_pe()) {
29190 /* If the count is different 'after' from 'before' */
29191- if (before_count != atomic_read(&pci_parity_count))
29192+ if (before_count != atomic_read_unchecked(&pci_parity_count))
29193 panic("EDAC: PCI Parity Error");
29194 }
29195 }
29196diff -urNp linux-2.6.32.43/drivers/firewire/core-card.c linux-2.6.32.43/drivers/firewire/core-card.c
29197--- linux-2.6.32.43/drivers/firewire/core-card.c 2011-03-27 14:31:47.000000000 -0400
29198+++ linux-2.6.32.43/drivers/firewire/core-card.c 2011-08-05 20:33:55.000000000 -0400
29199@@ -569,8 +569,10 @@ void fw_core_remove_card(struct fw_card
29200 mutex_unlock(&card_mutex);
29201
29202 /* Switch off most of the card driver interface. */
29203- dummy_driver.free_iso_context = card->driver->free_iso_context;
29204- dummy_driver.stop_iso = card->driver->stop_iso;
29205+ pax_open_kernel();
29206+ *(void **)&dummy_driver.free_iso_context = card->driver->free_iso_context;
29207+ *(void **)&dummy_driver.stop_iso = card->driver->stop_iso;
29208+ pax_close_kernel();
29209 card->driver = &dummy_driver;
29210
29211 fw_destroy_nodes(card);
29212diff -urNp linux-2.6.32.43/drivers/firewire/core-cdev.c linux-2.6.32.43/drivers/firewire/core-cdev.c
29213--- linux-2.6.32.43/drivers/firewire/core-cdev.c 2011-03-27 14:31:47.000000000 -0400
29214+++ linux-2.6.32.43/drivers/firewire/core-cdev.c 2011-04-17 15:56:46.000000000 -0400
29215@@ -1141,8 +1141,7 @@ static int init_iso_resource(struct clie
29216 int ret;
29217
29218 if ((request->channels == 0 && request->bandwidth == 0) ||
29219- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL ||
29220- request->bandwidth < 0)
29221+ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL)
29222 return -EINVAL;
29223
29224 r = kmalloc(sizeof(*r), GFP_KERNEL);
29225diff -urNp linux-2.6.32.43/drivers/firewire/core-transaction.c linux-2.6.32.43/drivers/firewire/core-transaction.c
29226--- linux-2.6.32.43/drivers/firewire/core-transaction.c 2011-03-27 14:31:47.000000000 -0400
29227+++ linux-2.6.32.43/drivers/firewire/core-transaction.c 2011-05-16 21:46:57.000000000 -0400
29228@@ -36,6 +36,7 @@
29229 #include <linux/string.h>
29230 #include <linux/timer.h>
29231 #include <linux/types.h>
29232+#include <linux/sched.h>
29233
29234 #include <asm/byteorder.h>
29235
29236@@ -344,6 +345,8 @@ int fw_run_transaction(struct fw_card *c
29237 struct transaction_callback_data d;
29238 struct fw_transaction t;
29239
29240+ pax_track_stack();
29241+
29242 init_completion(&d.done);
29243 d.payload = payload;
29244 fw_send_request(card, &t, tcode, destination_id, generation, speed,
29245diff -urNp linux-2.6.32.43/drivers/firmware/dmi_scan.c linux-2.6.32.43/drivers/firmware/dmi_scan.c
29246--- linux-2.6.32.43/drivers/firmware/dmi_scan.c 2011-03-27 14:31:47.000000000 -0400
29247+++ linux-2.6.32.43/drivers/firmware/dmi_scan.c 2011-04-17 15:56:46.000000000 -0400
29248@@ -391,11 +391,6 @@ void __init dmi_scan_machine(void)
29249 }
29250 }
29251 else {
29252- /*
29253- * no iounmap() for that ioremap(); it would be a no-op, but
29254- * it's so early in setup that sucker gets confused into doing
29255- * what it shouldn't if we actually call it.
29256- */
29257 p = dmi_ioremap(0xF0000, 0x10000);
29258 if (p == NULL)
29259 goto error;
29260diff -urNp linux-2.6.32.43/drivers/firmware/edd.c linux-2.6.32.43/drivers/firmware/edd.c
29261--- linux-2.6.32.43/drivers/firmware/edd.c 2011-03-27 14:31:47.000000000 -0400
29262+++ linux-2.6.32.43/drivers/firmware/edd.c 2011-04-17 15:56:46.000000000 -0400
29263@@ -122,7 +122,7 @@ edd_attr_show(struct kobject * kobj, str
29264 return ret;
29265 }
29266
29267-static struct sysfs_ops edd_attr_ops = {
29268+static const struct sysfs_ops edd_attr_ops = {
29269 .show = edd_attr_show,
29270 };
29271
29272diff -urNp linux-2.6.32.43/drivers/firmware/efivars.c linux-2.6.32.43/drivers/firmware/efivars.c
29273--- linux-2.6.32.43/drivers/firmware/efivars.c 2011-03-27 14:31:47.000000000 -0400
29274+++ linux-2.6.32.43/drivers/firmware/efivars.c 2011-04-17 15:56:46.000000000 -0400
29275@@ -362,7 +362,7 @@ static ssize_t efivar_attr_store(struct
29276 return ret;
29277 }
29278
29279-static struct sysfs_ops efivar_attr_ops = {
29280+static const struct sysfs_ops efivar_attr_ops = {
29281 .show = efivar_attr_show,
29282 .store = efivar_attr_store,
29283 };
29284diff -urNp linux-2.6.32.43/drivers/firmware/iscsi_ibft.c linux-2.6.32.43/drivers/firmware/iscsi_ibft.c
29285--- linux-2.6.32.43/drivers/firmware/iscsi_ibft.c 2011-03-27 14:31:47.000000000 -0400
29286+++ linux-2.6.32.43/drivers/firmware/iscsi_ibft.c 2011-04-17 15:56:46.000000000 -0400
29287@@ -525,7 +525,7 @@ static ssize_t ibft_show_attribute(struc
29288 return ret;
29289 }
29290
29291-static struct sysfs_ops ibft_attr_ops = {
29292+static const struct sysfs_ops ibft_attr_ops = {
29293 .show = ibft_show_attribute,
29294 };
29295
29296diff -urNp linux-2.6.32.43/drivers/firmware/memmap.c linux-2.6.32.43/drivers/firmware/memmap.c
29297--- linux-2.6.32.43/drivers/firmware/memmap.c 2011-03-27 14:31:47.000000000 -0400
29298+++ linux-2.6.32.43/drivers/firmware/memmap.c 2011-04-17 15:56:46.000000000 -0400
29299@@ -74,7 +74,7 @@ static struct attribute *def_attrs[] = {
29300 NULL
29301 };
29302
29303-static struct sysfs_ops memmap_attr_ops = {
29304+static const struct sysfs_ops memmap_attr_ops = {
29305 .show = memmap_attr_show,
29306 };
29307
29308diff -urNp linux-2.6.32.43/drivers/gpio/vr41xx_giu.c linux-2.6.32.43/drivers/gpio/vr41xx_giu.c
29309--- linux-2.6.32.43/drivers/gpio/vr41xx_giu.c 2011-03-27 14:31:47.000000000 -0400
29310+++ linux-2.6.32.43/drivers/gpio/vr41xx_giu.c 2011-05-04 17:56:28.000000000 -0400
29311@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
29312 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
29313 maskl, pendl, maskh, pendh);
29314
29315- atomic_inc(&irq_err_count);
29316+ atomic_inc_unchecked(&irq_err_count);
29317
29318 return -EINVAL;
29319 }
29320diff -urNp linux-2.6.32.43/drivers/gpu/drm/drm_crtc_helper.c linux-2.6.32.43/drivers/gpu/drm/drm_crtc_helper.c
29321--- linux-2.6.32.43/drivers/gpu/drm/drm_crtc_helper.c 2011-03-27 14:31:47.000000000 -0400
29322+++ linux-2.6.32.43/drivers/gpu/drm/drm_crtc_helper.c 2011-05-16 21:46:57.000000000 -0400
29323@@ -573,7 +573,7 @@ static bool drm_encoder_crtc_ok(struct d
29324 struct drm_crtc *tmp;
29325 int crtc_mask = 1;
29326
29327- WARN(!crtc, "checking null crtc?");
29328+ BUG_ON(!crtc);
29329
29330 dev = crtc->dev;
29331
29332@@ -642,6 +642,8 @@ bool drm_crtc_helper_set_mode(struct drm
29333
29334 adjusted_mode = drm_mode_duplicate(dev, mode);
29335
29336+ pax_track_stack();
29337+
29338 crtc->enabled = drm_helper_crtc_in_use(crtc);
29339
29340 if (!crtc->enabled)
29341diff -urNp linux-2.6.32.43/drivers/gpu/drm/drm_drv.c linux-2.6.32.43/drivers/gpu/drm/drm_drv.c
29342--- linux-2.6.32.43/drivers/gpu/drm/drm_drv.c 2011-03-27 14:31:47.000000000 -0400
29343+++ linux-2.6.32.43/drivers/gpu/drm/drm_drv.c 2011-04-17 15:56:46.000000000 -0400
29344@@ -417,7 +417,7 @@ int drm_ioctl(struct inode *inode, struc
29345 char *kdata = NULL;
29346
29347 atomic_inc(&dev->ioctl_count);
29348- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
29349+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
29350 ++file_priv->ioctl_count;
29351
29352 DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n",
29353diff -urNp linux-2.6.32.43/drivers/gpu/drm/drm_fops.c linux-2.6.32.43/drivers/gpu/drm/drm_fops.c
29354--- linux-2.6.32.43/drivers/gpu/drm/drm_fops.c 2011-03-27 14:31:47.000000000 -0400
29355+++ linux-2.6.32.43/drivers/gpu/drm/drm_fops.c 2011-04-17 15:56:46.000000000 -0400
29356@@ -66,7 +66,7 @@ static int drm_setup(struct drm_device *
29357 }
29358
29359 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
29360- atomic_set(&dev->counts[i], 0);
29361+ atomic_set_unchecked(&dev->counts[i], 0);
29362
29363 dev->sigdata.lock = NULL;
29364
29365@@ -130,9 +130,9 @@ int drm_open(struct inode *inode, struct
29366
29367 retcode = drm_open_helper(inode, filp, dev);
29368 if (!retcode) {
29369- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
29370+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
29371 spin_lock(&dev->count_lock);
29372- if (!dev->open_count++) {
29373+ if (local_inc_return(&dev->open_count) == 1) {
29374 spin_unlock(&dev->count_lock);
29375 retcode = drm_setup(dev);
29376 goto out;
29377@@ -435,7 +435,7 @@ int drm_release(struct inode *inode, str
29378
29379 lock_kernel();
29380
29381- DRM_DEBUG("open_count = %d\n", dev->open_count);
29382+ DRM_DEBUG("open_count = %d\n", local_read(&dev->open_count));
29383
29384 if (dev->driver->preclose)
29385 dev->driver->preclose(dev, file_priv);
29386@@ -447,7 +447,7 @@ int drm_release(struct inode *inode, str
29387 DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
29388 task_pid_nr(current),
29389 (long)old_encode_dev(file_priv->minor->device),
29390- dev->open_count);
29391+ local_read(&dev->open_count));
29392
29393 /* if the master has gone away we can't do anything with the lock */
29394 if (file_priv->minor->master)
29395@@ -524,9 +524,9 @@ int drm_release(struct inode *inode, str
29396 * End inline drm_release
29397 */
29398
29399- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
29400+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
29401 spin_lock(&dev->count_lock);
29402- if (!--dev->open_count) {
29403+ if (local_dec_and_test(&dev->open_count)) {
29404 if (atomic_read(&dev->ioctl_count)) {
29405 DRM_ERROR("Device busy: %d\n",
29406 atomic_read(&dev->ioctl_count));
29407diff -urNp linux-2.6.32.43/drivers/gpu/drm/drm_gem.c linux-2.6.32.43/drivers/gpu/drm/drm_gem.c
29408--- linux-2.6.32.43/drivers/gpu/drm/drm_gem.c 2011-03-27 14:31:47.000000000 -0400
29409+++ linux-2.6.32.43/drivers/gpu/drm/drm_gem.c 2011-04-17 15:56:46.000000000 -0400
29410@@ -83,11 +83,11 @@ drm_gem_init(struct drm_device *dev)
29411 spin_lock_init(&dev->object_name_lock);
29412 idr_init(&dev->object_name_idr);
29413 atomic_set(&dev->object_count, 0);
29414- atomic_set(&dev->object_memory, 0);
29415+ atomic_set_unchecked(&dev->object_memory, 0);
29416 atomic_set(&dev->pin_count, 0);
29417- atomic_set(&dev->pin_memory, 0);
29418+ atomic_set_unchecked(&dev->pin_memory, 0);
29419 atomic_set(&dev->gtt_count, 0);
29420- atomic_set(&dev->gtt_memory, 0);
29421+ atomic_set_unchecked(&dev->gtt_memory, 0);
29422
29423 mm = kzalloc(sizeof(struct drm_gem_mm), GFP_KERNEL);
29424 if (!mm) {
29425@@ -150,7 +150,7 @@ drm_gem_object_alloc(struct drm_device *
29426 goto fput;
29427 }
29428 atomic_inc(&dev->object_count);
29429- atomic_add(obj->size, &dev->object_memory);
29430+ atomic_add_unchecked(obj->size, &dev->object_memory);
29431 return obj;
29432 fput:
29433 fput(obj->filp);
29434@@ -429,7 +429,7 @@ drm_gem_object_free(struct kref *kref)
29435
29436 fput(obj->filp);
29437 atomic_dec(&dev->object_count);
29438- atomic_sub(obj->size, &dev->object_memory);
29439+ atomic_sub_unchecked(obj->size, &dev->object_memory);
29440 kfree(obj);
29441 }
29442 EXPORT_SYMBOL(drm_gem_object_free);
29443diff -urNp linux-2.6.32.43/drivers/gpu/drm/drm_info.c linux-2.6.32.43/drivers/gpu/drm/drm_info.c
29444--- linux-2.6.32.43/drivers/gpu/drm/drm_info.c 2011-03-27 14:31:47.000000000 -0400
29445+++ linux-2.6.32.43/drivers/gpu/drm/drm_info.c 2011-04-17 15:56:46.000000000 -0400
29446@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void
29447 struct drm_local_map *map;
29448 struct drm_map_list *r_list;
29449
29450- /* Hardcoded from _DRM_FRAME_BUFFER,
29451- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
29452- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
29453- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
29454+ static const char * const types[] = {
29455+ [_DRM_FRAME_BUFFER] = "FB",
29456+ [_DRM_REGISTERS] = "REG",
29457+ [_DRM_SHM] = "SHM",
29458+ [_DRM_AGP] = "AGP",
29459+ [_DRM_SCATTER_GATHER] = "SG",
29460+ [_DRM_CONSISTENT] = "PCI",
29461+ [_DRM_GEM] = "GEM" };
29462 const char *type;
29463 int i;
29464
29465@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void
29466 map = r_list->map;
29467 if (!map)
29468 continue;
29469- if (map->type < 0 || map->type > 5)
29470+ if (map->type >= ARRAY_SIZE(types))
29471 type = "??";
29472 else
29473 type = types[map->type];
29474@@ -265,10 +269,10 @@ int drm_gem_object_info(struct seq_file
29475 struct drm_device *dev = node->minor->dev;
29476
29477 seq_printf(m, "%d objects\n", atomic_read(&dev->object_count));
29478- seq_printf(m, "%d object bytes\n", atomic_read(&dev->object_memory));
29479+ seq_printf(m, "%d object bytes\n", atomic_read_unchecked(&dev->object_memory));
29480 seq_printf(m, "%d pinned\n", atomic_read(&dev->pin_count));
29481- seq_printf(m, "%d pin bytes\n", atomic_read(&dev->pin_memory));
29482- seq_printf(m, "%d gtt bytes\n", atomic_read(&dev->gtt_memory));
29483+ seq_printf(m, "%d pin bytes\n", atomic_read_unchecked(&dev->pin_memory));
29484+ seq_printf(m, "%d gtt bytes\n", atomic_read_unchecked(&dev->gtt_memory));
29485 seq_printf(m, "%d gtt total\n", dev->gtt_total);
29486 return 0;
29487 }
29488@@ -288,7 +292,11 @@ int drm_vma_info(struct seq_file *m, voi
29489 mutex_lock(&dev->struct_mutex);
29490 seq_printf(m, "vma use count: %d, high_memory = %p, 0x%08llx\n",
29491 atomic_read(&dev->vma_count),
29492+#ifdef CONFIG_GRKERNSEC_HIDESYM
29493+ NULL, 0);
29494+#else
29495 high_memory, (u64)virt_to_phys(high_memory));
29496+#endif
29497
29498 list_for_each_entry(pt, &dev->vmalist, head) {
29499 vma = pt->vma;
29500@@ -296,14 +304,23 @@ int drm_vma_info(struct seq_file *m, voi
29501 continue;
29502 seq_printf(m,
29503 "\n%5d 0x%08lx-0x%08lx %c%c%c%c%c%c 0x%08lx000",
29504- pt->pid, vma->vm_start, vma->vm_end,
29505+ pt->pid,
29506+#ifdef CONFIG_GRKERNSEC_HIDESYM
29507+ 0, 0,
29508+#else
29509+ vma->vm_start, vma->vm_end,
29510+#endif
29511 vma->vm_flags & VM_READ ? 'r' : '-',
29512 vma->vm_flags & VM_WRITE ? 'w' : '-',
29513 vma->vm_flags & VM_EXEC ? 'x' : '-',
29514 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
29515 vma->vm_flags & VM_LOCKED ? 'l' : '-',
29516 vma->vm_flags & VM_IO ? 'i' : '-',
29517+#ifdef CONFIG_GRKERNSEC_HIDESYM
29518+ 0);
29519+#else
29520 vma->vm_pgoff);
29521+#endif
29522
29523 #if defined(__i386__)
29524 pgprot = pgprot_val(vma->vm_page_prot);
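
The drm_info.c hunk above replaces the positional string table and its hand-written `type < 0 || type > 5` test with a designated-initializer array indexed by the map-type constants, bounds-checked against the array size, and extended with a GEM entry. A standalone illustration of that table style follows; the enum and names are made up.

/* Designated-initializer lookup table, bounds-checked with ARRAY_SIZE()
 * so new entries cannot silently read past the end of the table. */
#include <stdio.h>

enum map_type { MAP_FB, MAP_REG, MAP_SHM, MAP_AGP, MAP_SG, MAP_PCI, MAP_GEM };

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const char *const type_names[] = {
	[MAP_FB]  = "FB",
	[MAP_REG] = "REG",
	[MAP_SHM] = "SHM",
	[MAP_AGP] = "AGP",
	[MAP_SG]  = "SG",
	[MAP_PCI] = "PCI",
	[MAP_GEM] = "GEM",
};

static const char *type_name(unsigned int type)
{
	/* unsigned compare also rejects "negative" values in one test */
	if (type >= ARRAY_SIZE(type_names) || !type_names[type])
		return "??";
	return type_names[type];
}

int main(void)
{
	printf("%s %s %s\n", type_name(MAP_SHM), type_name(MAP_GEM), type_name(42));
	return 0;
}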
29525diff -urNp linux-2.6.32.43/drivers/gpu/drm/drm_ioctl.c linux-2.6.32.43/drivers/gpu/drm/drm_ioctl.c
29526--- linux-2.6.32.43/drivers/gpu/drm/drm_ioctl.c 2011-03-27 14:31:47.000000000 -0400
29527+++ linux-2.6.32.43/drivers/gpu/drm/drm_ioctl.c 2011-04-17 15:56:46.000000000 -0400
29528@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev,
29529 stats->data[i].value =
29530 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
29531 else
29532- stats->data[i].value = atomic_read(&dev->counts[i]);
29533+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
29534 stats->data[i].type = dev->types[i];
29535 }
29536
29537diff -urNp linux-2.6.32.43/drivers/gpu/drm/drm_lock.c linux-2.6.32.43/drivers/gpu/drm/drm_lock.c
29538--- linux-2.6.32.43/drivers/gpu/drm/drm_lock.c 2011-03-27 14:31:47.000000000 -0400
29539+++ linux-2.6.32.43/drivers/gpu/drm/drm_lock.c 2011-04-17 15:56:46.000000000 -0400
29540@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi
29541 if (drm_lock_take(&master->lock, lock->context)) {
29542 master->lock.file_priv = file_priv;
29543 master->lock.lock_time = jiffies;
29544- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
29545+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
29546 break; /* Got lock */
29547 }
29548
29549@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v
29550 return -EINVAL;
29551 }
29552
29553- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
29554+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
29555
29556 /* kernel_context_switch isn't used by any of the x86 drm
29557 * modules but is required by the Sparc driver.
29558diff -urNp linux-2.6.32.43/drivers/gpu/drm/i810/i810_dma.c linux-2.6.32.43/drivers/gpu/drm/i810/i810_dma.c
29559--- linux-2.6.32.43/drivers/gpu/drm/i810/i810_dma.c 2011-03-27 14:31:47.000000000 -0400
29560+++ linux-2.6.32.43/drivers/gpu/drm/i810/i810_dma.c 2011-04-17 15:56:46.000000000 -0400
29561@@ -952,8 +952,8 @@ static int i810_dma_vertex(struct drm_de
29562 dma->buflist[vertex->idx],
29563 vertex->discard, vertex->used);
29564
29565- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29566- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29567+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
29568+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29569 sarea_priv->last_enqueue = dev_priv->counter - 1;
29570 sarea_priv->last_dispatch = (int)hw_status[5];
29571
29572@@ -1115,8 +1115,8 @@ static int i810_dma_mc(struct drm_device
29573 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
29574 mc->last_render);
29575
29576- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29577- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
29578+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
29579+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
29580 sarea_priv->last_enqueue = dev_priv->counter - 1;
29581 sarea_priv->last_dispatch = (int)hw_status[5];
29582
29583diff -urNp linux-2.6.32.43/drivers/gpu/drm/i810/i810_drv.h linux-2.6.32.43/drivers/gpu/drm/i810/i810_drv.h
29584--- linux-2.6.32.43/drivers/gpu/drm/i810/i810_drv.h 2011-03-27 14:31:47.000000000 -0400
29585+++ linux-2.6.32.43/drivers/gpu/drm/i810/i810_drv.h 2011-05-04 17:56:28.000000000 -0400
29586@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
29587 int page_flipping;
29588
29589 wait_queue_head_t irq_queue;
29590- atomic_t irq_received;
29591- atomic_t irq_emitted;
29592+ atomic_unchecked_t irq_received;
29593+ atomic_unchecked_t irq_emitted;
29594
29595 int front_offset;
29596 } drm_i810_private_t;
29597diff -urNp linux-2.6.32.43/drivers/gpu/drm/i830/i830_drv.h linux-2.6.32.43/drivers/gpu/drm/i830/i830_drv.h
29598--- linux-2.6.32.43/drivers/gpu/drm/i830/i830_drv.h 2011-03-27 14:31:47.000000000 -0400
29599+++ linux-2.6.32.43/drivers/gpu/drm/i830/i830_drv.h 2011-05-04 17:56:28.000000000 -0400
29600@@ -115,8 +115,8 @@ typedef struct drm_i830_private {
29601 int page_flipping;
29602
29603 wait_queue_head_t irq_queue;
29604- atomic_t irq_received;
29605- atomic_t irq_emitted;
29606+ atomic_unchecked_t irq_received;
29607+ atomic_unchecked_t irq_emitted;
29608
29609 int use_mi_batchbuffer_start;
29610
29611diff -urNp linux-2.6.32.43/drivers/gpu/drm/i830/i830_irq.c linux-2.6.32.43/drivers/gpu/drm/i830/i830_irq.c
29612--- linux-2.6.32.43/drivers/gpu/drm/i830/i830_irq.c 2011-03-27 14:31:47.000000000 -0400
29613+++ linux-2.6.32.43/drivers/gpu/drm/i830/i830_irq.c 2011-05-04 17:56:28.000000000 -0400
29614@@ -47,7 +47,7 @@ irqreturn_t i830_driver_irq_handler(DRM_
29615
29616 I830_WRITE16(I830REG_INT_IDENTITY_R, temp);
29617
29618- atomic_inc(&dev_priv->irq_received);
29619+ atomic_inc_unchecked(&dev_priv->irq_received);
29620 wake_up_interruptible(&dev_priv->irq_queue);
29621
29622 return IRQ_HANDLED;
29623@@ -60,14 +60,14 @@ static int i830_emit_irq(struct drm_devi
29624
29625 DRM_DEBUG("%s\n", __func__);
29626
29627- atomic_inc(&dev_priv->irq_emitted);
29628+ atomic_inc_unchecked(&dev_priv->irq_emitted);
29629
29630 BEGIN_LP_RING(2);
29631 OUT_RING(0);
29632 OUT_RING(GFX_OP_USER_INTERRUPT);
29633 ADVANCE_LP_RING();
29634
29635- return atomic_read(&dev_priv->irq_emitted);
29636+ return atomic_read_unchecked(&dev_priv->irq_emitted);
29637 }
29638
29639 static int i830_wait_irq(struct drm_device * dev, int irq_nr)
29640@@ -79,7 +79,7 @@ static int i830_wait_irq(struct drm_devi
29641
29642 DRM_DEBUG("%s\n", __func__);
29643
29644- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
29645+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
29646 return 0;
29647
29648 dev_priv->sarea_priv->perf_boxes |= I830_BOX_WAIT;
29649@@ -88,7 +88,7 @@ static int i830_wait_irq(struct drm_devi
29650
29651 for (;;) {
29652 __set_current_state(TASK_INTERRUPTIBLE);
29653- if (atomic_read(&dev_priv->irq_received) >= irq_nr)
29654+ if (atomic_read_unchecked(&dev_priv->irq_received) >= irq_nr)
29655 break;
29656 if ((signed)(end - jiffies) <= 0) {
29657 DRM_ERROR("timeout iir %x imr %x ier %x hwstam %x\n",
29658@@ -163,8 +163,8 @@ void i830_driver_irq_preinstall(struct d
29659 I830_WRITE16(I830REG_HWSTAM, 0xffff);
29660 I830_WRITE16(I830REG_INT_MASK_R, 0x0);
29661 I830_WRITE16(I830REG_INT_ENABLE_R, 0x0);
29662- atomic_set(&dev_priv->irq_received, 0);
29663- atomic_set(&dev_priv->irq_emitted, 0);
29664+ atomic_set_unchecked(&dev_priv->irq_received, 0);
29665+ atomic_set_unchecked(&dev_priv->irq_emitted, 0);
29666 init_waitqueue_head(&dev_priv->irq_queue);
29667 }
29668
29669diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ch7017.c
29670--- linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ch7017.c 2011-03-27 14:31:47.000000000 -0400
29671+++ linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ch7017.c 2011-04-17 15:56:46.000000000 -0400
29672@@ -443,7 +443,7 @@ static void ch7017_destroy(struct intel_
29673 }
29674 }
29675
29676-struct intel_dvo_dev_ops ch7017_ops = {
29677+const struct intel_dvo_dev_ops ch7017_ops = {
29678 .init = ch7017_init,
29679 .detect = ch7017_detect,
29680 .mode_valid = ch7017_mode_valid,
29681diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ch7xxx.c
29682--- linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-03-27 14:31:47.000000000 -0400
29683+++ linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ch7xxx.c 2011-04-17 15:56:46.000000000 -0400
29684@@ -356,7 +356,7 @@ static void ch7xxx_destroy(struct intel_
29685 }
29686 }
29687
29688-struct intel_dvo_dev_ops ch7xxx_ops = {
29689+const struct intel_dvo_dev_ops ch7xxx_ops = {
29690 .init = ch7xxx_init,
29691 .detect = ch7xxx_detect,
29692 .mode_valid = ch7xxx_mode_valid,
29693diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/dvo.h linux-2.6.32.43/drivers/gpu/drm/i915/dvo.h
29694--- linux-2.6.32.43/drivers/gpu/drm/i915/dvo.h 2011-03-27 14:31:47.000000000 -0400
29695+++ linux-2.6.32.43/drivers/gpu/drm/i915/dvo.h 2011-04-17 15:56:46.000000000 -0400
29696@@ -135,23 +135,23 @@ struct intel_dvo_dev_ops {
29697 *
29698 * \return singly-linked list of modes or NULL if no modes found.
29699 */
29700- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo);
29701+ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo);
29702
29703 /**
29704 * Clean up driver-specific bits of the output
29705 */
29706- void (*destroy) (struct intel_dvo_device *dvo);
29707+ void (* const destroy) (struct intel_dvo_device *dvo);
29708
29709 /**
29710 * Debugging hook to dump device registers to log file
29711 */
29712- void (*dump_regs)(struct intel_dvo_device *dvo);
29713+ void (* const dump_regs)(struct intel_dvo_device *dvo);
29714 };
29715
29716-extern struct intel_dvo_dev_ops sil164_ops;
29717-extern struct intel_dvo_dev_ops ch7xxx_ops;
29718-extern struct intel_dvo_dev_ops ivch_ops;
29719-extern struct intel_dvo_dev_ops tfp410_ops;
29720-extern struct intel_dvo_dev_ops ch7017_ops;
29721+extern const struct intel_dvo_dev_ops sil164_ops;
29722+extern const struct intel_dvo_dev_ops ch7xxx_ops;
29723+extern const struct intel_dvo_dev_ops ivch_ops;
29724+extern const struct intel_dvo_dev_ops tfp410_ops;
29725+extern const struct intel_dvo_dev_ops ch7017_ops;
29726
29727 #endif /* _INTEL_DVO_H */
29728diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ivch.c
29729--- linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ivch.c 2011-03-27 14:31:47.000000000 -0400
29730+++ linux-2.6.32.43/drivers/gpu/drm/i915/dvo_ivch.c 2011-04-17 15:56:46.000000000 -0400
29731@@ -430,7 +430,7 @@ static void ivch_destroy(struct intel_dv
29732 }
29733 }
29734
29735-struct intel_dvo_dev_ops ivch_ops= {
29736+const struct intel_dvo_dev_ops ivch_ops= {
29737 .init = ivch_init,
29738 .dpms = ivch_dpms,
29739 .save = ivch_save,
29740diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.32.43/drivers/gpu/drm/i915/dvo_sil164.c
29741--- linux-2.6.32.43/drivers/gpu/drm/i915/dvo_sil164.c 2011-03-27 14:31:47.000000000 -0400
29742+++ linux-2.6.32.43/drivers/gpu/drm/i915/dvo_sil164.c 2011-04-17 15:56:46.000000000 -0400
29743@@ -290,7 +290,7 @@ static void sil164_destroy(struct intel_
29744 }
29745 }
29746
29747-struct intel_dvo_dev_ops sil164_ops = {
29748+const struct intel_dvo_dev_ops sil164_ops = {
29749 .init = sil164_init,
29750 .detect = sil164_detect,
29751 .mode_valid = sil164_mode_valid,
29752diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.32.43/drivers/gpu/drm/i915/dvo_tfp410.c
29753--- linux-2.6.32.43/drivers/gpu/drm/i915/dvo_tfp410.c 2011-03-27 14:31:47.000000000 -0400
29754+++ linux-2.6.32.43/drivers/gpu/drm/i915/dvo_tfp410.c 2011-04-17 15:56:46.000000000 -0400
29755@@ -323,7 +323,7 @@ static void tfp410_destroy(struct intel_
29756 }
29757 }
29758
29759-struct intel_dvo_dev_ops tfp410_ops = {
29760+const struct intel_dvo_dev_ops tfp410_ops = {
29761 .init = tfp410_init,
29762 .detect = tfp410_detect,
29763 .mode_valid = tfp410_mode_valid,
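The dvo.h and dvo_*.c hunks above all apply the same hardening: the intel_dvo_dev_ops tables and their function-pointer members are declared const, which lets the compiler place them in read-only memory so the pointers cannot be redirected at run time. A minimal stand-alone sketch of the idea (demo_ops and real_probe are made-up names, not the i915 API):

/* Sketch: why const ops tables help. Illustrative names only. */
#include <stdio.h>

struct demo_ops {
	void (* const probe)(void);	/* the pointer member itself is immutable */
};

static void real_probe(void) { puts("probe"); }

static const struct demo_ops demo = {	/* whole object can live in .rodata */
	.probe = real_probe,
};

int main(void)
{
	demo.probe();
	/* demo.probe = some_other_fn;  -- rejected by the compiler; at run time
	 * the object sits in a read-only mapping, so it cannot be overwritten. */
	return 0;
}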
29764diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/i915_debugfs.c linux-2.6.32.43/drivers/gpu/drm/i915/i915_debugfs.c
29765--- linux-2.6.32.43/drivers/gpu/drm/i915/i915_debugfs.c 2011-03-27 14:31:47.000000000 -0400
29766+++ linux-2.6.32.43/drivers/gpu/drm/i915/i915_debugfs.c 2011-05-04 17:56:28.000000000 -0400
29767@@ -192,7 +192,7 @@ static int i915_interrupt_info(struct se
29768 I915_READ(GTIMR));
29769 }
29770 seq_printf(m, "Interrupts received: %d\n",
29771- atomic_read(&dev_priv->irq_received));
29772+ atomic_read_unchecked(&dev_priv->irq_received));
29773 if (dev_priv->hw_status_page != NULL) {
29774 seq_printf(m, "Current sequence: %d\n",
29775 i915_get_gem_seqno(dev));
29776diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/i915_drv.c linux-2.6.32.43/drivers/gpu/drm/i915/i915_drv.c
29777--- linux-2.6.32.43/drivers/gpu/drm/i915/i915_drv.c 2011-03-27 14:31:47.000000000 -0400
29778+++ linux-2.6.32.43/drivers/gpu/drm/i915/i915_drv.c 2011-04-17 15:56:46.000000000 -0400
29779@@ -285,7 +285,7 @@ i915_pci_resume(struct pci_dev *pdev)
29780 return i915_resume(dev);
29781 }
29782
29783-static struct vm_operations_struct i915_gem_vm_ops = {
29784+static const struct vm_operations_struct i915_gem_vm_ops = {
29785 .fault = i915_gem_fault,
29786 .open = drm_gem_vm_open,
29787 .close = drm_gem_vm_close,
29788diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/i915_drv.h linux-2.6.32.43/drivers/gpu/drm/i915/i915_drv.h
29789--- linux-2.6.32.43/drivers/gpu/drm/i915/i915_drv.h 2011-03-27 14:31:47.000000000 -0400
29790+++ linux-2.6.32.43/drivers/gpu/drm/i915/i915_drv.h 2011-08-05 20:33:55.000000000 -0400
29791@@ -168,7 +168,7 @@ struct drm_i915_display_funcs {
29792 /* display clock increase/decrease */
29793 /* pll clock increase/decrease */
29794 /* clock gating init */
29795-};
29796+} __no_const;
29797
29798 typedef struct drm_i915_private {
29799 struct drm_device *dev;
29800@@ -197,7 +197,7 @@ typedef struct drm_i915_private {
29801 int page_flipping;
29802
29803 wait_queue_head_t irq_queue;
29804- atomic_t irq_received;
29805+ atomic_unchecked_t irq_received;
29806 /** Protects user_irq_refcount and irq_mask_reg */
29807 spinlock_t user_irq_lock;
29808 /** Refcount for i915_user_irq_get() versus i915_user_irq_put(). */
29809diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/i915_gem.c linux-2.6.32.43/drivers/gpu/drm/i915/i915_gem.c
29810--- linux-2.6.32.43/drivers/gpu/drm/i915/i915_gem.c 2011-03-27 14:31:47.000000000 -0400
29811+++ linux-2.6.32.43/drivers/gpu/drm/i915/i915_gem.c 2011-04-17 15:56:46.000000000 -0400
29812@@ -102,7 +102,7 @@ i915_gem_get_aperture_ioctl(struct drm_d
29813
29814 args->aper_size = dev->gtt_total;
29815 args->aper_available_size = (args->aper_size -
29816- atomic_read(&dev->pin_memory));
29817+ atomic_read_unchecked(&dev->pin_memory));
29818
29819 return 0;
29820 }
29821@@ -492,6 +492,11 @@ i915_gem_pread_ioctl(struct drm_device *
29822 return -EINVAL;
29823 }
29824
29825+ if (!access_ok(VERIFY_WRITE, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
29826+ drm_gem_object_unreference(obj);
29827+ return -EFAULT;
29828+ }
29829+
29830 if (i915_gem_object_needs_bit17_swizzle(obj)) {
29831 ret = i915_gem_shmem_pread_slow(dev, obj, args, file_priv);
29832 } else {
29833@@ -965,6 +970,11 @@ i915_gem_pwrite_ioctl(struct drm_device
29834 return -EINVAL;
29835 }
29836
29837+ if (!access_ok(VERIFY_READ, (char __user *) (uintptr_t)args->data_ptr, args->size)) {
29838+ drm_gem_object_unreference(obj);
29839+ return -EFAULT;
29840+ }
29841+
29842 /* We can only do the GTT pwrite on untiled buffers, as otherwise
29843 * it would end up going through the fenced access, and we'll get
29844 * different detiling behavior between reading and writing.
29845@@ -2054,7 +2064,7 @@ i915_gem_object_unbind(struct drm_gem_ob
29846
29847 if (obj_priv->gtt_space) {
29848 atomic_dec(&dev->gtt_count);
29849- atomic_sub(obj->size, &dev->gtt_memory);
29850+ atomic_sub_unchecked(obj->size, &dev->gtt_memory);
29851
29852 drm_mm_put_block(obj_priv->gtt_space);
29853 obj_priv->gtt_space = NULL;
29854@@ -2697,7 +2707,7 @@ i915_gem_object_bind_to_gtt(struct drm_g
29855 goto search_free;
29856 }
29857 atomic_inc(&dev->gtt_count);
29858- atomic_add(obj->size, &dev->gtt_memory);
29859+ atomic_add_unchecked(obj->size, &dev->gtt_memory);
29860
29861 /* Assert that the object is not currently in any GPU domain. As it
29862 * wasn't in the GTT, there shouldn't be any way it could have been in
29863@@ -3751,9 +3761,9 @@ i915_gem_execbuffer(struct drm_device *d
29864 "%d/%d gtt bytes\n",
29865 atomic_read(&dev->object_count),
29866 atomic_read(&dev->pin_count),
29867- atomic_read(&dev->object_memory),
29868- atomic_read(&dev->pin_memory),
29869- atomic_read(&dev->gtt_memory),
29870+ atomic_read_unchecked(&dev->object_memory),
29871+ atomic_read_unchecked(&dev->pin_memory),
29872+ atomic_read_unchecked(&dev->gtt_memory),
29873 dev->gtt_total);
29874 }
29875 goto err;
29876@@ -3985,7 +3995,7 @@ i915_gem_object_pin(struct drm_gem_objec
29877 */
29878 if (obj_priv->pin_count == 1) {
29879 atomic_inc(&dev->pin_count);
29880- atomic_add(obj->size, &dev->pin_memory);
29881+ atomic_add_unchecked(obj->size, &dev->pin_memory);
29882 if (!obj_priv->active &&
29883 (obj->write_domain & I915_GEM_GPU_DOMAINS) == 0 &&
29884 !list_empty(&obj_priv->list))
29885@@ -4018,7 +4028,7 @@ i915_gem_object_unpin(struct drm_gem_obj
29886 list_move_tail(&obj_priv->list,
29887 &dev_priv->mm.inactive_list);
29888 atomic_dec(&dev->pin_count);
29889- atomic_sub(obj->size, &dev->pin_memory);
29890+ atomic_sub_unchecked(obj->size, &dev->pin_memory);
29891 }
29892 i915_verify_inactive(dev, __FILE__, __LINE__);
29893 }
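The two i915_gem pread/pwrite hunks above add an explicit access_ok() check on args->data_ptr before the GEM copy paths run, returning -EFAULT (and dropping the object reference) if the user-supplied range is not a valid user-space region. A rough user-space model of that "validate the pointer up front" pattern, with an invented USER_SPACE_LIMIT standing in for the real access_ok() logic:

/* Model of up-front user-pointer validation; not the kernel API. */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define USER_SPACE_LIMIT 0x0000800000000000ULL	/* illustrative boundary */

static int access_ok_model(uint64_t ptr, uint64_t size)
{
	/* written to avoid overflow in ptr + size */
	return size <= USER_SPACE_LIMIT && ptr <= USER_SPACE_LIMIT - size;
}

static int pread_model(uint64_t data_ptr, uint64_t size)
{
	if (!access_ok_model(data_ptr, size))
		return -EFAULT;	/* as in the hunks above */
	/* ... the copy to the now-validated destination would happen here ... */
	return 0;
}

int main(void)
{
	printf("%d\n", pread_model(0x1000, 64));		/* 0        */
	printf("%d\n", pread_model(UINT64_MAX - 16, 64));	/* -EFAULT  */
	return 0;
}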
29894diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/i915_irq.c linux-2.6.32.43/drivers/gpu/drm/i915/i915_irq.c
29895--- linux-2.6.32.43/drivers/gpu/drm/i915/i915_irq.c 2011-03-27 14:31:47.000000000 -0400
29896+++ linux-2.6.32.43/drivers/gpu/drm/i915/i915_irq.c 2011-05-04 17:56:28.000000000 -0400
29897@@ -528,7 +528,7 @@ irqreturn_t i915_driver_irq_handler(DRM_
29898 int irq_received;
29899 int ret = IRQ_NONE;
29900
29901- atomic_inc(&dev_priv->irq_received);
29902+ atomic_inc_unchecked(&dev_priv->irq_received);
29903
29904 if (IS_IGDNG(dev))
29905 return igdng_irq_handler(dev);
29906@@ -1021,7 +1021,7 @@ void i915_driver_irq_preinstall(struct d
29907 {
29908 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
29909
29910- atomic_set(&dev_priv->irq_received, 0);
29911+ atomic_set_unchecked(&dev_priv->irq_received, 0);
29912
29913 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func);
29914 INIT_WORK(&dev_priv->error_work, i915_error_work_func);
29915diff -urNp linux-2.6.32.43/drivers/gpu/drm/i915/intel_sdvo.c linux-2.6.32.43/drivers/gpu/drm/i915/intel_sdvo.c
29916--- linux-2.6.32.43/drivers/gpu/drm/i915/intel_sdvo.c 2011-03-27 14:31:47.000000000 -0400
29917+++ linux-2.6.32.43/drivers/gpu/drm/i915/intel_sdvo.c 2011-08-05 20:33:55.000000000 -0400
29918@@ -2795,7 +2795,9 @@ bool intel_sdvo_init(struct drm_device *
29919 sdvo_priv->slave_addr = intel_sdvo_get_slave_addr(dev, output_device);
29920
29921 /* Save the bit-banging i2c functionality for use by the DDC wrapper */
29922- intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
29923+ pax_open_kernel();
29924+ *(void **)&intel_sdvo_i2c_bit_algo.functionality = intel_output->i2c_bus->algo->functionality;
29925+ pax_close_kernel();
29926
29927 /* Read the regs to test if we can talk to the device */
29928 for (i = 0; i < 0x40; i++) {
29929diff -urNp linux-2.6.32.43/drivers/gpu/drm/mga/mga_drv.h linux-2.6.32.43/drivers/gpu/drm/mga/mga_drv.h
29930--- linux-2.6.32.43/drivers/gpu/drm/mga/mga_drv.h 2011-03-27 14:31:47.000000000 -0400
29931+++ linux-2.6.32.43/drivers/gpu/drm/mga/mga_drv.h 2011-05-04 17:56:28.000000000 -0400
29932@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
29933 u32 clear_cmd;
29934 u32 maccess;
29935
29936- atomic_t vbl_received; /**< Number of vblanks received. */
29937+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
29938 wait_queue_head_t fence_queue;
29939- atomic_t last_fence_retired;
29940+ atomic_unchecked_t last_fence_retired;
29941 u32 next_fence_to_post;
29942
29943 unsigned int fb_cpp;
29944diff -urNp linux-2.6.32.43/drivers/gpu/drm/mga/mga_irq.c linux-2.6.32.43/drivers/gpu/drm/mga/mga_irq.c
29945--- linux-2.6.32.43/drivers/gpu/drm/mga/mga_irq.c 2011-03-27 14:31:47.000000000 -0400
29946+++ linux-2.6.32.43/drivers/gpu/drm/mga/mga_irq.c 2011-05-04 17:56:28.000000000 -0400
29947@@ -44,7 +44,7 @@ u32 mga_get_vblank_counter(struct drm_de
29948 if (crtc != 0)
29949 return 0;
29950
29951- return atomic_read(&dev_priv->vbl_received);
29952+ return atomic_read_unchecked(&dev_priv->vbl_received);
29953 }
29954
29955
29956@@ -60,7 +60,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29957 /* VBLANK interrupt */
29958 if (status & MGA_VLINEPEN) {
29959 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
29960- atomic_inc(&dev_priv->vbl_received);
29961+ atomic_inc_unchecked(&dev_priv->vbl_received);
29962 drm_handle_vblank(dev, 0);
29963 handled = 1;
29964 }
29965@@ -80,7 +80,7 @@ irqreturn_t mga_driver_irq_handler(DRM_I
29966 MGA_WRITE(MGA_PRIMEND, prim_end);
29967 }
29968
29969- atomic_inc(&dev_priv->last_fence_retired);
29970+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
29971 DRM_WAKEUP(&dev_priv->fence_queue);
29972 handled = 1;
29973 }
29974@@ -131,7 +131,7 @@ int mga_driver_fence_wait(struct drm_dev
29975 * using fences.
29976 */
29977 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
29978- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
29979+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
29980 - *sequence) <= (1 << 23)));
29981
29982 *sequence = cur_fence;
29983diff -urNp linux-2.6.32.43/drivers/gpu/drm/r128/r128_cce.c linux-2.6.32.43/drivers/gpu/drm/r128/r128_cce.c
29984--- linux-2.6.32.43/drivers/gpu/drm/r128/r128_cce.c 2011-03-27 14:31:47.000000000 -0400
29985+++ linux-2.6.32.43/drivers/gpu/drm/r128/r128_cce.c 2011-05-04 17:56:28.000000000 -0400
29986@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_d
29987
29988 /* GH: Simple idle check.
29989 */
29990- atomic_set(&dev_priv->idle_count, 0);
29991+ atomic_set_unchecked(&dev_priv->idle_count, 0);
29992
29993 /* We don't support anything other than bus-mastering ring mode,
29994 * but the ring can be in either AGP or PCI space for the ring
29995diff -urNp linux-2.6.32.43/drivers/gpu/drm/r128/r128_drv.h linux-2.6.32.43/drivers/gpu/drm/r128/r128_drv.h
29996--- linux-2.6.32.43/drivers/gpu/drm/r128/r128_drv.h 2011-03-27 14:31:47.000000000 -0400
29997+++ linux-2.6.32.43/drivers/gpu/drm/r128/r128_drv.h 2011-05-04 17:56:28.000000000 -0400
29998@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
29999 int is_pci;
30000 unsigned long cce_buffers_offset;
30001
30002- atomic_t idle_count;
30003+ atomic_unchecked_t idle_count;
30004
30005 int page_flipping;
30006 int current_page;
30007 u32 crtc_offset;
30008 u32 crtc_offset_cntl;
30009
30010- atomic_t vbl_received;
30011+ atomic_unchecked_t vbl_received;
30012
30013 u32 color_fmt;
30014 unsigned int front_offset;
30015diff -urNp linux-2.6.32.43/drivers/gpu/drm/r128/r128_irq.c linux-2.6.32.43/drivers/gpu/drm/r128/r128_irq.c
30016--- linux-2.6.32.43/drivers/gpu/drm/r128/r128_irq.c 2011-03-27 14:31:47.000000000 -0400
30017+++ linux-2.6.32.43/drivers/gpu/drm/r128/r128_irq.c 2011-05-04 17:56:28.000000000 -0400
30018@@ -42,7 +42,7 @@ u32 r128_get_vblank_counter(struct drm_d
30019 if (crtc != 0)
30020 return 0;
30021
30022- return atomic_read(&dev_priv->vbl_received);
30023+ return atomic_read_unchecked(&dev_priv->vbl_received);
30024 }
30025
30026 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
30027@@ -56,7 +56,7 @@ irqreturn_t r128_driver_irq_handler(DRM_
30028 /* VBLANK interrupt */
30029 if (status & R128_CRTC_VBLANK_INT) {
30030 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
30031- atomic_inc(&dev_priv->vbl_received);
30032+ atomic_inc_unchecked(&dev_priv->vbl_received);
30033 drm_handle_vblank(dev, 0);
30034 return IRQ_HANDLED;
30035 }
30036diff -urNp linux-2.6.32.43/drivers/gpu/drm/r128/r128_state.c linux-2.6.32.43/drivers/gpu/drm/r128/r128_state.c
30037--- linux-2.6.32.43/drivers/gpu/drm/r128/r128_state.c 2011-03-27 14:31:47.000000000 -0400
30038+++ linux-2.6.32.43/drivers/gpu/drm/r128/r128_state.c 2011-05-04 17:56:28.000000000 -0400
30039@@ -323,10 +323,10 @@ static void r128_clear_box(drm_r128_priv
30040
30041 static void r128_cce_performance_boxes(drm_r128_private_t * dev_priv)
30042 {
30043- if (atomic_read(&dev_priv->idle_count) == 0) {
30044+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0) {
30045 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
30046 } else {
30047- atomic_set(&dev_priv->idle_count, 0);
30048+ atomic_set_unchecked(&dev_priv->idle_count, 0);
30049 }
30050 }
30051
30052diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/atom.c linux-2.6.32.43/drivers/gpu/drm/radeon/atom.c
30053--- linux-2.6.32.43/drivers/gpu/drm/radeon/atom.c 2011-05-10 22:12:01.000000000 -0400
30054+++ linux-2.6.32.43/drivers/gpu/drm/radeon/atom.c 2011-05-16 21:46:57.000000000 -0400
30055@@ -1115,6 +1115,8 @@ struct atom_context *atom_parse(struct c
30056 char name[512];
30057 int i;
30058
30059+ pax_track_stack();
30060+
30061 ctx->card = card;
30062 ctx->bios = bios;
30063
30064diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.32.43/drivers/gpu/drm/radeon/mkregtable.c
30065--- linux-2.6.32.43/drivers/gpu/drm/radeon/mkregtable.c 2011-03-27 14:31:47.000000000 -0400
30066+++ linux-2.6.32.43/drivers/gpu/drm/radeon/mkregtable.c 2011-04-17 15:56:46.000000000 -0400
30067@@ -637,14 +637,14 @@ static int parser_auth(struct table *t,
30068 regex_t mask_rex;
30069 regmatch_t match[4];
30070 char buf[1024];
30071- size_t end;
30072+ long end;
30073 int len;
30074 int done = 0;
30075 int r;
30076 unsigned o;
30077 struct offset *offset;
30078 char last_reg_s[10];
30079- int last_reg;
30080+ unsigned long last_reg;
30081
30082 if (regcomp
30083 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
30084diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_atombios.c linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_atombios.c
30085--- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_atombios.c 2011-03-27 14:31:47.000000000 -0400
30086+++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_atombios.c 2011-05-16 21:46:57.000000000 -0400
30087@@ -275,6 +275,8 @@ bool radeon_get_atom_connector_info_from
30088 bool linkb;
30089 struct radeon_i2c_bus_rec ddc_bus;
30090
30091+ pax_track_stack();
30092+
30093 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
30094
30095 if (data_offset == 0)
30096@@ -520,13 +522,13 @@ static uint16_t atombios_get_connector_o
30097 }
30098 }
30099
30100-struct bios_connector {
30101+static struct bios_connector {
30102 bool valid;
30103 uint16_t line_mux;
30104 uint16_t devices;
30105 int connector_type;
30106 struct radeon_i2c_bus_rec ddc_bus;
30107-};
30108+} bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
30109
30110 bool radeon_get_atom_connector_info_from_supported_devices_table(struct
30111 drm_device
30112@@ -542,7 +544,6 @@ bool radeon_get_atom_connector_info_from
30113 uint8_t dac;
30114 union atom_supported_devices *supported_devices;
30115 int i, j;
30116- struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
30117
30118 atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
30119
30120diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_display.c
30121--- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_display.c 2011-03-27 14:31:47.000000000 -0400
30122+++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_display.c 2011-04-17 15:56:46.000000000 -0400
30123@@ -482,7 +482,7 @@ void radeon_compute_pll(struct radeon_pl
30124
30125 if (flags & RADEON_PLL_PREFER_CLOSEST_LOWER) {
30126 error = freq - current_freq;
30127- error = error < 0 ? 0xffffffff : error;
30128+ error = (int32_t)error < 0 ? 0xffffffff : error;
30129 } else
30130 error = abs(current_freq - freq);
30131 vco_diff = abs(vco - best_vco);
30132diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_drv.h linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_drv.h
30133--- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_drv.h 2011-03-27 14:31:47.000000000 -0400
30134+++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_drv.h 2011-05-04 17:56:28.000000000 -0400
30135@@ -253,7 +253,7 @@ typedef struct drm_radeon_private {
30136
30137 /* SW interrupt */
30138 wait_queue_head_t swi_queue;
30139- atomic_t swi_emitted;
30140+ atomic_unchecked_t swi_emitted;
30141 int vblank_crtc;
30142 uint32_t irq_enable_reg;
30143 uint32_t r500_disp_irq_reg;
30144diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_fence.c linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_fence.c
30145--- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_fence.c 2011-03-27 14:31:47.000000000 -0400
30146+++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_fence.c 2011-05-04 17:56:28.000000000 -0400
30147@@ -47,7 +47,7 @@ int radeon_fence_emit(struct radeon_devi
30148 write_unlock_irqrestore(&rdev->fence_drv.lock, irq_flags);
30149 return 0;
30150 }
30151- fence->seq = atomic_add_return(1, &rdev->fence_drv.seq);
30152+ fence->seq = atomic_add_return_unchecked(1, &rdev->fence_drv.seq);
30153 if (!rdev->cp.ready) {
30154 /* FIXME: cp is not running assume everythings is done right
30155 * away
30156@@ -364,7 +364,7 @@ int radeon_fence_driver_init(struct rade
30157 return r;
30158 }
30159 WREG32(rdev->fence_drv.scratch_reg, 0);
30160- atomic_set(&rdev->fence_drv.seq, 0);
30161+ atomic_set_unchecked(&rdev->fence_drv.seq, 0);
30162 INIT_LIST_HEAD(&rdev->fence_drv.created);
30163 INIT_LIST_HEAD(&rdev->fence_drv.emited);
30164 INIT_LIST_HEAD(&rdev->fence_drv.signaled);
30165diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon.h linux-2.6.32.43/drivers/gpu/drm/radeon/radeon.h
30166--- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon.h 2011-03-27 14:31:47.000000000 -0400
30167+++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon.h 2011-08-05 20:33:55.000000000 -0400
30168@@ -149,7 +149,7 @@ int radeon_pm_init(struct radeon_device
30169 */
30170 struct radeon_fence_driver {
30171 uint32_t scratch_reg;
30172- atomic_t seq;
30173+ atomic_unchecked_t seq;
30174 uint32_t last_seq;
30175 unsigned long count_timeout;
30176 wait_queue_head_t queue;
30177@@ -640,7 +640,7 @@ struct radeon_asic {
30178 uint32_t offset, uint32_t obj_size);
30179 int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
30180 void (*bandwidth_update)(struct radeon_device *rdev);
30181-};
30182+} __no_const;
30183
30184 /*
30185 * Asic structures
30186diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_ioc32.c linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_ioc32.c
30187--- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-03-27 14:31:47.000000000 -0400
30188+++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_ioc32.c 2011-04-23 13:57:24.000000000 -0400
30189@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(str
30190 request = compat_alloc_user_space(sizeof(*request));
30191 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
30192 || __put_user(req32.param, &request->param)
30193- || __put_user((void __user *)(unsigned long)req32.value,
30194+ || __put_user((unsigned long)req32.value,
30195 &request->value))
30196 return -EFAULT;
30197
30198diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_irq.c linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_irq.c
30199--- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_irq.c 2011-03-27 14:31:47.000000000 -0400
30200+++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_irq.c 2011-05-04 17:56:28.000000000 -0400
30201@@ -225,8 +225,8 @@ static int radeon_emit_irq(struct drm_de
30202 unsigned int ret;
30203 RING_LOCALS;
30204
30205- atomic_inc(&dev_priv->swi_emitted);
30206- ret = atomic_read(&dev_priv->swi_emitted);
30207+ atomic_inc_unchecked(&dev_priv->swi_emitted);
30208+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
30209
30210 BEGIN_RING(4);
30211 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
30212@@ -352,7 +352,7 @@ int radeon_driver_irq_postinstall(struct
30213 drm_radeon_private_t *dev_priv =
30214 (drm_radeon_private_t *) dev->dev_private;
30215
30216- atomic_set(&dev_priv->swi_emitted, 0);
30217+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
30218 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
30219
30220 dev->max_vblank_count = 0x001fffff;
30221diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_state.c
30222--- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_state.c 2011-03-27 14:31:47.000000000 -0400
30223+++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_state.c 2011-04-17 15:56:46.000000000 -0400
30224@@ -3021,7 +3021,7 @@ static int radeon_cp_getparam(struct drm
30225 {
30226 drm_radeon_private_t *dev_priv = dev->dev_private;
30227 drm_radeon_getparam_t *param = data;
30228- int value;
30229+ int value = 0;
30230
30231 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
30232
30233diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_ttm.c
30234--- linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_ttm.c 2011-03-27 14:31:47.000000000 -0400
30235+++ linux-2.6.32.43/drivers/gpu/drm/radeon/radeon_ttm.c 2011-04-17 15:56:46.000000000 -0400
30236@@ -535,27 +535,10 @@ void radeon_ttm_fini(struct radeon_devic
30237 DRM_INFO("radeon: ttm finalized\n");
30238 }
30239
30240-static struct vm_operations_struct radeon_ttm_vm_ops;
30241-static const struct vm_operations_struct *ttm_vm_ops = NULL;
30242-
30243-static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
30244-{
30245- struct ttm_buffer_object *bo;
30246- int r;
30247-
30248- bo = (struct ttm_buffer_object *)vma->vm_private_data;
30249- if (bo == NULL) {
30250- return VM_FAULT_NOPAGE;
30251- }
30252- r = ttm_vm_ops->fault(vma, vmf);
30253- return r;
30254-}
30255-
30256 int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
30257 {
30258 struct drm_file *file_priv;
30259 struct radeon_device *rdev;
30260- int r;
30261
30262 if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET)) {
30263 return drm_mmap(filp, vma);
30264@@ -563,20 +546,9 @@ int radeon_mmap(struct file *filp, struc
30265
30266 file_priv = (struct drm_file *)filp->private_data;
30267 rdev = file_priv->minor->dev->dev_private;
30268- if (rdev == NULL) {
30269+ if (!rdev)
30270 return -EINVAL;
30271- }
30272- r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
30273- if (unlikely(r != 0)) {
30274- return r;
30275- }
30276- if (unlikely(ttm_vm_ops == NULL)) {
30277- ttm_vm_ops = vma->vm_ops;
30278- radeon_ttm_vm_ops = *ttm_vm_ops;
30279- radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
30280- }
30281- vma->vm_ops = &radeon_ttm_vm_ops;
30282- return 0;
30283+ return ttm_bo_mmap(filp, vma, &rdev->mman.bdev);
30284 }
30285
30286
30287diff -urNp linux-2.6.32.43/drivers/gpu/drm/radeon/rs690.c linux-2.6.32.43/drivers/gpu/drm/radeon/rs690.c
30288--- linux-2.6.32.43/drivers/gpu/drm/radeon/rs690.c 2011-03-27 14:31:47.000000000 -0400
30289+++ linux-2.6.32.43/drivers/gpu/drm/radeon/rs690.c 2011-04-17 15:56:46.000000000 -0400
30290@@ -302,9 +302,11 @@ void rs690_crtc_bandwidth_compute(struct
30291 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
30292 rdev->pm.sideport_bandwidth.full)
30293 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
30294- read_delay_latency.full = rfixed_const(370 * 800 * 1000);
30295+ read_delay_latency.full = rfixed_const(800 * 1000);
30296 read_delay_latency.full = rfixed_div(read_delay_latency,
30297 rdev->pm.igp_sideport_mclk);
30298+ a.full = rfixed_const(370);
30299+ read_delay_latency.full = rfixed_mul(read_delay_latency, a);
30300 } else {
30301 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
30302 rdev->pm.k8_bandwidth.full)
30303diff -urNp linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_bo.c
30304--- linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_bo.c 2011-03-27 14:31:47.000000000 -0400
30305+++ linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_bo.c 2011-04-23 12:56:11.000000000 -0400
30306@@ -67,7 +67,7 @@ static struct attribute *ttm_bo_global_a
30307 NULL
30308 };
30309
30310-static struct sysfs_ops ttm_bo_global_ops = {
30311+static const struct sysfs_ops ttm_bo_global_ops = {
30312 .show = &ttm_bo_global_show
30313 };
30314
30315diff -urNp linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_bo_vm.c
30316--- linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-03-27 14:31:47.000000000 -0400
30317+++ linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_bo_vm.c 2011-04-17 15:56:46.000000000 -0400
30318@@ -73,7 +73,7 @@ static int ttm_bo_vm_fault(struct vm_are
30319 {
30320 struct ttm_buffer_object *bo = (struct ttm_buffer_object *)
30321 vma->vm_private_data;
30322- struct ttm_bo_device *bdev = bo->bdev;
30323+ struct ttm_bo_device *bdev;
30324 unsigned long bus_base;
30325 unsigned long bus_offset;
30326 unsigned long bus_size;
30327@@ -88,6 +88,10 @@ static int ttm_bo_vm_fault(struct vm_are
30328 unsigned long address = (unsigned long)vmf->virtual_address;
30329 int retval = VM_FAULT_NOPAGE;
30330
30331+ if (!bo)
30332+ return VM_FAULT_NOPAGE;
30333+ bdev = bo->bdev;
30334+
30335 /*
30336 * Work around locking order reversal in fault / nopfn
30337 * between mmap_sem and bo_reserve: Perform a trylock operation
30338diff -urNp linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_global.c
30339--- linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_global.c 2011-03-27 14:31:47.000000000 -0400
30340+++ linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_global.c 2011-04-17 15:56:46.000000000 -0400
30341@@ -36,7 +36,7 @@
30342 struct ttm_global_item {
30343 struct mutex mutex;
30344 void *object;
30345- int refcount;
30346+ atomic_t refcount;
30347 };
30348
30349 static struct ttm_global_item glob[TTM_GLOBAL_NUM];
30350@@ -49,7 +49,7 @@ void ttm_global_init(void)
30351 struct ttm_global_item *item = &glob[i];
30352 mutex_init(&item->mutex);
30353 item->object = NULL;
30354- item->refcount = 0;
30355+ atomic_set(&item->refcount, 0);
30356 }
30357 }
30358
30359@@ -59,7 +59,7 @@ void ttm_global_release(void)
30360 for (i = 0; i < TTM_GLOBAL_NUM; ++i) {
30361 struct ttm_global_item *item = &glob[i];
30362 BUG_ON(item->object != NULL);
30363- BUG_ON(item->refcount != 0);
30364+ BUG_ON(atomic_read(&item->refcount) != 0);
30365 }
30366 }
30367
30368@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa
30369 void *object;
30370
30371 mutex_lock(&item->mutex);
30372- if (item->refcount == 0) {
30373+ if (atomic_read(&item->refcount) == 0) {
30374 item->object = kzalloc(ref->size, GFP_KERNEL);
30375 if (unlikely(item->object == NULL)) {
30376 ret = -ENOMEM;
30377@@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa
30378 goto out_err;
30379
30380 }
30381- ++item->refcount;
30382+ atomic_inc(&item->refcount);
30383 ref->object = item->object;
30384 object = item->object;
30385 mutex_unlock(&item->mutex);
30386@@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl
30387 struct ttm_global_item *item = &glob[ref->global_type];
30388
30389 mutex_lock(&item->mutex);
30390- BUG_ON(item->refcount == 0);
30391+ BUG_ON(atomic_read(&item->refcount) == 0);
30392 BUG_ON(ref->object != item->object);
30393- if (--item->refcount == 0) {
30394+ if (atomic_dec_and_test(&item->refcount)) {
30395 ref->release(ref);
30396 item->object = NULL;
30397 }
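The ttm_global hunk above replaces a plain int refcount with an atomic_t and the atomic_inc / atomic_dec_and_test pair, so the "last reference dropped" decision is made by a single atomic operation rather than a bare integer decrement. A small sketch of that release pattern using GCC's __sync builtins in place of the kernel atomics (atomic_model_t and the *_model helpers are illustrative names):

/* Sketch of the atomic_dec_and_test refcount release pattern. */
#include <stdio.h>

typedef struct { volatile int counter; } atomic_model_t;

static void atomic_set_model(atomic_model_t *v, int i) { v->counter = i; }
static void atomic_inc_model(atomic_model_t *v)        { __sync_fetch_and_add(&v->counter, 1); }
static int  atomic_dec_and_test_model(atomic_model_t *v)
{
	return __sync_sub_and_fetch(&v->counter, 1) == 0;	/* true when it hits zero */
}

int main(void)
{
	atomic_model_t refcount;
	atomic_set_model(&refcount, 0);
	atomic_inc_model(&refcount);			/* cf. ttm_global_item_ref()   */
	if (atomic_dec_and_test_model(&refcount))	/* cf. ttm_global_item_unref() */
		puts("last reference dropped, release the object");
	return 0;
}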
30398diff -urNp linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_memory.c linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_memory.c
30399--- linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_memory.c 2011-03-27 14:31:47.000000000 -0400
30400+++ linux-2.6.32.43/drivers/gpu/drm/ttm/ttm_memory.c 2011-04-17 15:56:46.000000000 -0400
30401@@ -152,7 +152,7 @@ static struct attribute *ttm_mem_zone_at
30402 NULL
30403 };
30404
30405-static struct sysfs_ops ttm_mem_zone_ops = {
30406+static const struct sysfs_ops ttm_mem_zone_ops = {
30407 .show = &ttm_mem_zone_show,
30408 .store = &ttm_mem_zone_store
30409 };
30410diff -urNp linux-2.6.32.43/drivers/gpu/drm/via/via_drv.h linux-2.6.32.43/drivers/gpu/drm/via/via_drv.h
30411--- linux-2.6.32.43/drivers/gpu/drm/via/via_drv.h 2011-03-27 14:31:47.000000000 -0400
30412+++ linux-2.6.32.43/drivers/gpu/drm/via/via_drv.h 2011-05-04 17:56:28.000000000 -0400
30413@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
30414 typedef uint32_t maskarray_t[5];
30415
30416 typedef struct drm_via_irq {
30417- atomic_t irq_received;
30418+ atomic_unchecked_t irq_received;
30419 uint32_t pending_mask;
30420 uint32_t enable_mask;
30421 wait_queue_head_t irq_queue;
30422@@ -75,7 +75,7 @@ typedef struct drm_via_private {
30423 struct timeval last_vblank;
30424 int last_vblank_valid;
30425 unsigned usec_per_vblank;
30426- atomic_t vbl_received;
30427+ atomic_unchecked_t vbl_received;
30428 drm_via_state_t hc_state;
30429 char pci_buf[VIA_PCI_BUF_SIZE];
30430 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
30431diff -urNp linux-2.6.32.43/drivers/gpu/drm/via/via_irq.c linux-2.6.32.43/drivers/gpu/drm/via/via_irq.c
30432--- linux-2.6.32.43/drivers/gpu/drm/via/via_irq.c 2011-03-27 14:31:47.000000000 -0400
30433+++ linux-2.6.32.43/drivers/gpu/drm/via/via_irq.c 2011-05-04 17:56:28.000000000 -0400
30434@@ -102,7 +102,7 @@ u32 via_get_vblank_counter(struct drm_de
30435 if (crtc != 0)
30436 return 0;
30437
30438- return atomic_read(&dev_priv->vbl_received);
30439+ return atomic_read_unchecked(&dev_priv->vbl_received);
30440 }
30441
30442 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
30443@@ -117,8 +117,8 @@ irqreturn_t via_driver_irq_handler(DRM_I
30444
30445 status = VIA_READ(VIA_REG_INTERRUPT);
30446 if (status & VIA_IRQ_VBLANK_PENDING) {
30447- atomic_inc(&dev_priv->vbl_received);
30448- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
30449+ atomic_inc_unchecked(&dev_priv->vbl_received);
30450+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
30451 do_gettimeofday(&cur_vblank);
30452 if (dev_priv->last_vblank_valid) {
30453 dev_priv->usec_per_vblank =
30454@@ -128,7 +128,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
30455 dev_priv->last_vblank = cur_vblank;
30456 dev_priv->last_vblank_valid = 1;
30457 }
30458- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
30459+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
30460 DRM_DEBUG("US per vblank is: %u\n",
30461 dev_priv->usec_per_vblank);
30462 }
30463@@ -138,7 +138,7 @@ irqreturn_t via_driver_irq_handler(DRM_I
30464
30465 for (i = 0; i < dev_priv->num_irqs; ++i) {
30466 if (status & cur_irq->pending_mask) {
30467- atomic_inc(&cur_irq->irq_received);
30468+ atomic_inc_unchecked(&cur_irq->irq_received);
30469 DRM_WAKEUP(&cur_irq->irq_queue);
30470 handled = 1;
30471 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i) {
30472@@ -244,11 +244,11 @@ via_driver_irq_wait(struct drm_device *
30473 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30474 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
30475 masks[irq][4]));
30476- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
30477+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
30478 } else {
30479 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
30480 (((cur_irq_sequence =
30481- atomic_read(&cur_irq->irq_received)) -
30482+ atomic_read_unchecked(&cur_irq->irq_received)) -
30483 *sequence) <= (1 << 23)));
30484 }
30485 *sequence = cur_irq_sequence;
30486@@ -286,7 +286,7 @@ void via_driver_irq_preinstall(struct dr
30487 }
30488
30489 for (i = 0; i < dev_priv->num_irqs; ++i) {
30490- atomic_set(&cur_irq->irq_received, 0);
30491+ atomic_set_unchecked(&cur_irq->irq_received, 0);
30492 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
30493 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
30494 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
30495@@ -368,7 +368,7 @@ int via_wait_irq(struct drm_device *dev,
30496 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
30497 case VIA_IRQ_RELATIVE:
30498 irqwait->request.sequence +=
30499- atomic_read(&cur_irq->irq_received);
30500+ atomic_read_unchecked(&cur_irq->irq_received);
30501 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
30502 case VIA_IRQ_ABSOLUTE:
30503 break;
30504diff -urNp linux-2.6.32.43/drivers/hid/hid-core.c linux-2.6.32.43/drivers/hid/hid-core.c
30505--- linux-2.6.32.43/drivers/hid/hid-core.c 2011-05-10 22:12:01.000000000 -0400
30506+++ linux-2.6.32.43/drivers/hid/hid-core.c 2011-05-10 22:12:32.000000000 -0400
30507@@ -1752,7 +1752,7 @@ static bool hid_ignore(struct hid_device
30508
30509 int hid_add_device(struct hid_device *hdev)
30510 {
30511- static atomic_t id = ATOMIC_INIT(0);
30512+ static atomic_unchecked_t id = ATOMIC_INIT(0);
30513 int ret;
30514
30515 if (WARN_ON(hdev->status & HID_STAT_ADDED))
30516@@ -1766,7 +1766,7 @@ int hid_add_device(struct hid_device *hd
30517 /* XXX hack, any other cleaner solution after the driver core
30518 * is converted to allow more than 20 bytes as the device name? */
30519 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
30520- hdev->vendor, hdev->product, atomic_inc_return(&id));
30521+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
30522
30523 ret = device_add(&hdev->dev);
30524 if (!ret)
30525diff -urNp linux-2.6.32.43/drivers/hid/usbhid/hiddev.c linux-2.6.32.43/drivers/hid/usbhid/hiddev.c
30526--- linux-2.6.32.43/drivers/hid/usbhid/hiddev.c 2011-03-27 14:31:47.000000000 -0400
30527+++ linux-2.6.32.43/drivers/hid/usbhid/hiddev.c 2011-04-17 15:56:46.000000000 -0400
30528@@ -617,7 +617,7 @@ static long hiddev_ioctl(struct file *fi
30529 return put_user(HID_VERSION, (int __user *)arg);
30530
30531 case HIDIOCAPPLICATION:
30532- if (arg < 0 || arg >= hid->maxapplication)
30533+ if (arg >= hid->maxapplication)
30534 return -EINVAL;
30535
30536 for (i = 0; i < hid->maxcollection; i++)
30537diff -urNp linux-2.6.32.43/drivers/hwmon/lis3lv02d.c linux-2.6.32.43/drivers/hwmon/lis3lv02d.c
30538--- linux-2.6.32.43/drivers/hwmon/lis3lv02d.c 2011-03-27 14:31:47.000000000 -0400
30539+++ linux-2.6.32.43/drivers/hwmon/lis3lv02d.c 2011-05-04 17:56:28.000000000 -0400
30540@@ -146,7 +146,7 @@ static irqreturn_t lis302dl_interrupt(in
30541 * the lid is closed. This leads to interrupts as soon as a little move
30542 * is done.
30543 */
30544- atomic_inc(&lis3_dev.count);
30545+ atomic_inc_unchecked(&lis3_dev.count);
30546
30547 wake_up_interruptible(&lis3_dev.misc_wait);
30548 kill_fasync(&lis3_dev.async_queue, SIGIO, POLL_IN);
30549@@ -160,7 +160,7 @@ static int lis3lv02d_misc_open(struct in
30550 if (test_and_set_bit(0, &lis3_dev.misc_opened))
30551 return -EBUSY; /* already open */
30552
30553- atomic_set(&lis3_dev.count, 0);
30554+ atomic_set_unchecked(&lis3_dev.count, 0);
30555
30556 /*
30557 * The sensor can generate interrupts for free-fall and direction
30558@@ -206,7 +206,7 @@ static ssize_t lis3lv02d_misc_read(struc
30559 add_wait_queue(&lis3_dev.misc_wait, &wait);
30560 while (true) {
30561 set_current_state(TASK_INTERRUPTIBLE);
30562- data = atomic_xchg(&lis3_dev.count, 0);
30563+ data = atomic_xchg_unchecked(&lis3_dev.count, 0);
30564 if (data)
30565 break;
30566
30567@@ -244,7 +244,7 @@ out:
30568 static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
30569 {
30570 poll_wait(file, &lis3_dev.misc_wait, wait);
30571- if (atomic_read(&lis3_dev.count))
30572+ if (atomic_read_unchecked(&lis3_dev.count))
30573 return POLLIN | POLLRDNORM;
30574 return 0;
30575 }
30576diff -urNp linux-2.6.32.43/drivers/hwmon/lis3lv02d.h linux-2.6.32.43/drivers/hwmon/lis3lv02d.h
30577--- linux-2.6.32.43/drivers/hwmon/lis3lv02d.h 2011-03-27 14:31:47.000000000 -0400
30578+++ linux-2.6.32.43/drivers/hwmon/lis3lv02d.h 2011-05-04 17:56:28.000000000 -0400
30579@@ -201,7 +201,7 @@ struct lis3lv02d {
30580
30581 struct input_polled_dev *idev; /* input device */
30582 struct platform_device *pdev; /* platform device */
30583- atomic_t count; /* interrupt count after last read */
30584+ atomic_unchecked_t count; /* interrupt count after last read */
30585 int xcalib; /* calibrated null value for x */
30586 int ycalib; /* calibrated null value for y */
30587 int zcalib; /* calibrated null value for z */
30588diff -urNp linux-2.6.32.43/drivers/hwmon/sht15.c linux-2.6.32.43/drivers/hwmon/sht15.c
30589--- linux-2.6.32.43/drivers/hwmon/sht15.c 2011-03-27 14:31:47.000000000 -0400
30590+++ linux-2.6.32.43/drivers/hwmon/sht15.c 2011-05-04 17:56:28.000000000 -0400
30591@@ -112,7 +112,7 @@ struct sht15_data {
30592 int supply_uV;
30593 int supply_uV_valid;
30594 struct work_struct update_supply_work;
30595- atomic_t interrupt_handled;
30596+ atomic_unchecked_t interrupt_handled;
30597 };
30598
30599 /**
30600@@ -245,13 +245,13 @@ static inline int sht15_update_single_va
30601 return ret;
30602
30603 gpio_direction_input(data->pdata->gpio_data);
30604- atomic_set(&data->interrupt_handled, 0);
30605+ atomic_set_unchecked(&data->interrupt_handled, 0);
30606
30607 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30608 if (gpio_get_value(data->pdata->gpio_data) == 0) {
30609 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
30610 /* Only relevant if the interrupt hasn't occured. */
30611- if (!atomic_read(&data->interrupt_handled))
30612+ if (!atomic_read_unchecked(&data->interrupt_handled))
30613 schedule_work(&data->read_work);
30614 }
30615 ret = wait_event_timeout(data->wait_queue,
30616@@ -398,7 +398,7 @@ static irqreturn_t sht15_interrupt_fired
30617 struct sht15_data *data = d;
30618 /* First disable the interrupt */
30619 disable_irq_nosync(irq);
30620- atomic_inc(&data->interrupt_handled);
30621+ atomic_inc_unchecked(&data->interrupt_handled);
30622 /* Then schedule a reading work struct */
30623 if (data->flag != SHT15_READING_NOTHING)
30624 schedule_work(&data->read_work);
30625@@ -449,11 +449,11 @@ static void sht15_bh_read_data(struct wo
30626 here as could have gone low in meantime so verify
30627 it hasn't!
30628 */
30629- atomic_set(&data->interrupt_handled, 0);
30630+ atomic_set_unchecked(&data->interrupt_handled, 0);
30631 enable_irq(gpio_to_irq(data->pdata->gpio_data));
30632 /* If still not occured or another handler has been scheduled */
30633 if (gpio_get_value(data->pdata->gpio_data)
30634- || atomic_read(&data->interrupt_handled))
30635+ || atomic_read_unchecked(&data->interrupt_handled))
30636 return;
30637 }
30638 /* Read the data back from the device */
30639diff -urNp linux-2.6.32.43/drivers/hwmon/w83791d.c linux-2.6.32.43/drivers/hwmon/w83791d.c
30640--- linux-2.6.32.43/drivers/hwmon/w83791d.c 2011-03-27 14:31:47.000000000 -0400
30641+++ linux-2.6.32.43/drivers/hwmon/w83791d.c 2011-04-17 15:56:46.000000000 -0400
30642@@ -330,8 +330,8 @@ static int w83791d_detect(struct i2c_cli
30643 struct i2c_board_info *info);
30644 static int w83791d_remove(struct i2c_client *client);
30645
30646-static int w83791d_read(struct i2c_client *client, u8 register);
30647-static int w83791d_write(struct i2c_client *client, u8 register, u8 value);
30648+static int w83791d_read(struct i2c_client *client, u8 reg);
30649+static int w83791d_write(struct i2c_client *client, u8 reg, u8 value);
30650 static struct w83791d_data *w83791d_update_device(struct device *dev);
30651
30652 #ifdef DEBUG
30653diff -urNp linux-2.6.32.43/drivers/i2c/busses/i2c-amd756-s4882.c linux-2.6.32.43/drivers/i2c/busses/i2c-amd756-s4882.c
30654--- linux-2.6.32.43/drivers/i2c/busses/i2c-amd756-s4882.c 2011-03-27 14:31:47.000000000 -0400
30655+++ linux-2.6.32.43/drivers/i2c/busses/i2c-amd756-s4882.c 2011-08-05 20:33:55.000000000 -0400
30656@@ -189,23 +189,23 @@ static int __init amd756_s4882_init(void
30657 }
30658
30659 /* Fill in the new structures */
30660- s4882_algo[0] = *(amd756_smbus.algo);
30661- s4882_algo[0].smbus_xfer = amd756_access_virt0;
30662+ memcpy((void *)&s4882_algo[0], amd756_smbus.algo, sizeof(s4882_algo[0]));
30663+ *(void **)&s4882_algo[0].smbus_xfer = amd756_access_virt0;
30664 s4882_adapter[0] = amd756_smbus;
30665 s4882_adapter[0].algo = s4882_algo;
30666- s4882_adapter[0].dev.parent = amd756_smbus.dev.parent;
30667+ *(void **)&s4882_adapter[0].dev.parent = amd756_smbus.dev.parent;
30668 for (i = 1; i < 5; i++) {
30669- s4882_algo[i] = *(amd756_smbus.algo);
30670+ memcpy((void *)&s4882_algo[i], amd756_smbus.algo, sizeof(s4882_algo[i]));
30671 s4882_adapter[i] = amd756_smbus;
30672 snprintf(s4882_adapter[i].name, sizeof(s4882_adapter[i].name),
30673 "SMBus 8111 adapter (CPU%d)", i-1);
30674 s4882_adapter[i].algo = s4882_algo+i;
30675 s4882_adapter[i].dev.parent = amd756_smbus.dev.parent;
30676 }
30677- s4882_algo[1].smbus_xfer = amd756_access_virt1;
30678- s4882_algo[2].smbus_xfer = amd756_access_virt2;
30679- s4882_algo[3].smbus_xfer = amd756_access_virt3;
30680- s4882_algo[4].smbus_xfer = amd756_access_virt4;
30681+ *(void **)&s4882_algo[1].smbus_xfer = amd756_access_virt1;
30682+ *(void **)&s4882_algo[2].smbus_xfer = amd756_access_virt2;
30683+ *(void **)&s4882_algo[3].smbus_xfer = amd756_access_virt3;
30684+ *(void **)&s4882_algo[4].smbus_xfer = amd756_access_virt4;
30685
30686 /* Register virtual adapters */
30687 for (i = 0; i < 5; i++) {
30688diff -urNp linux-2.6.32.43/drivers/i2c/busses/i2c-nforce2-s4985.c linux-2.6.32.43/drivers/i2c/busses/i2c-nforce2-s4985.c
30689--- linux-2.6.32.43/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-03-27 14:31:47.000000000 -0400
30690+++ linux-2.6.32.43/drivers/i2c/busses/i2c-nforce2-s4985.c 2011-08-05 20:33:55.000000000 -0400
30691@@ -184,23 +184,23 @@ static int __init nforce2_s4985_init(voi
30692 }
30693
30694 /* Fill in the new structures */
30695- s4985_algo[0] = *(nforce2_smbus->algo);
30696- s4985_algo[0].smbus_xfer = nforce2_access_virt0;
30697+ memcpy((void *)&s4985_algo[0], nforce2_smbus->algo, sizeof(s4985_algo[0]));
30698+ *(void **)&s4985_algo[0].smbus_xfer = nforce2_access_virt0;
30699 s4985_adapter[0] = *nforce2_smbus;
30700 s4985_adapter[0].algo = s4985_algo;
30701 s4985_adapter[0].dev.parent = nforce2_smbus->dev.parent;
30702 for (i = 1; i < 5; i++) {
30703- s4985_algo[i] = *(nforce2_smbus->algo);
30704+ memcpy((void *)&s4985_algo[i], nforce2_smbus->algo, sizeof(s4985_algo[i]));
30705 s4985_adapter[i] = *nforce2_smbus;
30706 snprintf(s4985_adapter[i].name, sizeof(s4985_adapter[i].name),
30707 "SMBus nForce2 adapter (CPU%d)", i - 1);
30708 s4985_adapter[i].algo = s4985_algo + i;
30709 s4985_adapter[i].dev.parent = nforce2_smbus->dev.parent;
30710 }
30711- s4985_algo[1].smbus_xfer = nforce2_access_virt1;
30712- s4985_algo[2].smbus_xfer = nforce2_access_virt2;
30713- s4985_algo[3].smbus_xfer = nforce2_access_virt3;
30714- s4985_algo[4].smbus_xfer = nforce2_access_virt4;
30715+ *(void **)&s4985_algo[1].smbus_xfer = nforce2_access_virt1;
30716+ *(void **)&s4985_algo[2].smbus_xfer = nforce2_access_virt2;
30717+ *(void **)&s4985_algo[3].smbus_xfer = nforce2_access_virt3;
30718+ *(void **)&s4985_algo[4].smbus_xfer = nforce2_access_virt4;
30719
30720 /* Register virtual adapters */
30721 for (i = 0; i < 5; i++) {
30722diff -urNp linux-2.6.32.43/drivers/ide/ide-cd.c linux-2.6.32.43/drivers/ide/ide-cd.c
30723--- linux-2.6.32.43/drivers/ide/ide-cd.c 2011-03-27 14:31:47.000000000 -0400
30724+++ linux-2.6.32.43/drivers/ide/ide-cd.c 2011-04-17 15:56:46.000000000 -0400
30725@@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_
30726 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
30727 if ((unsigned long)buf & alignment
30728 || blk_rq_bytes(rq) & q->dma_pad_mask
30729- || object_is_on_stack(buf))
30730+ || object_starts_on_stack(buf))
30731 drive->dma = 0;
30732 }
30733 }
30734diff -urNp linux-2.6.32.43/drivers/ide/ide-floppy.c linux-2.6.32.43/drivers/ide/ide-floppy.c
30735--- linux-2.6.32.43/drivers/ide/ide-floppy.c 2011-03-27 14:31:47.000000000 -0400
30736+++ linux-2.6.32.43/drivers/ide/ide-floppy.c 2011-05-16 21:46:57.000000000 -0400
30737@@ -373,6 +373,8 @@ static int ide_floppy_get_capacity(ide_d
30738 u8 pc_buf[256], header_len, desc_cnt;
30739 int i, rc = 1, blocks, length;
30740
30741+ pax_track_stack();
30742+
30743 ide_debug_log(IDE_DBG_FUNC, "enter");
30744
30745 drive->bios_cyl = 0;
30746diff -urNp linux-2.6.32.43/drivers/ide/setup-pci.c linux-2.6.32.43/drivers/ide/setup-pci.c
30747--- linux-2.6.32.43/drivers/ide/setup-pci.c 2011-03-27 14:31:47.000000000 -0400
30748+++ linux-2.6.32.43/drivers/ide/setup-pci.c 2011-05-16 21:46:57.000000000 -0400
30749@@ -542,6 +542,8 @@ int ide_pci_init_two(struct pci_dev *dev
30750 int ret, i, n_ports = dev2 ? 4 : 2;
30751 struct ide_hw hw[4], *hws[] = { NULL, NULL, NULL, NULL };
30752
30753+ pax_track_stack();
30754+
30755 for (i = 0; i < n_ports / 2; i++) {
30756 ret = ide_setup_pci_controller(pdev[i], d, !i);
30757 if (ret < 0)
30758diff -urNp linux-2.6.32.43/drivers/ieee1394/dv1394.c linux-2.6.32.43/drivers/ieee1394/dv1394.c
30759--- linux-2.6.32.43/drivers/ieee1394/dv1394.c 2011-03-27 14:31:47.000000000 -0400
30760+++ linux-2.6.32.43/drivers/ieee1394/dv1394.c 2011-04-23 12:56:11.000000000 -0400
30761@@ -739,7 +739,7 @@ static void frame_prepare(struct video_c
30762 based upon DIF section and sequence
30763 */
30764
30765-static void inline
30766+static inline void
30767 frame_put_packet (struct frame *f, struct packet *p)
30768 {
30769 int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */
30770diff -urNp linux-2.6.32.43/drivers/ieee1394/hosts.c linux-2.6.32.43/drivers/ieee1394/hosts.c
30771--- linux-2.6.32.43/drivers/ieee1394/hosts.c 2011-03-27 14:31:47.000000000 -0400
30772+++ linux-2.6.32.43/drivers/ieee1394/hosts.c 2011-04-17 15:56:46.000000000 -0400
30773@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso
30774 }
30775
30776 static struct hpsb_host_driver dummy_driver = {
30777+ .name = "dummy",
30778 .transmit_packet = dummy_transmit_packet,
30779 .devctl = dummy_devctl,
30780 .isoctl = dummy_isoctl
30781diff -urNp linux-2.6.32.43/drivers/ieee1394/init_ohci1394_dma.c linux-2.6.32.43/drivers/ieee1394/init_ohci1394_dma.c
30782--- linux-2.6.32.43/drivers/ieee1394/init_ohci1394_dma.c 2011-03-27 14:31:47.000000000 -0400
30783+++ linux-2.6.32.43/drivers/ieee1394/init_ohci1394_dma.c 2011-04-17 15:56:46.000000000 -0400
30784@@ -257,7 +257,7 @@ void __init init_ohci1394_dma_on_all_con
30785 for (func = 0; func < 8; func++) {
30786 u32 class = read_pci_config(num,slot,func,
30787 PCI_CLASS_REVISION);
30788- if ((class == 0xffffffff))
30789+ if (class == 0xffffffff)
30790 continue; /* No device at this func */
30791
30792 if (class>>8 != PCI_CLASS_SERIAL_FIREWIRE_OHCI)
30793diff -urNp linux-2.6.32.43/drivers/ieee1394/ohci1394.c linux-2.6.32.43/drivers/ieee1394/ohci1394.c
30794--- linux-2.6.32.43/drivers/ieee1394/ohci1394.c 2011-03-27 14:31:47.000000000 -0400
30795+++ linux-2.6.32.43/drivers/ieee1394/ohci1394.c 2011-04-23 12:56:11.000000000 -0400
30796@@ -147,9 +147,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_
30797 printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args)
30798
30799 /* Module Parameters */
30800-static int phys_dma = 1;
30801+static int phys_dma;
30802 module_param(phys_dma, int, 0444);
30803-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1).");
30804+MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0).");
30805
30806 static void dma_trm_tasklet(unsigned long data);
30807 static void dma_trm_reset(struct dma_trm_ctx *d);
30808diff -urNp linux-2.6.32.43/drivers/ieee1394/sbp2.c linux-2.6.32.43/drivers/ieee1394/sbp2.c
30809--- linux-2.6.32.43/drivers/ieee1394/sbp2.c 2011-03-27 14:31:47.000000000 -0400
30810+++ linux-2.6.32.43/drivers/ieee1394/sbp2.c 2011-04-23 12:56:11.000000000 -0400
30811@@ -2111,7 +2111,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot
30812 MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME);
30813 MODULE_LICENSE("GPL");
30814
30815-static int sbp2_module_init(void)
30816+static int __init sbp2_module_init(void)
30817 {
30818 int ret;
30819
30820diff -urNp linux-2.6.32.43/drivers/infiniband/core/cm.c linux-2.6.32.43/drivers/infiniband/core/cm.c
30821--- linux-2.6.32.43/drivers/infiniband/core/cm.c 2011-03-27 14:31:47.000000000 -0400
30822+++ linux-2.6.32.43/drivers/infiniband/core/cm.c 2011-04-17 15:56:46.000000000 -0400
30823@@ -112,7 +112,7 @@ static char const counter_group_names[CM
30824
30825 struct cm_counter_group {
30826 struct kobject obj;
30827- atomic_long_t counter[CM_ATTR_COUNT];
30828+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
30829 };
30830
30831 struct cm_counter_attribute {
30832@@ -1386,7 +1386,7 @@ static void cm_dup_req_handler(struct cm
30833 struct ib_mad_send_buf *msg = NULL;
30834 int ret;
30835
30836- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30837+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30838 counter[CM_REQ_COUNTER]);
30839
30840 /* Quick state check to discard duplicate REQs. */
30841@@ -1764,7 +1764,7 @@ static void cm_dup_rep_handler(struct cm
30842 if (!cm_id_priv)
30843 return;
30844
30845- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30846+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30847 counter[CM_REP_COUNTER]);
30848 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
30849 if (ret)
30850@@ -1931,7 +1931,7 @@ static int cm_rtu_handler(struct cm_work
30851 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
30852 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
30853 spin_unlock_irq(&cm_id_priv->lock);
30854- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30855+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30856 counter[CM_RTU_COUNTER]);
30857 goto out;
30858 }
30859@@ -2110,7 +2110,7 @@ static int cm_dreq_handler(struct cm_wor
30860 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
30861 dreq_msg->local_comm_id);
30862 if (!cm_id_priv) {
30863- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30864+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30865 counter[CM_DREQ_COUNTER]);
30866 cm_issue_drep(work->port, work->mad_recv_wc);
30867 return -EINVAL;
30868@@ -2131,7 +2131,7 @@ static int cm_dreq_handler(struct cm_wor
30869 case IB_CM_MRA_REP_RCVD:
30870 break;
30871 case IB_CM_TIMEWAIT:
30872- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30873+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30874 counter[CM_DREQ_COUNTER]);
30875 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30876 goto unlock;
30877@@ -2145,7 +2145,7 @@ static int cm_dreq_handler(struct cm_wor
30878 cm_free_msg(msg);
30879 goto deref;
30880 case IB_CM_DREQ_RCVD:
30881- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30882+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30883 counter[CM_DREQ_COUNTER]);
30884 goto unlock;
30885 default:
30886@@ -2501,7 +2501,7 @@ static int cm_mra_handler(struct cm_work
30887 ib_modify_mad(cm_id_priv->av.port->mad_agent,
30888 cm_id_priv->msg, timeout)) {
30889 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
30890- atomic_long_inc(&work->port->
30891+ atomic_long_inc_unchecked(&work->port->
30892 counter_group[CM_RECV_DUPLICATES].
30893 counter[CM_MRA_COUNTER]);
30894 goto out;
30895@@ -2510,7 +2510,7 @@ static int cm_mra_handler(struct cm_work
30896 break;
30897 case IB_CM_MRA_REQ_RCVD:
30898 case IB_CM_MRA_REP_RCVD:
30899- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30900+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30901 counter[CM_MRA_COUNTER]);
30902 /* fall through */
30903 default:
30904@@ -2672,7 +2672,7 @@ static int cm_lap_handler(struct cm_work
30905 case IB_CM_LAP_IDLE:
30906 break;
30907 case IB_CM_MRA_LAP_SENT:
30908- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30909+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30910 counter[CM_LAP_COUNTER]);
30911 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
30912 goto unlock;
30913@@ -2688,7 +2688,7 @@ static int cm_lap_handler(struct cm_work
30914 cm_free_msg(msg);
30915 goto deref;
30916 case IB_CM_LAP_RCVD:
30917- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30918+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30919 counter[CM_LAP_COUNTER]);
30920 goto unlock;
30921 default:
30922@@ -2972,7 +2972,7 @@ static int cm_sidr_req_handler(struct cm
30923 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
30924 if (cur_cm_id_priv) {
30925 spin_unlock_irq(&cm.lock);
30926- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
30927+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
30928 counter[CM_SIDR_REQ_COUNTER]);
30929 goto out; /* Duplicate message. */
30930 }
30931@@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma
30932 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
30933 msg->retries = 1;
30934
30935- atomic_long_add(1 + msg->retries,
30936+ atomic_long_add_unchecked(1 + msg->retries,
30937 &port->counter_group[CM_XMIT].counter[attr_index]);
30938 if (msg->retries)
30939- atomic_long_add(msg->retries,
30940+ atomic_long_add_unchecked(msg->retries,
30941 &port->counter_group[CM_XMIT_RETRIES].
30942 counter[attr_index]);
30943
30944@@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma
30945 }
30946
30947 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
30948- atomic_long_inc(&port->counter_group[CM_RECV].
30949+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
30950 counter[attr_id - CM_ATTR_ID_OFFSET]);
30951
30952 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
30953@@ -3595,10 +3595,10 @@ static ssize_t cm_show_counter(struct ko
30954 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
30955
30956 return sprintf(buf, "%ld\n",
30957- atomic_long_read(&group->counter[cm_attr->index]));
30958+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
30959 }
30960
30961-static struct sysfs_ops cm_counter_ops = {
30962+static const struct sysfs_ops cm_counter_ops = {
30963 .show = cm_show_counter
30964 };
30965
30966diff -urNp linux-2.6.32.43/drivers/infiniband/core/fmr_pool.c linux-2.6.32.43/drivers/infiniband/core/fmr_pool.c
30967--- linux-2.6.32.43/drivers/infiniband/core/fmr_pool.c 2011-03-27 14:31:47.000000000 -0400
30968+++ linux-2.6.32.43/drivers/infiniband/core/fmr_pool.c 2011-05-04 17:56:28.000000000 -0400
30969@@ -97,8 +97,8 @@ struct ib_fmr_pool {
30970
30971 struct task_struct *thread;
30972
30973- atomic_t req_ser;
30974- atomic_t flush_ser;
30975+ atomic_unchecked_t req_ser;
30976+ atomic_unchecked_t flush_ser;
30977
30978 wait_queue_head_t force_wait;
30979 };
30980@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *p
30981 struct ib_fmr_pool *pool = pool_ptr;
30982
30983 do {
30984- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
30985+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
30986 ib_fmr_batch_release(pool);
30987
30988- atomic_inc(&pool->flush_ser);
30989+ atomic_inc_unchecked(&pool->flush_ser);
30990 wake_up_interruptible(&pool->force_wait);
30991
30992 if (pool->flush_function)
30993@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *p
30994 }
30995
30996 set_current_state(TASK_INTERRUPTIBLE);
30997- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
30998+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
30999 !kthread_should_stop())
31000 schedule();
31001 __set_current_state(TASK_RUNNING);
31002@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(s
31003 pool->dirty_watermark = params->dirty_watermark;
31004 pool->dirty_len = 0;
31005 spin_lock_init(&pool->pool_lock);
31006- atomic_set(&pool->req_ser, 0);
31007- atomic_set(&pool->flush_ser, 0);
31008+ atomic_set_unchecked(&pool->req_ser, 0);
31009+ atomic_set_unchecked(&pool->flush_ser, 0);
31010 init_waitqueue_head(&pool->force_wait);
31011
31012 pool->thread = kthread_run(ib_fmr_cleanup_thread,
31013@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool
31014 }
31015 spin_unlock_irq(&pool->pool_lock);
31016
31017- serial = atomic_inc_return(&pool->req_ser);
31018+ serial = atomic_inc_return_unchecked(&pool->req_ser);
31019 wake_up_process(pool->thread);
31020
31021 if (wait_event_interruptible(pool->force_wait,
31022- atomic_read(&pool->flush_ser) - serial >= 0))
31023+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
31024 return -EINTR;
31025
31026 return 0;
31027@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr
31028 } else {
31029 list_add_tail(&fmr->list, &pool->dirty_list);
31030 if (++pool->dirty_len >= pool->dirty_watermark) {
31031- atomic_inc(&pool->req_ser);
31032+ atomic_inc_unchecked(&pool->req_ser);
31033 wake_up_process(pool->thread);
31034 }
31035 }
31036diff -urNp linux-2.6.32.43/drivers/infiniband/core/sysfs.c linux-2.6.32.43/drivers/infiniband/core/sysfs.c
31037--- linux-2.6.32.43/drivers/infiniband/core/sysfs.c 2011-03-27 14:31:47.000000000 -0400
31038+++ linux-2.6.32.43/drivers/infiniband/core/sysfs.c 2011-04-17 15:56:46.000000000 -0400
31039@@ -79,7 +79,7 @@ static ssize_t port_attr_show(struct kob
31040 return port_attr->show(p, port_attr, buf);
31041 }
31042
31043-static struct sysfs_ops port_sysfs_ops = {
31044+static const struct sysfs_ops port_sysfs_ops = {
31045 .show = port_attr_show
31046 };
31047
31048diff -urNp linux-2.6.32.43/drivers/infiniband/core/uverbs_marshall.c linux-2.6.32.43/drivers/infiniband/core/uverbs_marshall.c
31049--- linux-2.6.32.43/drivers/infiniband/core/uverbs_marshall.c 2011-03-27 14:31:47.000000000 -0400
31050+++ linux-2.6.32.43/drivers/infiniband/core/uverbs_marshall.c 2011-04-17 15:56:46.000000000 -0400
31051@@ -40,18 +40,21 @@ void ib_copy_ah_attr_to_user(struct ib_u
31052 dst->grh.sgid_index = src->grh.sgid_index;
31053 dst->grh.hop_limit = src->grh.hop_limit;
31054 dst->grh.traffic_class = src->grh.traffic_class;
31055+ memset(&dst->grh.reserved, 0, sizeof(dst->grh.reserved));
31056 dst->dlid = src->dlid;
31057 dst->sl = src->sl;
31058 dst->src_path_bits = src->src_path_bits;
31059 dst->static_rate = src->static_rate;
31060 dst->is_global = src->ah_flags & IB_AH_GRH ? 1 : 0;
31061 dst->port_num = src->port_num;
31062+ dst->reserved = 0;
31063 }
31064 EXPORT_SYMBOL(ib_copy_ah_attr_to_user);
31065
31066 void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
31067 struct ib_qp_attr *src)
31068 {
31069+ dst->qp_state = src->qp_state;
31070 dst->cur_qp_state = src->cur_qp_state;
31071 dst->path_mtu = src->path_mtu;
31072 dst->path_mig_state = src->path_mig_state;
31073@@ -83,6 +86,7 @@ void ib_copy_qp_attr_to_user(struct ib_u
31074 dst->rnr_retry = src->rnr_retry;
31075 dst->alt_port_num = src->alt_port_num;
31076 dst->alt_timeout = src->alt_timeout;
31077+ memset(dst->reserved, 0, sizeof(dst->reserved));
31078 }
31079 EXPORT_SYMBOL(ib_copy_qp_attr_to_user);
31080
31081diff -urNp linux-2.6.32.43/drivers/infiniband/hw/ipath/ipath_fs.c linux-2.6.32.43/drivers/infiniband/hw/ipath/ipath_fs.c
31082--- linux-2.6.32.43/drivers/infiniband/hw/ipath/ipath_fs.c 2011-03-27 14:31:47.000000000 -0400
31083+++ linux-2.6.32.43/drivers/infiniband/hw/ipath/ipath_fs.c 2011-05-16 21:46:57.000000000 -0400
31084@@ -110,6 +110,8 @@ static ssize_t atomic_counters_read(stru
31085 struct infinipath_counters counters;
31086 struct ipath_devdata *dd;
31087
31088+ pax_track_stack();
31089+
31090 dd = file->f_path.dentry->d_inode->i_private;
31091 dd->ipath_f_read_counters(dd, &counters);
31092
31093diff -urNp linux-2.6.32.43/drivers/infiniband/hw/nes/nes.c linux-2.6.32.43/drivers/infiniband/hw/nes/nes.c
31094--- linux-2.6.32.43/drivers/infiniband/hw/nes/nes.c 2011-03-27 14:31:47.000000000 -0400
31095+++ linux-2.6.32.43/drivers/infiniband/hw/nes/nes.c 2011-05-04 17:56:28.000000000 -0400
31096@@ -102,7 +102,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limi
31097 LIST_HEAD(nes_adapter_list);
31098 static LIST_HEAD(nes_dev_list);
31099
31100-atomic_t qps_destroyed;
31101+atomic_unchecked_t qps_destroyed;
31102
31103 static unsigned int ee_flsh_adapter;
31104 static unsigned int sysfs_nonidx_addr;
31105@@ -259,7 +259,7 @@ static void nes_cqp_rem_ref_callback(str
31106 struct nes_adapter *nesadapter = nesdev->nesadapter;
31107 u32 qp_id;
31108
31109- atomic_inc(&qps_destroyed);
31110+ atomic_inc_unchecked(&qps_destroyed);
31111
31112 /* Free the control structures */
31113
31114diff -urNp linux-2.6.32.43/drivers/infiniband/hw/nes/nes_cm.c linux-2.6.32.43/drivers/infiniband/hw/nes/nes_cm.c
31115--- linux-2.6.32.43/drivers/infiniband/hw/nes/nes_cm.c 2011-03-27 14:31:47.000000000 -0400
31116+++ linux-2.6.32.43/drivers/infiniband/hw/nes/nes_cm.c 2011-05-04 17:56:28.000000000 -0400
31117@@ -69,11 +69,11 @@ u32 cm_packets_received;
31118 u32 cm_listens_created;
31119 u32 cm_listens_destroyed;
31120 u32 cm_backlog_drops;
31121-atomic_t cm_loopbacks;
31122-atomic_t cm_nodes_created;
31123-atomic_t cm_nodes_destroyed;
31124-atomic_t cm_accel_dropped_pkts;
31125-atomic_t cm_resets_recvd;
31126+atomic_unchecked_t cm_loopbacks;
31127+atomic_unchecked_t cm_nodes_created;
31128+atomic_unchecked_t cm_nodes_destroyed;
31129+atomic_unchecked_t cm_accel_dropped_pkts;
31130+atomic_unchecked_t cm_resets_recvd;
31131
31132 static inline int mini_cm_accelerated(struct nes_cm_core *,
31133 struct nes_cm_node *);
31134@@ -149,13 +149,13 @@ static struct nes_cm_ops nes_cm_api = {
31135
31136 static struct nes_cm_core *g_cm_core;
31137
31138-atomic_t cm_connects;
31139-atomic_t cm_accepts;
31140-atomic_t cm_disconnects;
31141-atomic_t cm_closes;
31142-atomic_t cm_connecteds;
31143-atomic_t cm_connect_reqs;
31144-atomic_t cm_rejects;
31145+atomic_unchecked_t cm_connects;
31146+atomic_unchecked_t cm_accepts;
31147+atomic_unchecked_t cm_disconnects;
31148+atomic_unchecked_t cm_closes;
31149+atomic_unchecked_t cm_connecteds;
31150+atomic_unchecked_t cm_connect_reqs;
31151+atomic_unchecked_t cm_rejects;
31152
31153
31154 /**
31155@@ -1195,7 +1195,7 @@ static struct nes_cm_node *make_cm_node(
31156 cm_node->rem_mac);
31157
31158 add_hte_node(cm_core, cm_node);
31159- atomic_inc(&cm_nodes_created);
31160+ atomic_inc_unchecked(&cm_nodes_created);
31161
31162 return cm_node;
31163 }
31164@@ -1253,7 +1253,7 @@ static int rem_ref_cm_node(struct nes_cm
31165 }
31166
31167 atomic_dec(&cm_core->node_cnt);
31168- atomic_inc(&cm_nodes_destroyed);
31169+ atomic_inc_unchecked(&cm_nodes_destroyed);
31170 nesqp = cm_node->nesqp;
31171 if (nesqp) {
31172 nesqp->cm_node = NULL;
31173@@ -1320,7 +1320,7 @@ static int process_options(struct nes_cm
31174
31175 static void drop_packet(struct sk_buff *skb)
31176 {
31177- atomic_inc(&cm_accel_dropped_pkts);
31178+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
31179 dev_kfree_skb_any(skb);
31180 }
31181
31182@@ -1377,7 +1377,7 @@ static void handle_rst_pkt(struct nes_cm
31183
31184 int reset = 0; /* whether to send reset in case of err.. */
31185 int passive_state;
31186- atomic_inc(&cm_resets_recvd);
31187+ atomic_inc_unchecked(&cm_resets_recvd);
31188 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
31189 " refcnt=%d\n", cm_node, cm_node->state,
31190 atomic_read(&cm_node->ref_count));
31191@@ -2000,7 +2000,7 @@ static struct nes_cm_node *mini_cm_conne
31192 rem_ref_cm_node(cm_node->cm_core, cm_node);
31193 return NULL;
31194 }
31195- atomic_inc(&cm_loopbacks);
31196+ atomic_inc_unchecked(&cm_loopbacks);
31197 loopbackremotenode->loopbackpartner = cm_node;
31198 loopbackremotenode->tcp_cntxt.rcv_wscale =
31199 NES_CM_DEFAULT_RCV_WND_SCALE;
31200@@ -2262,7 +2262,7 @@ static int mini_cm_recv_pkt(struct nes_c
31201 add_ref_cm_node(cm_node);
31202 } else if (cm_node->state == NES_CM_STATE_TSA) {
31203 rem_ref_cm_node(cm_core, cm_node);
31204- atomic_inc(&cm_accel_dropped_pkts);
31205+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
31206 dev_kfree_skb_any(skb);
31207 break;
31208 }
31209@@ -2568,7 +2568,7 @@ static int nes_cm_disconn_true(struct ne
31210
31211 if ((cm_id) && (cm_id->event_handler)) {
31212 if (issue_disconn) {
31213- atomic_inc(&cm_disconnects);
31214+ atomic_inc_unchecked(&cm_disconnects);
31215 cm_event.event = IW_CM_EVENT_DISCONNECT;
31216 cm_event.status = disconn_status;
31217 cm_event.local_addr = cm_id->local_addr;
31218@@ -2590,7 +2590,7 @@ static int nes_cm_disconn_true(struct ne
31219 }
31220
31221 if (issue_close) {
31222- atomic_inc(&cm_closes);
31223+ atomic_inc_unchecked(&cm_closes);
31224 nes_disconnect(nesqp, 1);
31225
31226 cm_id->provider_data = nesqp;
31227@@ -2710,7 +2710,7 @@ int nes_accept(struct iw_cm_id *cm_id, s
31228
31229 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
31230 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
31231- atomic_inc(&cm_accepts);
31232+ atomic_inc_unchecked(&cm_accepts);
31233
31234 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
31235 atomic_read(&nesvnic->netdev->refcnt));
31236@@ -2919,7 +2919,7 @@ int nes_reject(struct iw_cm_id *cm_id, c
31237
31238 struct nes_cm_core *cm_core;
31239
31240- atomic_inc(&cm_rejects);
31241+ atomic_inc_unchecked(&cm_rejects);
31242 cm_node = (struct nes_cm_node *) cm_id->provider_data;
31243 loopback = cm_node->loopbackpartner;
31244 cm_core = cm_node->cm_core;
31245@@ -2982,7 +2982,7 @@ int nes_connect(struct iw_cm_id *cm_id,
31246 ntohl(cm_id->local_addr.sin_addr.s_addr),
31247 ntohs(cm_id->local_addr.sin_port));
31248
31249- atomic_inc(&cm_connects);
31250+ atomic_inc_unchecked(&cm_connects);
31251 nesqp->active_conn = 1;
31252
31253 /* cache the cm_id in the qp */
31254@@ -3195,7 +3195,7 @@ static void cm_event_connected(struct ne
31255 if (nesqp->destroyed) {
31256 return;
31257 }
31258- atomic_inc(&cm_connecteds);
31259+ atomic_inc_unchecked(&cm_connecteds);
31260 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
31261 " local port 0x%04X. jiffies = %lu.\n",
31262 nesqp->hwqp.qp_id,
31263@@ -3403,7 +3403,7 @@ static void cm_event_reset(struct nes_cm
31264
31265 ret = cm_id->event_handler(cm_id, &cm_event);
31266 cm_id->add_ref(cm_id);
31267- atomic_inc(&cm_closes);
31268+ atomic_inc_unchecked(&cm_closes);
31269 cm_event.event = IW_CM_EVENT_CLOSE;
31270 cm_event.status = IW_CM_EVENT_STATUS_OK;
31271 cm_event.provider_data = cm_id->provider_data;
31272@@ -3439,7 +3439,7 @@ static void cm_event_mpa_req(struct nes_
31273 return;
31274 cm_id = cm_node->cm_id;
31275
31276- atomic_inc(&cm_connect_reqs);
31277+ atomic_inc_unchecked(&cm_connect_reqs);
31278 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
31279 cm_node, cm_id, jiffies);
31280
31281@@ -3477,7 +3477,7 @@ static void cm_event_mpa_reject(struct n
31282 return;
31283 cm_id = cm_node->cm_id;
31284
31285- atomic_inc(&cm_connect_reqs);
31286+ atomic_inc_unchecked(&cm_connect_reqs);
31287 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
31288 cm_node, cm_id, jiffies);
31289
31290diff -urNp linux-2.6.32.43/drivers/infiniband/hw/nes/nes.h linux-2.6.32.43/drivers/infiniband/hw/nes/nes.h
31291--- linux-2.6.32.43/drivers/infiniband/hw/nes/nes.h 2011-03-27 14:31:47.000000000 -0400
31292+++ linux-2.6.32.43/drivers/infiniband/hw/nes/nes.h 2011-05-04 17:56:28.000000000 -0400
31293@@ -174,17 +174,17 @@ extern unsigned int nes_debug_level;
31294 extern unsigned int wqm_quanta;
31295 extern struct list_head nes_adapter_list;
31296
31297-extern atomic_t cm_connects;
31298-extern atomic_t cm_accepts;
31299-extern atomic_t cm_disconnects;
31300-extern atomic_t cm_closes;
31301-extern atomic_t cm_connecteds;
31302-extern atomic_t cm_connect_reqs;
31303-extern atomic_t cm_rejects;
31304-extern atomic_t mod_qp_timouts;
31305-extern atomic_t qps_created;
31306-extern atomic_t qps_destroyed;
31307-extern atomic_t sw_qps_destroyed;
31308+extern atomic_unchecked_t cm_connects;
31309+extern atomic_unchecked_t cm_accepts;
31310+extern atomic_unchecked_t cm_disconnects;
31311+extern atomic_unchecked_t cm_closes;
31312+extern atomic_unchecked_t cm_connecteds;
31313+extern atomic_unchecked_t cm_connect_reqs;
31314+extern atomic_unchecked_t cm_rejects;
31315+extern atomic_unchecked_t mod_qp_timouts;
31316+extern atomic_unchecked_t qps_created;
31317+extern atomic_unchecked_t qps_destroyed;
31318+extern atomic_unchecked_t sw_qps_destroyed;
31319 extern u32 mh_detected;
31320 extern u32 mh_pauses_sent;
31321 extern u32 cm_packets_sent;
31322@@ -196,11 +196,11 @@ extern u32 cm_packets_retrans;
31323 extern u32 cm_listens_created;
31324 extern u32 cm_listens_destroyed;
31325 extern u32 cm_backlog_drops;
31326-extern atomic_t cm_loopbacks;
31327-extern atomic_t cm_nodes_created;
31328-extern atomic_t cm_nodes_destroyed;
31329-extern atomic_t cm_accel_dropped_pkts;
31330-extern atomic_t cm_resets_recvd;
31331+extern atomic_unchecked_t cm_loopbacks;
31332+extern atomic_unchecked_t cm_nodes_created;
31333+extern atomic_unchecked_t cm_nodes_destroyed;
31334+extern atomic_unchecked_t cm_accel_dropped_pkts;
31335+extern atomic_unchecked_t cm_resets_recvd;
31336
31337 extern u32 int_mod_timer_init;
31338 extern u32 int_mod_cq_depth_256;
31339diff -urNp linux-2.6.32.43/drivers/infiniband/hw/nes/nes_nic.c linux-2.6.32.43/drivers/infiniband/hw/nes/nes_nic.c
31340--- linux-2.6.32.43/drivers/infiniband/hw/nes/nes_nic.c 2011-03-27 14:31:47.000000000 -0400
31341+++ linux-2.6.32.43/drivers/infiniband/hw/nes/nes_nic.c 2011-05-04 17:56:28.000000000 -0400
31342@@ -1210,17 +1210,17 @@ static void nes_netdev_get_ethtool_stats
31343 target_stat_values[++index] = mh_detected;
31344 target_stat_values[++index] = mh_pauses_sent;
31345 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
31346- target_stat_values[++index] = atomic_read(&cm_connects);
31347- target_stat_values[++index] = atomic_read(&cm_accepts);
31348- target_stat_values[++index] = atomic_read(&cm_disconnects);
31349- target_stat_values[++index] = atomic_read(&cm_connecteds);
31350- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
31351- target_stat_values[++index] = atomic_read(&cm_rejects);
31352- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
31353- target_stat_values[++index] = atomic_read(&qps_created);
31354- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
31355- target_stat_values[++index] = atomic_read(&qps_destroyed);
31356- target_stat_values[++index] = atomic_read(&cm_closes);
31357+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
31358+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
31359+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
31360+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
31361+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
31362+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
31363+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
31364+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
31365+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
31366+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
31367+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
31368 target_stat_values[++index] = cm_packets_sent;
31369 target_stat_values[++index] = cm_packets_bounced;
31370 target_stat_values[++index] = cm_packets_created;
31371@@ -1230,11 +1230,11 @@ static void nes_netdev_get_ethtool_stats
31372 target_stat_values[++index] = cm_listens_created;
31373 target_stat_values[++index] = cm_listens_destroyed;
31374 target_stat_values[++index] = cm_backlog_drops;
31375- target_stat_values[++index] = atomic_read(&cm_loopbacks);
31376- target_stat_values[++index] = atomic_read(&cm_nodes_created);
31377- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
31378- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
31379- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
31380+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
31381+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
31382+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
31383+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
31384+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
31385 target_stat_values[++index] = int_mod_timer_init;
31386 target_stat_values[++index] = int_mod_cq_depth_1;
31387 target_stat_values[++index] = int_mod_cq_depth_4;
31388diff -urNp linux-2.6.32.43/drivers/infiniband/hw/nes/nes_verbs.c linux-2.6.32.43/drivers/infiniband/hw/nes/nes_verbs.c
31389--- linux-2.6.32.43/drivers/infiniband/hw/nes/nes_verbs.c 2011-03-27 14:31:47.000000000 -0400
31390+++ linux-2.6.32.43/drivers/infiniband/hw/nes/nes_verbs.c 2011-05-04 17:56:28.000000000 -0400
31391@@ -45,9 +45,9 @@
31392
31393 #include <rdma/ib_umem.h>
31394
31395-atomic_t mod_qp_timouts;
31396-atomic_t qps_created;
31397-atomic_t sw_qps_destroyed;
31398+atomic_unchecked_t mod_qp_timouts;
31399+atomic_unchecked_t qps_created;
31400+atomic_unchecked_t sw_qps_destroyed;
31401
31402 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
31403
31404@@ -1240,7 +1240,7 @@ static struct ib_qp *nes_create_qp(struc
31405 if (init_attr->create_flags)
31406 return ERR_PTR(-EINVAL);
31407
31408- atomic_inc(&qps_created);
31409+ atomic_inc_unchecked(&qps_created);
31410 switch (init_attr->qp_type) {
31411 case IB_QPT_RC:
31412 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
31413@@ -1568,7 +1568,7 @@ static int nes_destroy_qp(struct ib_qp *
31414 struct iw_cm_event cm_event;
31415 int ret;
31416
31417- atomic_inc(&sw_qps_destroyed);
31418+ atomic_inc_unchecked(&sw_qps_destroyed);
31419 nesqp->destroyed = 1;
31420
31421 /* Blow away the connection if it exists. */
31422diff -urNp linux-2.6.32.43/drivers/input/gameport/gameport.c linux-2.6.32.43/drivers/input/gameport/gameport.c
31423--- linux-2.6.32.43/drivers/input/gameport/gameport.c 2011-03-27 14:31:47.000000000 -0400
31424+++ linux-2.6.32.43/drivers/input/gameport/gameport.c 2011-05-04 17:56:28.000000000 -0400
31425@@ -515,13 +515,13 @@ EXPORT_SYMBOL(gameport_set_phys);
31426 */
31427 static void gameport_init_port(struct gameport *gameport)
31428 {
31429- static atomic_t gameport_no = ATOMIC_INIT(0);
31430+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
31431
31432 __module_get(THIS_MODULE);
31433
31434 mutex_init(&gameport->drv_mutex);
31435 device_initialize(&gameport->dev);
31436- dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return(&gameport_no) - 1);
31437+ dev_set_name(&gameport->dev, "gameport%lu", (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
31438 gameport->dev.bus = &gameport_bus;
31439 gameport->dev.release = gameport_release_port;
31440 if (gameport->parent)
31441diff -urNp linux-2.6.32.43/drivers/input/input.c linux-2.6.32.43/drivers/input/input.c
31442--- linux-2.6.32.43/drivers/input/input.c 2011-03-27 14:31:47.000000000 -0400
31443+++ linux-2.6.32.43/drivers/input/input.c 2011-05-04 17:56:28.000000000 -0400
31444@@ -1558,7 +1558,7 @@ EXPORT_SYMBOL(input_set_capability);
31445 */
31446 int input_register_device(struct input_dev *dev)
31447 {
31448- static atomic_t input_no = ATOMIC_INIT(0);
31449+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
31450 struct input_handler *handler;
31451 const char *path;
31452 int error;
31453@@ -1585,7 +1585,7 @@ int input_register_device(struct input_d
31454 dev->setkeycode = input_default_setkeycode;
31455
31456 dev_set_name(&dev->dev, "input%ld",
31457- (unsigned long) atomic_inc_return(&input_no) - 1);
31458+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
31459
31460 error = device_add(&dev->dev);
31461 if (error)
31462diff -urNp linux-2.6.32.43/drivers/input/joystick/sidewinder.c linux-2.6.32.43/drivers/input/joystick/sidewinder.c
31463--- linux-2.6.32.43/drivers/input/joystick/sidewinder.c 2011-03-27 14:31:47.000000000 -0400
31464+++ linux-2.6.32.43/drivers/input/joystick/sidewinder.c 2011-05-18 20:09:36.000000000 -0400
31465@@ -30,6 +30,7 @@
31466 #include <linux/kernel.h>
31467 #include <linux/module.h>
31468 #include <linux/slab.h>
31469+#include <linux/sched.h>
31470 #include <linux/init.h>
31471 #include <linux/input.h>
31472 #include <linux/gameport.h>
31473@@ -428,6 +429,8 @@ static int sw_read(struct sw *sw)
31474 unsigned char buf[SW_LENGTH];
31475 int i;
31476
31477+ pax_track_stack();
31478+
31479 i = sw_read_packet(sw->gameport, buf, sw->length, 0);
31480
31481 if (sw->type == SW_ID_3DP && sw->length == 66 && i != 66) { /* Broken packet, try to fix */
31482diff -urNp linux-2.6.32.43/drivers/input/joystick/xpad.c linux-2.6.32.43/drivers/input/joystick/xpad.c
31483--- linux-2.6.32.43/drivers/input/joystick/xpad.c 2011-03-27 14:31:47.000000000 -0400
31484+++ linux-2.6.32.43/drivers/input/joystick/xpad.c 2011-05-04 17:56:28.000000000 -0400
31485@@ -621,7 +621,7 @@ static void xpad_led_set(struct led_clas
31486
31487 static int xpad_led_probe(struct usb_xpad *xpad)
31488 {
31489- static atomic_t led_seq = ATOMIC_INIT(0);
31490+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
31491 long led_no;
31492 struct xpad_led *led;
31493 struct led_classdev *led_cdev;
31494@@ -634,7 +634,7 @@ static int xpad_led_probe(struct usb_xpa
31495 if (!led)
31496 return -ENOMEM;
31497
31498- led_no = (long)atomic_inc_return(&led_seq) - 1;
31499+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
31500
31501 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
31502 led->xpad = xpad;
31503diff -urNp linux-2.6.32.43/drivers/input/serio/serio.c linux-2.6.32.43/drivers/input/serio/serio.c
31504--- linux-2.6.32.43/drivers/input/serio/serio.c 2011-03-27 14:31:47.000000000 -0400
31505+++ linux-2.6.32.43/drivers/input/serio/serio.c 2011-05-04 17:56:28.000000000 -0400
31506@@ -527,7 +527,7 @@ static void serio_release_port(struct de
31507 */
31508 static void serio_init_port(struct serio *serio)
31509 {
31510- static atomic_t serio_no = ATOMIC_INIT(0);
31511+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
31512
31513 __module_get(THIS_MODULE);
31514
31515@@ -536,7 +536,7 @@ static void serio_init_port(struct serio
31516 mutex_init(&serio->drv_mutex);
31517 device_initialize(&serio->dev);
31518 dev_set_name(&serio->dev, "serio%ld",
31519- (long)atomic_inc_return(&serio_no) - 1);
31520+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
31521 serio->dev.bus = &serio_bus;
31522 serio->dev.release = serio_release_port;
31523 if (serio->parent) {
31524diff -urNp linux-2.6.32.43/drivers/isdn/gigaset/common.c linux-2.6.32.43/drivers/isdn/gigaset/common.c
31525--- linux-2.6.32.43/drivers/isdn/gigaset/common.c 2011-03-27 14:31:47.000000000 -0400
31526+++ linux-2.6.32.43/drivers/isdn/gigaset/common.c 2011-04-17 15:56:46.000000000 -0400
31527@@ -712,7 +712,7 @@ struct cardstate *gigaset_initcs(struct
31528 cs->commands_pending = 0;
31529 cs->cur_at_seq = 0;
31530 cs->gotfwver = -1;
31531- cs->open_count = 0;
31532+ local_set(&cs->open_count, 0);
31533 cs->dev = NULL;
31534 cs->tty = NULL;
31535 cs->tty_dev = NULL;
31536diff -urNp linux-2.6.32.43/drivers/isdn/gigaset/gigaset.h linux-2.6.32.43/drivers/isdn/gigaset/gigaset.h
31537--- linux-2.6.32.43/drivers/isdn/gigaset/gigaset.h 2011-03-27 14:31:47.000000000 -0400
31538+++ linux-2.6.32.43/drivers/isdn/gigaset/gigaset.h 2011-04-17 15:56:46.000000000 -0400
31539@@ -34,6 +34,7 @@
31540 #include <linux/tty_driver.h>
31541 #include <linux/list.h>
31542 #include <asm/atomic.h>
31543+#include <asm/local.h>
31544
31545 #define GIG_VERSION {0,5,0,0}
31546 #define GIG_COMPAT {0,4,0,0}
31547@@ -446,7 +447,7 @@ struct cardstate {
31548 spinlock_t cmdlock;
31549 unsigned curlen, cmdbytes;
31550
31551- unsigned open_count;
31552+ local_t open_count;
31553 struct tty_struct *tty;
31554 struct tasklet_struct if_wake_tasklet;
31555 unsigned control_state;
31556diff -urNp linux-2.6.32.43/drivers/isdn/gigaset/interface.c linux-2.6.32.43/drivers/isdn/gigaset/interface.c
31557--- linux-2.6.32.43/drivers/isdn/gigaset/interface.c 2011-03-27 14:31:47.000000000 -0400
31558+++ linux-2.6.32.43/drivers/isdn/gigaset/interface.c 2011-04-17 15:56:46.000000000 -0400
31559@@ -165,9 +165,7 @@ static int if_open(struct tty_struct *tt
31560 return -ERESTARTSYS; // FIXME -EINTR?
31561 tty->driver_data = cs;
31562
31563- ++cs->open_count;
31564-
31565- if (cs->open_count == 1) {
31566+ if (local_inc_return(&cs->open_count) == 1) {
31567 spin_lock_irqsave(&cs->lock, flags);
31568 cs->tty = tty;
31569 spin_unlock_irqrestore(&cs->lock, flags);
31570@@ -195,10 +193,10 @@ static void if_close(struct tty_struct *
31571
31572 if (!cs->connected)
31573 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31574- else if (!cs->open_count)
31575+ else if (!local_read(&cs->open_count))
31576 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31577 else {
31578- if (!--cs->open_count) {
31579+ if (!local_dec_return(&cs->open_count)) {
31580 spin_lock_irqsave(&cs->lock, flags);
31581 cs->tty = NULL;
31582 spin_unlock_irqrestore(&cs->lock, flags);
31583@@ -233,7 +231,7 @@ static int if_ioctl(struct tty_struct *t
31584 if (!cs->connected) {
31585 gig_dbg(DEBUG_IF, "not connected");
31586 retval = -ENODEV;
31587- } else if (!cs->open_count)
31588+ } else if (!local_read(&cs->open_count))
31589 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31590 else {
31591 retval = 0;
31592@@ -361,7 +359,7 @@ static int if_write(struct tty_struct *t
31593 if (!cs->connected) {
31594 gig_dbg(DEBUG_IF, "not connected");
31595 retval = -ENODEV;
31596- } else if (!cs->open_count)
31597+ } else if (!local_read(&cs->open_count))
31598 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31599 else if (cs->mstate != MS_LOCKED) {
31600 dev_warn(cs->dev, "can't write to unlocked device\n");
31601@@ -395,7 +393,7 @@ static int if_write_room(struct tty_stru
31602 if (!cs->connected) {
31603 gig_dbg(DEBUG_IF, "not connected");
31604 retval = -ENODEV;
31605- } else if (!cs->open_count)
31606+ } else if (!local_read(&cs->open_count))
31607 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31608 else if (cs->mstate != MS_LOCKED) {
31609 dev_warn(cs->dev, "can't write to unlocked device\n");
31610@@ -425,7 +423,7 @@ static int if_chars_in_buffer(struct tty
31611
31612 if (!cs->connected)
31613 gig_dbg(DEBUG_IF, "not connected");
31614- else if (!cs->open_count)
31615+ else if (!local_read(&cs->open_count))
31616 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31617 else if (cs->mstate != MS_LOCKED)
31618 dev_warn(cs->dev, "can't write to unlocked device\n");
31619@@ -453,7 +451,7 @@ static void if_throttle(struct tty_struc
31620
31621 if (!cs->connected)
31622 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31623- else if (!cs->open_count)
31624+ else if (!local_read(&cs->open_count))
31625 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31626 else {
31627 //FIXME
31628@@ -478,7 +476,7 @@ static void if_unthrottle(struct tty_str
31629
31630 if (!cs->connected)
31631 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
31632- else if (!cs->open_count)
31633+ else if (!local_read(&cs->open_count))
31634 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31635 else {
31636 //FIXME
31637@@ -510,7 +508,7 @@ static void if_set_termios(struct tty_st
31638 goto out;
31639 }
31640
31641- if (!cs->open_count) {
31642+ if (!local_read(&cs->open_count)) {
31643 dev_warn(cs->dev, "%s: device not opened\n", __func__);
31644 goto out;
31645 }
31646diff -urNp linux-2.6.32.43/drivers/isdn/hardware/avm/b1.c linux-2.6.32.43/drivers/isdn/hardware/avm/b1.c
31647--- linux-2.6.32.43/drivers/isdn/hardware/avm/b1.c 2011-03-27 14:31:47.000000000 -0400
31648+++ linux-2.6.32.43/drivers/isdn/hardware/avm/b1.c 2011-04-17 15:56:46.000000000 -0400
31649@@ -173,7 +173,7 @@ int b1_load_t4file(avmcard *card, capilo
31650 }
31651 if (left) {
31652 if (t4file->user) {
31653- if (copy_from_user(buf, dp, left))
31654+ if (left > sizeof buf || copy_from_user(buf, dp, left))
31655 return -EFAULT;
31656 } else {
31657 memcpy(buf, dp, left);
31658@@ -221,7 +221,7 @@ int b1_load_config(avmcard *card, capilo
31659 }
31660 if (left) {
31661 if (config->user) {
31662- if (copy_from_user(buf, dp, left))
31663+ if (left > sizeof buf || copy_from_user(buf, dp, left))
31664 return -EFAULT;
31665 } else {
31666 memcpy(buf, dp, left);
31667diff -urNp linux-2.6.32.43/drivers/isdn/hardware/eicon/capidtmf.c linux-2.6.32.43/drivers/isdn/hardware/eicon/capidtmf.c
31668--- linux-2.6.32.43/drivers/isdn/hardware/eicon/capidtmf.c 2011-03-27 14:31:47.000000000 -0400
31669+++ linux-2.6.32.43/drivers/isdn/hardware/eicon/capidtmf.c 2011-05-16 21:46:57.000000000 -0400
31670@@ -498,6 +498,7 @@ void capidtmf_recv_block (t_capidtmf_sta
31671 byte goertzel_result_buffer[CAPIDTMF_RECV_TOTAL_FREQUENCY_COUNT];
31672 short windowed_sample_buffer[CAPIDTMF_RECV_WINDOWED_SAMPLES];
31673
31674+ pax_track_stack();
31675
31676 if (p_state->recv.state & CAPIDTMF_RECV_STATE_DTMF_ACTIVE)
31677 {
31678diff -urNp linux-2.6.32.43/drivers/isdn/hardware/eicon/capifunc.c linux-2.6.32.43/drivers/isdn/hardware/eicon/capifunc.c
31679--- linux-2.6.32.43/drivers/isdn/hardware/eicon/capifunc.c 2011-03-27 14:31:47.000000000 -0400
31680+++ linux-2.6.32.43/drivers/isdn/hardware/eicon/capifunc.c 2011-05-16 21:46:57.000000000 -0400
31681@@ -1055,6 +1055,8 @@ static int divacapi_connect_didd(void)
31682 IDI_SYNC_REQ req;
31683 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31684
31685+ pax_track_stack();
31686+
31687 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31688
31689 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31690diff -urNp linux-2.6.32.43/drivers/isdn/hardware/eicon/diddfunc.c linux-2.6.32.43/drivers/isdn/hardware/eicon/diddfunc.c
31691--- linux-2.6.32.43/drivers/isdn/hardware/eicon/diddfunc.c 2011-03-27 14:31:47.000000000 -0400
31692+++ linux-2.6.32.43/drivers/isdn/hardware/eicon/diddfunc.c 2011-05-16 21:46:57.000000000 -0400
31693@@ -54,6 +54,8 @@ static int DIVA_INIT_FUNCTION connect_di
31694 IDI_SYNC_REQ req;
31695 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31696
31697+ pax_track_stack();
31698+
31699 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31700
31701 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31702diff -urNp linux-2.6.32.43/drivers/isdn/hardware/eicon/divasfunc.c linux-2.6.32.43/drivers/isdn/hardware/eicon/divasfunc.c
31703--- linux-2.6.32.43/drivers/isdn/hardware/eicon/divasfunc.c 2011-03-27 14:31:47.000000000 -0400
31704+++ linux-2.6.32.43/drivers/isdn/hardware/eicon/divasfunc.c 2011-05-16 21:46:57.000000000 -0400
31705@@ -161,6 +161,8 @@ static int DIVA_INIT_FUNCTION connect_di
31706 IDI_SYNC_REQ req;
31707 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31708
31709+ pax_track_stack();
31710+
31711 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31712
31713 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31714diff -urNp linux-2.6.32.43/drivers/isdn/hardware/eicon/divasync.h linux-2.6.32.43/drivers/isdn/hardware/eicon/divasync.h
31715--- linux-2.6.32.43/drivers/isdn/hardware/eicon/divasync.h 2011-03-27 14:31:47.000000000 -0400
31716+++ linux-2.6.32.43/drivers/isdn/hardware/eicon/divasync.h 2011-08-05 20:33:55.000000000 -0400
31717@@ -146,7 +146,7 @@ typedef struct _diva_didd_add_adapter {
31718 } diva_didd_add_adapter_t;
31719 typedef struct _diva_didd_remove_adapter {
31720 IDI_CALL p_request;
31721-} diva_didd_remove_adapter_t;
31722+} __no_const diva_didd_remove_adapter_t;
31723 typedef struct _diva_didd_read_adapter_array {
31724 void * buffer;
31725 dword length;
31726diff -urNp linux-2.6.32.43/drivers/isdn/hardware/eicon/idifunc.c linux-2.6.32.43/drivers/isdn/hardware/eicon/idifunc.c
31727--- linux-2.6.32.43/drivers/isdn/hardware/eicon/idifunc.c 2011-03-27 14:31:47.000000000 -0400
31728+++ linux-2.6.32.43/drivers/isdn/hardware/eicon/idifunc.c 2011-05-16 21:46:57.000000000 -0400
31729@@ -188,6 +188,8 @@ static int DIVA_INIT_FUNCTION connect_di
31730 IDI_SYNC_REQ req;
31731 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31732
31733+ pax_track_stack();
31734+
31735 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31736
31737 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31738diff -urNp linux-2.6.32.43/drivers/isdn/hardware/eicon/message.c linux-2.6.32.43/drivers/isdn/hardware/eicon/message.c
31739--- linux-2.6.32.43/drivers/isdn/hardware/eicon/message.c 2011-03-27 14:31:47.000000000 -0400
31740+++ linux-2.6.32.43/drivers/isdn/hardware/eicon/message.c 2011-05-16 21:46:57.000000000 -0400
31741@@ -4889,6 +4889,8 @@ static void sig_ind(PLCI *plci)
31742 dword d;
31743 word w;
31744
31745+ pax_track_stack();
31746+
31747 a = plci->adapter;
31748 Id = ((word)plci->Id<<8)|a->Id;
31749 PUT_WORD(&SS_Ind[4],0x0000);
31750@@ -7484,6 +7486,8 @@ static word add_b1(PLCI *plci, API_PARSE
31751 word j, n, w;
31752 dword d;
31753
31754+ pax_track_stack();
31755+
31756
31757 for(i=0;i<8;i++) bp_parms[i].length = 0;
31758 for(i=0;i<2;i++) global_config[i].length = 0;
31759@@ -7958,6 +7962,8 @@ static word add_b23(PLCI *plci, API_PARS
31760 const byte llc3[] = {4,3,2,2,6,6,0};
31761 const byte header[] = {0,2,3,3,0,0,0};
31762
31763+ pax_track_stack();
31764+
31765 for(i=0;i<8;i++) bp_parms[i].length = 0;
31766 for(i=0;i<6;i++) b2_config_parms[i].length = 0;
31767 for(i=0;i<5;i++) b3_config_parms[i].length = 0;
31768@@ -14761,6 +14767,8 @@ static void group_optimization(DIVA_CAPI
31769 word appl_number_group_type[MAX_APPL];
31770 PLCI *auxplci;
31771
31772+ pax_track_stack();
31773+
31774 set_group_ind_mask (plci); /* all APPLs within this inc. call are allowed to dial in */
31775
31776 if(!a->group_optimization_enabled)
31777diff -urNp linux-2.6.32.43/drivers/isdn/hardware/eicon/mntfunc.c linux-2.6.32.43/drivers/isdn/hardware/eicon/mntfunc.c
31778--- linux-2.6.32.43/drivers/isdn/hardware/eicon/mntfunc.c 2011-03-27 14:31:47.000000000 -0400
31779+++ linux-2.6.32.43/drivers/isdn/hardware/eicon/mntfunc.c 2011-05-16 21:46:57.000000000 -0400
31780@@ -79,6 +79,8 @@ static int DIVA_INIT_FUNCTION connect_di
31781 IDI_SYNC_REQ req;
31782 DESCRIPTOR DIDD_Table[MAX_DESCRIPTORS];
31783
31784+ pax_track_stack();
31785+
31786 DIVA_DIDD_Read(DIDD_Table, sizeof(DIDD_Table));
31787
31788 for (x = 0; x < MAX_DESCRIPTORS; x++) {
31789diff -urNp linux-2.6.32.43/drivers/isdn/hardware/eicon/xdi_adapter.h linux-2.6.32.43/drivers/isdn/hardware/eicon/xdi_adapter.h
31790--- linux-2.6.32.43/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-03-27 14:31:47.000000000 -0400
31791+++ linux-2.6.32.43/drivers/isdn/hardware/eicon/xdi_adapter.h 2011-08-05 20:33:55.000000000 -0400
31792@@ -44,7 +44,7 @@ typedef struct _xdi_mbox_t {
31793 typedef struct _diva_os_idi_adapter_interface {
31794 diva_init_card_proc_t cleanup_adapter_proc;
31795 diva_cmd_card_proc_t cmd_proc;
31796-} diva_os_idi_adapter_interface_t;
31797+} __no_const diva_os_idi_adapter_interface_t;
31798
31799 typedef struct _diva_os_xdi_adapter {
31800 struct list_head link;
31801diff -urNp linux-2.6.32.43/drivers/isdn/i4l/isdn_common.c linux-2.6.32.43/drivers/isdn/i4l/isdn_common.c
31802--- linux-2.6.32.43/drivers/isdn/i4l/isdn_common.c 2011-03-27 14:31:47.000000000 -0400
31803+++ linux-2.6.32.43/drivers/isdn/i4l/isdn_common.c 2011-05-16 21:46:57.000000000 -0400
31804@@ -1290,6 +1290,8 @@ isdn_ioctl(struct inode *inode, struct f
31805 } iocpar;
31806 void __user *argp = (void __user *)arg;
31807
31808+ pax_track_stack();
31809+
31810 #define name iocpar.name
31811 #define bname iocpar.bname
31812 #define iocts iocpar.iocts
31813diff -urNp linux-2.6.32.43/drivers/isdn/icn/icn.c linux-2.6.32.43/drivers/isdn/icn/icn.c
31814--- linux-2.6.32.43/drivers/isdn/icn/icn.c 2011-03-27 14:31:47.000000000 -0400
31815+++ linux-2.6.32.43/drivers/isdn/icn/icn.c 2011-04-17 15:56:46.000000000 -0400
31816@@ -1044,7 +1044,7 @@ icn_writecmd(const u_char * buf, int len
31817 if (count > len)
31818 count = len;
31819 if (user) {
31820- if (copy_from_user(msg, buf, count))
31821+ if (count > sizeof msg || copy_from_user(msg, buf, count))
31822 return -EFAULT;
31823 } else
31824 memcpy(msg, buf, count);
31825diff -urNp linux-2.6.32.43/drivers/isdn/mISDN/socket.c linux-2.6.32.43/drivers/isdn/mISDN/socket.c
31826--- linux-2.6.32.43/drivers/isdn/mISDN/socket.c 2011-03-27 14:31:47.000000000 -0400
31827+++ linux-2.6.32.43/drivers/isdn/mISDN/socket.c 2011-04-17 15:56:46.000000000 -0400
31828@@ -391,6 +391,7 @@ data_sock_ioctl(struct socket *sock, uns
31829 if (dev) {
31830 struct mISDN_devinfo di;
31831
31832+ memset(&di, 0, sizeof(di));
31833 di.id = dev->id;
31834 di.Dprotocols = dev->Dprotocols;
31835 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
31836@@ -671,6 +672,7 @@ base_sock_ioctl(struct socket *sock, uns
31837 if (dev) {
31838 struct mISDN_devinfo di;
31839
31840+ memset(&di, 0, sizeof(di));
31841 di.id = dev->id;
31842 di.Dprotocols = dev->Dprotocols;
31843 di.Bprotocols = dev->Bprotocols | get_all_Bprotocols();
31844diff -urNp linux-2.6.32.43/drivers/isdn/sc/interrupt.c linux-2.6.32.43/drivers/isdn/sc/interrupt.c
31845--- linux-2.6.32.43/drivers/isdn/sc/interrupt.c 2011-03-27 14:31:47.000000000 -0400
31846+++ linux-2.6.32.43/drivers/isdn/sc/interrupt.c 2011-04-17 15:56:46.000000000 -0400
31847@@ -112,11 +112,19 @@ irqreturn_t interrupt_handler(int dummy,
31848 }
31849 else if(callid>=0x0000 && callid<=0x7FFF)
31850 {
31851+ int len;
31852+
31853 pr_debug("%s: Got Incoming Call\n",
31854 sc_adapter[card]->devicename);
31855- strcpy(setup.phone,&(rcvmsg.msg_data.byte_array[4]));
31856- strcpy(setup.eazmsn,
31857- sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn);
31858+ len = strlcpy(setup.phone, &(rcvmsg.msg_data.byte_array[4]),
31859+ sizeof(setup.phone));
31860+ if (len >= sizeof(setup.phone))
31861+ continue;
31862+ len = strlcpy(setup.eazmsn,
31863+ sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
31864+ sizeof(setup.eazmsn));
31865+ if (len >= sizeof(setup.eazmsn))
31866+ continue;
31867 setup.si1 = 7;
31868 setup.si2 = 0;
31869 setup.plan = 0;
31870@@ -176,7 +184,9 @@ irqreturn_t interrupt_handler(int dummy,
31871 * Handle a GetMyNumber Rsp
31872 */
31873 if (IS_CE_MESSAGE(rcvmsg,Call,0,GetMyNumber)){
31874- strcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no-1].dn,rcvmsg.msg_data.byte_array);
31875+ strlcpy(sc_adapter[card]->channel[rcvmsg.phy_link_no - 1].dn,
31876+ rcvmsg.msg_data.byte_array,
31877+ sizeof(rcvmsg.msg_data.byte_array));
31878 continue;
31879 }
31880
31881diff -urNp linux-2.6.32.43/drivers/lguest/core.c linux-2.6.32.43/drivers/lguest/core.c
31882--- linux-2.6.32.43/drivers/lguest/core.c 2011-03-27 14:31:47.000000000 -0400
31883+++ linux-2.6.32.43/drivers/lguest/core.c 2011-04-17 15:56:46.000000000 -0400
31884@@ -91,9 +91,17 @@ static __init int map_switcher(void)
31885 * it's worked so far. The end address needs +1 because __get_vm_area
31886 * allocates an extra guard page, so we need space for that.
31887 */
31888+
31889+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
31890+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31891+ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR
31892+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31893+#else
31894 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
31895 VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR
31896 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
31897+#endif
31898+
31899 if (!switcher_vma) {
31900 err = -ENOMEM;
31901 printk("lguest: could not map switcher pages high\n");
31902@@ -118,7 +126,7 @@ static __init int map_switcher(void)
31903 * Now the Switcher is mapped at the right address, we can't fail!
31904 * Copy in the compiled-in Switcher code (from <arch>_switcher.S).
31905 */
31906- memcpy(switcher_vma->addr, start_switcher_text,
31907+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
31908 end_switcher_text - start_switcher_text);
31909
31910 printk(KERN_INFO "lguest: mapped switcher at %p\n",
31911diff -urNp linux-2.6.32.43/drivers/lguest/x86/core.c linux-2.6.32.43/drivers/lguest/x86/core.c
31912--- linux-2.6.32.43/drivers/lguest/x86/core.c 2011-03-27 14:31:47.000000000 -0400
31913+++ linux-2.6.32.43/drivers/lguest/x86/core.c 2011-04-17 15:56:46.000000000 -0400
31914@@ -59,7 +59,7 @@ static struct {
31915 /* Offset from where switcher.S was compiled to where we've copied it */
31916 static unsigned long switcher_offset(void)
31917 {
31918- return SWITCHER_ADDR - (unsigned long)start_switcher_text;
31919+ return SWITCHER_ADDR - (unsigned long)ktla_ktva(start_switcher_text);
31920 }
31921
31922 /* This cpu's struct lguest_pages. */
31923@@ -100,7 +100,13 @@ static void copy_in_guest_info(struct lg
31924 * These copies are pretty cheap, so we do them unconditionally: */
31925 /* Save the current Host top-level page directory.
31926 */
31927+
31928+#ifdef CONFIG_PAX_PER_CPU_PGD
31929+ pages->state.host_cr3 = read_cr3();
31930+#else
31931 pages->state.host_cr3 = __pa(current->mm->pgd);
31932+#endif
31933+
31934 /*
31935 * Set up the Guest's page tables to see this CPU's pages (and no
31936 * other CPU's pages).
31937@@ -535,7 +541,7 @@ void __init lguest_arch_host_init(void)
31938 * compiled-in switcher code and the high-mapped copy we just made.
31939 */
31940 for (i = 0; i < IDT_ENTRIES; i++)
31941- default_idt_entries[i] += switcher_offset();
31942+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
31943
31944 /*
31945 * Set up the Switcher's per-cpu areas.
31946@@ -618,7 +624,7 @@ void __init lguest_arch_host_init(void)
31947 * it will be undisturbed when we switch. To change %cs and jump we
31948 * need this structure to feed to Intel's "lcall" instruction.
31949 */
31950- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
31951+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
31952 lguest_entry.segment = LGUEST_CS;
31953
31954 /*
31955diff -urNp linux-2.6.32.43/drivers/lguest/x86/switcher_32.S linux-2.6.32.43/drivers/lguest/x86/switcher_32.S
31956--- linux-2.6.32.43/drivers/lguest/x86/switcher_32.S 2011-03-27 14:31:47.000000000 -0400
31957+++ linux-2.6.32.43/drivers/lguest/x86/switcher_32.S 2011-04-17 15:56:46.000000000 -0400
31958@@ -87,6 +87,7 @@
31959 #include <asm/page.h>
31960 #include <asm/segment.h>
31961 #include <asm/lguest.h>
31962+#include <asm/processor-flags.h>
31963
31964 // We mark the start of the code to copy
31965 // It's placed in .text tho it's never run here
31966@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
31967 // Changes type when we load it: damn Intel!
31968 // For after we switch over our page tables
31969 // That entry will be read-only: we'd crash.
31970+
31971+#ifdef CONFIG_PAX_KERNEXEC
31972+ mov %cr0, %edx
31973+ xor $X86_CR0_WP, %edx
31974+ mov %edx, %cr0
31975+#endif
31976+
31977 movl $(GDT_ENTRY_TSS*8), %edx
31978 ltr %dx
31979
31980@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
31981 // Let's clear it again for our return.
31982 // The GDT descriptor of the Host
31983 // Points to the table after two "size" bytes
31984- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
31985+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
31986 // Clear "used" from type field (byte 5, bit 2)
31987- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
31988+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
31989+
31990+#ifdef CONFIG_PAX_KERNEXEC
31991+ mov %cr0, %eax
31992+ xor $X86_CR0_WP, %eax
31993+ mov %eax, %cr0
31994+#endif
31995
31996 // Once our page table's switched, the Guest is live!
31997 // The Host fades as we run this final step.
31998@@ -295,13 +309,12 @@ deliver_to_host:
31999 // I consulted gcc, and it gave
32000 // These instructions, which I gladly credit:
32001 leal (%edx,%ebx,8), %eax
32002- movzwl (%eax),%edx
32003- movl 4(%eax), %eax
32004- xorw %ax, %ax
32005- orl %eax, %edx
32006+ movl 4(%eax), %edx
32007+ movw (%eax), %dx
32008 // Now the address of the handler's in %edx
32009 // We call it now: its "iret" drops us home.
32010- jmp *%edx
32011+ ljmp $__KERNEL_CS, $1f
32012+1: jmp *%edx
32013
32014 // Every interrupt can come to us here
32015 // But we must truly tell each apart.
32016diff -urNp linux-2.6.32.43/drivers/macintosh/via-pmu-backlight.c linux-2.6.32.43/drivers/macintosh/via-pmu-backlight.c
32017--- linux-2.6.32.43/drivers/macintosh/via-pmu-backlight.c 2011-03-27 14:31:47.000000000 -0400
32018+++ linux-2.6.32.43/drivers/macintosh/via-pmu-backlight.c 2011-04-17 15:56:46.000000000 -0400
32019@@ -15,7 +15,7 @@
32020
32021 #define MAX_PMU_LEVEL 0xFF
32022
32023-static struct backlight_ops pmu_backlight_data;
32024+static const struct backlight_ops pmu_backlight_data;
32025 static DEFINE_SPINLOCK(pmu_backlight_lock);
32026 static int sleeping, uses_pmu_bl;
32027 static u8 bl_curve[FB_BACKLIGHT_LEVELS];
32028@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness(
32029 return bd->props.brightness;
32030 }
32031
32032-static struct backlight_ops pmu_backlight_data = {
32033+static const struct backlight_ops pmu_backlight_data = {
32034 .get_brightness = pmu_backlight_get_brightness,
32035 .update_status = pmu_backlight_update_status,
32036
32037diff -urNp linux-2.6.32.43/drivers/macintosh/via-pmu.c linux-2.6.32.43/drivers/macintosh/via-pmu.c
32038--- linux-2.6.32.43/drivers/macintosh/via-pmu.c 2011-03-27 14:31:47.000000000 -0400
32039+++ linux-2.6.32.43/drivers/macintosh/via-pmu.c 2011-04-17 15:56:46.000000000 -0400
32040@@ -2232,7 +2232,7 @@ static int pmu_sleep_valid(suspend_state
32041 && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0);
32042 }
32043
32044-static struct platform_suspend_ops pmu_pm_ops = {
32045+static const struct platform_suspend_ops pmu_pm_ops = {
32046 .enter = powerbook_sleep,
32047 .valid = pmu_sleep_valid,
32048 };
32049diff -urNp linux-2.6.32.43/drivers/md/dm.c linux-2.6.32.43/drivers/md/dm.c
32050--- linux-2.6.32.43/drivers/md/dm.c 2011-03-27 14:31:47.000000000 -0400
32051+++ linux-2.6.32.43/drivers/md/dm.c 2011-05-04 17:56:28.000000000 -0400
32052@@ -163,9 +163,9 @@ struct mapped_device {
32053 /*
32054 * Event handling.
32055 */
32056- atomic_t event_nr;
32057+ atomic_unchecked_t event_nr;
32058 wait_queue_head_t eventq;
32059- atomic_t uevent_seq;
32060+ atomic_unchecked_t uevent_seq;
32061 struct list_head uevent_list;
32062 spinlock_t uevent_lock; /* Protect access to uevent_list */
32063
32064@@ -1770,8 +1770,8 @@ static struct mapped_device *alloc_dev(i
32065 rwlock_init(&md->map_lock);
32066 atomic_set(&md->holders, 1);
32067 atomic_set(&md->open_count, 0);
32068- atomic_set(&md->event_nr, 0);
32069- atomic_set(&md->uevent_seq, 0);
32070+ atomic_set_unchecked(&md->event_nr, 0);
32071+ atomic_set_unchecked(&md->uevent_seq, 0);
32072 INIT_LIST_HEAD(&md->uevent_list);
32073 spin_lock_init(&md->uevent_lock);
32074
32075@@ -1921,7 +1921,7 @@ static void event_callback(void *context
32076
32077 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
32078
32079- atomic_inc(&md->event_nr);
32080+ atomic_inc_unchecked(&md->event_nr);
32081 wake_up(&md->eventq);
32082 }
32083
32084@@ -2556,18 +2556,18 @@ void dm_kobject_uevent(struct mapped_dev
32085
32086 uint32_t dm_next_uevent_seq(struct mapped_device *md)
32087 {
32088- return atomic_add_return(1, &md->uevent_seq);
32089+ return atomic_add_return_unchecked(1, &md->uevent_seq);
32090 }
32091
32092 uint32_t dm_get_event_nr(struct mapped_device *md)
32093 {
32094- return atomic_read(&md->event_nr);
32095+ return atomic_read_unchecked(&md->event_nr);
32096 }
32097
32098 int dm_wait_event(struct mapped_device *md, int event_nr)
32099 {
32100 return wait_event_interruptible(md->eventq,
32101- (event_nr != atomic_read(&md->event_nr)));
32102+ (event_nr != atomic_read_unchecked(&md->event_nr)));
32103 }
32104
32105 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
32106diff -urNp linux-2.6.32.43/drivers/md/dm-ioctl.c linux-2.6.32.43/drivers/md/dm-ioctl.c
32107--- linux-2.6.32.43/drivers/md/dm-ioctl.c 2011-03-27 14:31:47.000000000 -0400
32108+++ linux-2.6.32.43/drivers/md/dm-ioctl.c 2011-04-17 15:56:46.000000000 -0400
32109@@ -1437,7 +1437,7 @@ static int validate_params(uint cmd, str
32110 cmd == DM_LIST_VERSIONS_CMD)
32111 return 0;
32112
32113- if ((cmd == DM_DEV_CREATE_CMD)) {
32114+ if (cmd == DM_DEV_CREATE_CMD) {
32115 if (!*param->name) {
32116 DMWARN("name not supplied when creating device");
32117 return -EINVAL;
32118diff -urNp linux-2.6.32.43/drivers/md/dm-raid1.c linux-2.6.32.43/drivers/md/dm-raid1.c
32119--- linux-2.6.32.43/drivers/md/dm-raid1.c 2011-03-27 14:31:47.000000000 -0400
32120+++ linux-2.6.32.43/drivers/md/dm-raid1.c 2011-05-04 17:56:28.000000000 -0400
32121@@ -41,7 +41,7 @@ enum dm_raid1_error {
32122
32123 struct mirror {
32124 struct mirror_set *ms;
32125- atomic_t error_count;
32126+ atomic_unchecked_t error_count;
32127 unsigned long error_type;
32128 struct dm_dev *dev;
32129 sector_t offset;
32130@@ -203,7 +203,7 @@ static void fail_mirror(struct mirror *m
32131 * simple way to tell if a device has encountered
32132 * errors.
32133 */
32134- atomic_inc(&m->error_count);
32135+ atomic_inc_unchecked(&m->error_count);
32136
32137 if (test_and_set_bit(error_type, &m->error_type))
32138 return;
32139@@ -225,7 +225,7 @@ static void fail_mirror(struct mirror *m
32140 }
32141
32142 for (new = ms->mirror; new < ms->mirror + ms->nr_mirrors; new++)
32143- if (!atomic_read(&new->error_count)) {
32144+ if (!atomic_read_unchecked(&new->error_count)) {
32145 set_default_mirror(new);
32146 break;
32147 }
32148@@ -363,7 +363,7 @@ static struct mirror *choose_mirror(stru
32149 struct mirror *m = get_default_mirror(ms);
32150
32151 do {
32152- if (likely(!atomic_read(&m->error_count)))
32153+ if (likely(!atomic_read_unchecked(&m->error_count)))
32154 return m;
32155
32156 if (m-- == ms->mirror)
32157@@ -377,7 +377,7 @@ static int default_ok(struct mirror *m)
32158 {
32159 struct mirror *default_mirror = get_default_mirror(m->ms);
32160
32161- return !atomic_read(&default_mirror->error_count);
32162+ return !atomic_read_unchecked(&default_mirror->error_count);
32163 }
32164
32165 static int mirror_available(struct mirror_set *ms, struct bio *bio)
32166@@ -484,7 +484,7 @@ static void do_reads(struct mirror_set *
32167 */
32168 if (likely(region_in_sync(ms, region, 1)))
32169 m = choose_mirror(ms, bio->bi_sector);
32170- else if (m && atomic_read(&m->error_count))
32171+ else if (m && atomic_read_unchecked(&m->error_count))
32172 m = NULL;
32173
32174 if (likely(m))
32175@@ -855,7 +855,7 @@ static int get_mirror(struct mirror_set
32176 }
32177
32178 ms->mirror[mirror].ms = ms;
32179- atomic_set(&(ms->mirror[mirror].error_count), 0);
32180+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
32181 ms->mirror[mirror].error_type = 0;
32182 ms->mirror[mirror].offset = offset;
32183
32184@@ -1241,7 +1241,7 @@ static void mirror_resume(struct dm_targ
32185 */
32186 static char device_status_char(struct mirror *m)
32187 {
32188- if (!atomic_read(&(m->error_count)))
32189+ if (!atomic_read_unchecked(&(m->error_count)))
32190 return 'A';
32191
32192 return (test_bit(DM_RAID1_WRITE_ERROR, &(m->error_type))) ? 'D' :
32193diff -urNp linux-2.6.32.43/drivers/md/dm-stripe.c linux-2.6.32.43/drivers/md/dm-stripe.c
32194--- linux-2.6.32.43/drivers/md/dm-stripe.c 2011-03-27 14:31:47.000000000 -0400
32195+++ linux-2.6.32.43/drivers/md/dm-stripe.c 2011-05-04 17:56:28.000000000 -0400
32196@@ -20,7 +20,7 @@ struct stripe {
32197 struct dm_dev *dev;
32198 sector_t physical_start;
32199
32200- atomic_t error_count;
32201+ atomic_unchecked_t error_count;
32202 };
32203
32204 struct stripe_c {
32205@@ -188,7 +188,7 @@ static int stripe_ctr(struct dm_target *
32206 kfree(sc);
32207 return r;
32208 }
32209- atomic_set(&(sc->stripe[i].error_count), 0);
32210+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
32211 }
32212
32213 ti->private = sc;
32214@@ -257,7 +257,7 @@ static int stripe_status(struct dm_targe
32215 DMEMIT("%d ", sc->stripes);
32216 for (i = 0; i < sc->stripes; i++) {
32217 DMEMIT("%s ", sc->stripe[i].dev->name);
32218- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
32219+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
32220 'D' : 'A';
32221 }
32222 buffer[i] = '\0';
32223@@ -304,8 +304,8 @@ static int stripe_end_io(struct dm_targe
32224 */
32225 for (i = 0; i < sc->stripes; i++)
32226 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
32227- atomic_inc(&(sc->stripe[i].error_count));
32228- if (atomic_read(&(sc->stripe[i].error_count)) <
32229+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
32230+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
32231 DM_IO_ERROR_THRESHOLD)
32232 queue_work(kstriped, &sc->kstriped_ws);
32233 }
32234diff -urNp linux-2.6.32.43/drivers/md/dm-sysfs.c linux-2.6.32.43/drivers/md/dm-sysfs.c
32235--- linux-2.6.32.43/drivers/md/dm-sysfs.c 2011-03-27 14:31:47.000000000 -0400
32236+++ linux-2.6.32.43/drivers/md/dm-sysfs.c 2011-04-17 15:56:46.000000000 -0400
32237@@ -75,7 +75,7 @@ static struct attribute *dm_attrs[] = {
32238 NULL,
32239 };
32240
32241-static struct sysfs_ops dm_sysfs_ops = {
32242+static const struct sysfs_ops dm_sysfs_ops = {
32243 .show = dm_attr_show,
32244 };
32245
32246diff -urNp linux-2.6.32.43/drivers/md/dm-table.c linux-2.6.32.43/drivers/md/dm-table.c
32247--- linux-2.6.32.43/drivers/md/dm-table.c 2011-06-25 12:55:34.000000000 -0400
32248+++ linux-2.6.32.43/drivers/md/dm-table.c 2011-06-25 12:56:37.000000000 -0400
32249@@ -376,7 +376,7 @@ static int device_area_is_invalid(struct
32250 if (!dev_size)
32251 return 0;
32252
32253- if ((start >= dev_size) || (start + len > dev_size)) {
32254+ if ((start >= dev_size) || (len > dev_size - start)) {
32255 DMWARN("%s: %s too small for target: "
32256 "start=%llu, len=%llu, dev_size=%llu",
32257 dm_device_name(ti->table->md), bdevname(bdev, b),
32258diff -urNp linux-2.6.32.43/drivers/md/md.c linux-2.6.32.43/drivers/md/md.c
32259--- linux-2.6.32.43/drivers/md/md.c 2011-07-13 17:23:04.000000000 -0400
32260+++ linux-2.6.32.43/drivers/md/md.c 2011-07-13 17:23:18.000000000 -0400
32261@@ -153,10 +153,10 @@ static int start_readonly;
32262 * start build, activate spare
32263 */
32264 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
32265-static atomic_t md_event_count;
32266+static atomic_unchecked_t md_event_count;
32267 void md_new_event(mddev_t *mddev)
32268 {
32269- atomic_inc(&md_event_count);
32270+ atomic_inc_unchecked(&md_event_count);
32271 wake_up(&md_event_waiters);
32272 }
32273 EXPORT_SYMBOL_GPL(md_new_event);
32274@@ -166,7 +166,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
32275 */
32276 static void md_new_event_inintr(mddev_t *mddev)
32277 {
32278- atomic_inc(&md_event_count);
32279+ atomic_inc_unchecked(&md_event_count);
32280 wake_up(&md_event_waiters);
32281 }
32282
32283@@ -1218,7 +1218,7 @@ static int super_1_load(mdk_rdev_t *rdev
32284
32285 rdev->preferred_minor = 0xffff;
32286 rdev->data_offset = le64_to_cpu(sb->data_offset);
32287- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
32288+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
32289
32290 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
32291 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
32292@@ -1392,7 +1392,7 @@ static void super_1_sync(mddev_t *mddev,
32293 else
32294 sb->resync_offset = cpu_to_le64(0);
32295
32296- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
32297+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
32298
32299 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
32300 sb->size = cpu_to_le64(mddev->dev_sectors);
32301@@ -2214,7 +2214,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_sho
32302 static ssize_t
32303 errors_show(mdk_rdev_t *rdev, char *page)
32304 {
32305- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
32306+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
32307 }
32308
32309 static ssize_t
32310@@ -2223,7 +2223,7 @@ errors_store(mdk_rdev_t *rdev, const cha
32311 char *e;
32312 unsigned long n = simple_strtoul(buf, &e, 10);
32313 if (*buf && (*e == 0 || *e == '\n')) {
32314- atomic_set(&rdev->corrected_errors, n);
32315+ atomic_set_unchecked(&rdev->corrected_errors, n);
32316 return len;
32317 }
32318 return -EINVAL;
32319@@ -2517,7 +2517,7 @@ static void rdev_free(struct kobject *ko
32320 mdk_rdev_t *rdev = container_of(ko, mdk_rdev_t, kobj);
32321 kfree(rdev);
32322 }
32323-static struct sysfs_ops rdev_sysfs_ops = {
32324+static const struct sysfs_ops rdev_sysfs_ops = {
32325 .show = rdev_attr_show,
32326 .store = rdev_attr_store,
32327 };
32328@@ -2566,8 +2566,8 @@ static mdk_rdev_t *md_import_device(dev_
32329 rdev->data_offset = 0;
32330 rdev->sb_events = 0;
32331 atomic_set(&rdev->nr_pending, 0);
32332- atomic_set(&rdev->read_errors, 0);
32333- atomic_set(&rdev->corrected_errors, 0);
32334+ atomic_set_unchecked(&rdev->read_errors, 0);
32335+ atomic_set_unchecked(&rdev->corrected_errors, 0);
32336
32337 size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
32338 if (!size) {
32339@@ -3887,7 +3887,7 @@ static void md_free(struct kobject *ko)
32340 kfree(mddev);
32341 }
32342
32343-static struct sysfs_ops md_sysfs_ops = {
32344+static const struct sysfs_ops md_sysfs_ops = {
32345 .show = md_attr_show,
32346 .store = md_attr_store,
32347 };
32348@@ -4474,7 +4474,8 @@ out:
32349 err = 0;
32350 blk_integrity_unregister(disk);
32351 md_new_event(mddev);
32352- sysfs_notify_dirent(mddev->sysfs_state);
32353+ if (mddev->sysfs_state)
32354+ sysfs_notify_dirent(mddev->sysfs_state);
32355 return err;
32356 }
32357
32358@@ -5954,7 +5955,7 @@ static int md_seq_show(struct seq_file *
32359
32360 spin_unlock(&pers_lock);
32361 seq_printf(seq, "\n");
32362- mi->event = atomic_read(&md_event_count);
32363+ mi->event = atomic_read_unchecked(&md_event_count);
32364 return 0;
32365 }
32366 if (v == (void*)2) {
32367@@ -6043,7 +6044,7 @@ static int md_seq_show(struct seq_file *
32368 chunk_kb ? "KB" : "B");
32369 if (bitmap->file) {
32370 seq_printf(seq, ", file: ");
32371- seq_path(seq, &bitmap->file->f_path, " \t\n");
32372+ seq_path(seq, &bitmap->file->f_path, " \t\n\\");
32373 }
32374
32375 seq_printf(seq, "\n");
32376@@ -6077,7 +6078,7 @@ static int md_seq_open(struct inode *ino
32377 else {
32378 struct seq_file *p = file->private_data;
32379 p->private = mi;
32380- mi->event = atomic_read(&md_event_count);
32381+ mi->event = atomic_read_unchecked(&md_event_count);
32382 }
32383 return error;
32384 }
32385@@ -6093,7 +6094,7 @@ static unsigned int mdstat_poll(struct f
32386 /* always allow read */
32387 mask = POLLIN | POLLRDNORM;
32388
32389- if (mi->event != atomic_read(&md_event_count))
32390+ if (mi->event != atomic_read_unchecked(&md_event_count))
32391 mask |= POLLERR | POLLPRI;
32392 return mask;
32393 }
32394@@ -6137,7 +6138,7 @@ static int is_mddev_idle(mddev_t *mddev,
32395 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
32396 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
32397 (int)part_stat_read(&disk->part0, sectors[1]) -
32398- atomic_read(&disk->sync_io);
32399+ atomic_read_unchecked(&disk->sync_io);
32400 /* sync IO will cause sync_io to increase before the disk_stats
32401 * as sync_io is counted when a request starts, and
32402 * disk_stats is counted when it completes.
32403diff -urNp linux-2.6.32.43/drivers/md/md.h linux-2.6.32.43/drivers/md/md.h
32404--- linux-2.6.32.43/drivers/md/md.h 2011-03-27 14:31:47.000000000 -0400
32405+++ linux-2.6.32.43/drivers/md/md.h 2011-05-04 17:56:20.000000000 -0400
32406@@ -94,10 +94,10 @@ struct mdk_rdev_s
32407 * only maintained for arrays that
32408 * support hot removal
32409 */
32410- atomic_t read_errors; /* number of consecutive read errors that
32411+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
32412 * we have tried to ignore.
32413 */
32414- atomic_t corrected_errors; /* number of corrected read errors,
32415+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
32416 * for reporting to userspace and storing
32417 * in superblock.
32418 */
32419@@ -304,7 +304,7 @@ static inline void rdev_dec_pending(mdk_
32420
32421 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
32422 {
32423- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32424+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
32425 }
32426
32427 struct mdk_personality
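The md conversions above, like the many similar ones elsewhere in this patch, switch purely statistical counters from atomic_t to atomic_unchecked_t. Under PAX_REFCOUNT the ordinary atomic_inc() traps on signed overflow to catch reference-count bugs; counters such as corrected_errors or md_event_count may legitimately wrap, so they use the *_unchecked variants that skip the overflow check. Roughly, and ignoring the per-architecture assembly (this is an illustrative sketch, not the patch's actual definition):

    /* sketch only: the real type and helpers live in the arch atomic headers */
    typedef struct {
            int counter;
    } atomic_unchecked_t;

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
            v->counter++;            /* no overflow trap, wrap-around is tolerated */
    }

    static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
    {
            return v->counter;
    }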
32428diff -urNp linux-2.6.32.43/drivers/md/raid10.c linux-2.6.32.43/drivers/md/raid10.c
32429--- linux-2.6.32.43/drivers/md/raid10.c 2011-03-27 14:31:47.000000000 -0400
32430+++ linux-2.6.32.43/drivers/md/raid10.c 2011-05-04 17:56:28.000000000 -0400
32431@@ -1255,7 +1255,7 @@ static void end_sync_read(struct bio *bi
32432 if (test_bit(BIO_UPTODATE, &bio->bi_flags))
32433 set_bit(R10BIO_Uptodate, &r10_bio->state);
32434 else {
32435- atomic_add(r10_bio->sectors,
32436+ atomic_add_unchecked(r10_bio->sectors,
32437 &conf->mirrors[d].rdev->corrected_errors);
32438 if (!test_bit(MD_RECOVERY_SYNC, &conf->mddev->recovery))
32439 md_error(r10_bio->mddev,
32440@@ -1520,7 +1520,7 @@ static void fix_read_error(conf_t *conf,
32441 test_bit(In_sync, &rdev->flags)) {
32442 atomic_inc(&rdev->nr_pending);
32443 rcu_read_unlock();
32444- atomic_add(s, &rdev->corrected_errors);
32445+ atomic_add_unchecked(s, &rdev->corrected_errors);
32446 if (sync_page_io(rdev->bdev,
32447 r10_bio->devs[sl].addr +
32448 sect + rdev->data_offset,
32449diff -urNp linux-2.6.32.43/drivers/md/raid1.c linux-2.6.32.43/drivers/md/raid1.c
32450--- linux-2.6.32.43/drivers/md/raid1.c 2011-03-27 14:31:47.000000000 -0400
32451+++ linux-2.6.32.43/drivers/md/raid1.c 2011-05-04 17:56:28.000000000 -0400
32452@@ -1415,7 +1415,7 @@ static void sync_request_write(mddev_t *
32453 if (r1_bio->bios[d]->bi_end_io != end_sync_read)
32454 continue;
32455 rdev = conf->mirrors[d].rdev;
32456- atomic_add(s, &rdev->corrected_errors);
32457+ atomic_add_unchecked(s, &rdev->corrected_errors);
32458 if (sync_page_io(rdev->bdev,
32459 sect + rdev->data_offset,
32460 s<<9,
32461@@ -1564,7 +1564,7 @@ static void fix_read_error(conf_t *conf,
32462 /* Well, this device is dead */
32463 md_error(mddev, rdev);
32464 else {
32465- atomic_add(s, &rdev->corrected_errors);
32466+ atomic_add_unchecked(s, &rdev->corrected_errors);
32467 printk(KERN_INFO
32468 "raid1:%s: read error corrected "
32469 "(%d sectors at %llu on %s)\n",
32470diff -urNp linux-2.6.32.43/drivers/md/raid5.c linux-2.6.32.43/drivers/md/raid5.c
32471--- linux-2.6.32.43/drivers/md/raid5.c 2011-06-25 12:55:34.000000000 -0400
32472+++ linux-2.6.32.43/drivers/md/raid5.c 2011-06-25 12:58:39.000000000 -0400
32473@@ -482,7 +482,7 @@ static void ops_run_io(struct stripe_hea
32474 bi->bi_next = NULL;
32475 if ((rw & WRITE) &&
32476 test_bit(R5_ReWrite, &sh->dev[i].flags))
32477- atomic_add(STRIPE_SECTORS,
32478+ atomic_add_unchecked(STRIPE_SECTORS,
32479 &rdev->corrected_errors);
32480 generic_make_request(bi);
32481 } else {
32482@@ -1517,15 +1517,15 @@ static void raid5_end_read_request(struc
32483 clear_bit(R5_ReadError, &sh->dev[i].flags);
32484 clear_bit(R5_ReWrite, &sh->dev[i].flags);
32485 }
32486- if (atomic_read(&conf->disks[i].rdev->read_errors))
32487- atomic_set(&conf->disks[i].rdev->read_errors, 0);
32488+ if (atomic_read_unchecked(&conf->disks[i].rdev->read_errors))
32489+ atomic_set_unchecked(&conf->disks[i].rdev->read_errors, 0);
32490 } else {
32491 const char *bdn = bdevname(conf->disks[i].rdev->bdev, b);
32492 int retry = 0;
32493 rdev = conf->disks[i].rdev;
32494
32495 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
32496- atomic_inc(&rdev->read_errors);
32497+ atomic_inc_unchecked(&rdev->read_errors);
32498 if (conf->mddev->degraded >= conf->max_degraded)
32499 printk_rl(KERN_WARNING
32500 "raid5:%s: read error not correctable "
32501@@ -1543,7 +1543,7 @@ static void raid5_end_read_request(struc
32502 (unsigned long long)(sh->sector
32503 + rdev->data_offset),
32504 bdn);
32505- else if (atomic_read(&rdev->read_errors)
32506+ else if (atomic_read_unchecked(&rdev->read_errors)
32507 > conf->max_nr_stripes)
32508 printk(KERN_WARNING
32509 "raid5:%s: Too many read errors, failing device %s.\n",
32510@@ -1870,6 +1870,7 @@ static sector_t compute_blocknr(struct s
32511 sector_t r_sector;
32512 struct stripe_head sh2;
32513
32514+ pax_track_stack();
32515
32516 chunk_offset = sector_div(new_sector, sectors_per_chunk);
32517 stripe = new_sector;
32518diff -urNp linux-2.6.32.43/drivers/media/common/saa7146_fops.c linux-2.6.32.43/drivers/media/common/saa7146_fops.c
32519--- linux-2.6.32.43/drivers/media/common/saa7146_fops.c 2011-03-27 14:31:47.000000000 -0400
32520+++ linux-2.6.32.43/drivers/media/common/saa7146_fops.c 2011-08-05 20:33:55.000000000 -0400
32521@@ -458,7 +458,7 @@ int saa7146_vv_init(struct saa7146_dev*
32522 ERR(("out of memory. aborting.\n"));
32523 return -ENOMEM;
32524 }
32525- ext_vv->ops = saa7146_video_ioctl_ops;
32526+ memcpy((void *)&ext_vv->ops, &saa7146_video_ioctl_ops, sizeof(saa7146_video_ioctl_ops));
32527 ext_vv->core_ops = &saa7146_video_ioctl_ops;
32528
32529 DEB_EE(("dev:%p\n",dev));
32530diff -urNp linux-2.6.32.43/drivers/media/common/saa7146_hlp.c linux-2.6.32.43/drivers/media/common/saa7146_hlp.c
32531--- linux-2.6.32.43/drivers/media/common/saa7146_hlp.c 2011-03-27 14:31:47.000000000 -0400
32532+++ linux-2.6.32.43/drivers/media/common/saa7146_hlp.c 2011-05-16 21:46:57.000000000 -0400
32533@@ -353,6 +353,8 @@ static void calculate_clipping_registers
32534
32535 int x[32], y[32], w[32], h[32];
32536
32537+ pax_track_stack();
32538+
32539 /* clear out memory */
32540 memset(&line_list[0], 0x00, sizeof(u32)*32);
32541 memset(&pixel_list[0], 0x00, sizeof(u32)*32);
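calculate_clipping_registers() above keeps four 32-entry arrays on the stack, and like the other functions this patch touches it gains a pax_track_stack() call at the top. The call belongs to PaX's stack sanitization (PAX_MEMORY_STACKLEAK): it records how deep the stack grows so the kernel knows how much to scrub before returning to userland, and functions with unusually large frames announce themselves explicitly. The usage pattern, as a sketch (big_frame_demo is an illustrative name):

    static void big_frame_demo(void)
    {
            u32 line_list[32], pixel_list[32];   /* large on-stack buffers */

            pax_track_stack();                   /* record the deep stack use up front */

            memset(line_list, 0, sizeof(line_list));
            memset(pixel_list, 0, sizeof(pixel_list));
            /* ... real work ... */
    }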
32542diff -urNp linux-2.6.32.43/drivers/media/dvb/dvb-core/dvb_ca_en50221.c linux-2.6.32.43/drivers/media/dvb/dvb-core/dvb_ca_en50221.c
32543--- linux-2.6.32.43/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-03-27 14:31:47.000000000 -0400
32544+++ linux-2.6.32.43/drivers/media/dvb/dvb-core/dvb_ca_en50221.c 2011-05-16 21:46:57.000000000 -0400
32545@@ -590,6 +590,8 @@ static int dvb_ca_en50221_read_data(stru
32546 u8 buf[HOST_LINK_BUF_SIZE];
32547 int i;
32548
32549+ pax_track_stack();
32550+
32551 dprintk("%s\n", __func__);
32552
32553 /* check if we have space for a link buf in the rx_buffer */
32554@@ -1285,6 +1287,8 @@ static ssize_t dvb_ca_en50221_io_write(s
32555 unsigned long timeout;
32556 int written;
32557
32558+ pax_track_stack();
32559+
32560 dprintk("%s\n", __func__);
32561
32562 /* Incoming packet has a 2 byte header. hdr[0] = slot_id, hdr[1] = connection_id */
32563diff -urNp linux-2.6.32.43/drivers/media/dvb/dvb-core/dvb_demux.h linux-2.6.32.43/drivers/media/dvb/dvb-core/dvb_demux.h
32564--- linux-2.6.32.43/drivers/media/dvb/dvb-core/dvb_demux.h 2011-03-27 14:31:47.000000000 -0400
32565+++ linux-2.6.32.43/drivers/media/dvb/dvb-core/dvb_demux.h 2011-08-05 20:33:55.000000000 -0400
32566@@ -71,7 +71,7 @@ struct dvb_demux_feed {
32567 union {
32568 dmx_ts_cb ts;
32569 dmx_section_cb sec;
32570- } cb;
32571+ } __no_const cb;
32572
32573 struct dvb_demux *demux;
32574 void *priv;
32575diff -urNp linux-2.6.32.43/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.32.43/drivers/media/dvb/dvb-core/dvbdev.c
32576--- linux-2.6.32.43/drivers/media/dvb/dvb-core/dvbdev.c 2011-03-27 14:31:47.000000000 -0400
32577+++ linux-2.6.32.43/drivers/media/dvb/dvb-core/dvbdev.c 2011-08-05 20:33:55.000000000 -0400
32578@@ -228,8 +228,8 @@ int dvb_register_device(struct dvb_adapt
32579 dvbdev->fops = dvbdevfops;
32580 init_waitqueue_head (&dvbdev->wait_queue);
32581
32582- memcpy(dvbdevfops, template->fops, sizeof(struct file_operations));
32583- dvbdevfops->owner = adap->module;
32584+ memcpy((void *)dvbdevfops, template->fops, sizeof(struct file_operations));
32585+ *(void **)&dvbdevfops->owner = adap->module;
32586
32587 list_add_tail (&dvbdev->list_head, &adap->device_list);
32588
32589diff -urNp linux-2.6.32.43/drivers/media/dvb/dvb-usb/cxusb.c linux-2.6.32.43/drivers/media/dvb/dvb-usb/cxusb.c
32590--- linux-2.6.32.43/drivers/media/dvb/dvb-usb/cxusb.c 2011-03-27 14:31:47.000000000 -0400
32591+++ linux-2.6.32.43/drivers/media/dvb/dvb-usb/cxusb.c 2011-08-05 20:33:55.000000000 -0400
32592@@ -1040,7 +1040,7 @@ static struct dib0070_config dib7070p_di
32593 struct dib0700_adapter_state {
32594 int (*set_param_save) (struct dvb_frontend *,
32595 struct dvb_frontend_parameters *);
32596-};
32597+} __no_const;
32598
32599 static int dib7070_set_param_override(struct dvb_frontend *fe,
32600 struct dvb_frontend_parameters *fep)
32601diff -urNp linux-2.6.32.43/drivers/media/dvb/dvb-usb/dib0700_core.c linux-2.6.32.43/drivers/media/dvb/dvb-usb/dib0700_core.c
32602--- linux-2.6.32.43/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-03-27 14:31:47.000000000 -0400
32603+++ linux-2.6.32.43/drivers/media/dvb/dvb-usb/dib0700_core.c 2011-05-16 21:46:57.000000000 -0400
32604@@ -332,6 +332,8 @@ int dib0700_download_firmware(struct usb
32605
32606 u8 buf[260];
32607
32608+ pax_track_stack();
32609+
32610 while ((ret = dvb_usb_get_hexline(fw, &hx, &pos)) > 0) {
32611 deb_fwdata("writing to address 0x%08x (buffer: 0x%02x %02x)\n",hx.addr, hx.len, hx.chk);
32612
32613diff -urNp linux-2.6.32.43/drivers/media/dvb/dvb-usb/dib0700_devices.c linux-2.6.32.43/drivers/media/dvb/dvb-usb/dib0700_devices.c
32614--- linux-2.6.32.43/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-05-10 22:12:01.000000000 -0400
32615+++ linux-2.6.32.43/drivers/media/dvb/dvb-usb/dib0700_devices.c 2011-08-05 20:33:55.000000000 -0400
32616@@ -28,7 +28,7 @@ MODULE_PARM_DESC(force_lna_activation, "
32617
32618 struct dib0700_adapter_state {
32619 int (*set_param_save) (struct dvb_frontend *, struct dvb_frontend_parameters *);
32620-};
32621+} __no_const;
32622
32623 /* Hauppauge Nova-T 500 (aka Bristol)
32624 * has a LNA on GPIO0 which is enabled by setting 1 */
32625diff -urNp linux-2.6.32.43/drivers/media/dvb/frontends/dib3000.h linux-2.6.32.43/drivers/media/dvb/frontends/dib3000.h
32626--- linux-2.6.32.43/drivers/media/dvb/frontends/dib3000.h 2011-03-27 14:31:47.000000000 -0400
32627+++ linux-2.6.32.43/drivers/media/dvb/frontends/dib3000.h 2011-08-05 20:33:55.000000000 -0400
32628@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
32629 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
32630 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
32631 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
32632-};
32633+} __no_const;
32634
32635 #if defined(CONFIG_DVB_DIB3000MB) || (defined(CONFIG_DVB_DIB3000MB_MODULE) && defined(MODULE))
32636 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
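dib_fe_xfer_ops above, like dib0700_adapter_state and the dvb_demux_feed callback union earlier, is tagged __no_const. The patch's constification GCC plugin otherwise forces structures consisting of function pointers into read-only memory; these particular structures are filled in at attach or tune time, so the annotation opts them out. Schematically (demo_ops is an illustrative name):

    struct demo_ops {
            int (*start)(struct dvb_frontend *fe);
            int (*stop)(struct dvb_frontend *fe);
    } __no_const;                    /* stays writable: members are assigned at runtime */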
32637diff -urNp linux-2.6.32.43/drivers/media/dvb/frontends/or51211.c linux-2.6.32.43/drivers/media/dvb/frontends/or51211.c
32638--- linux-2.6.32.43/drivers/media/dvb/frontends/or51211.c 2011-03-27 14:31:47.000000000 -0400
32639+++ linux-2.6.32.43/drivers/media/dvb/frontends/or51211.c 2011-05-16 21:46:57.000000000 -0400
32640@@ -113,6 +113,8 @@ static int or51211_load_firmware (struct
32641 u8 tudata[585];
32642 int i;
32643
32644+ pax_track_stack();
32645+
32646 dprintk("Firmware is %zd bytes\n",fw->size);
32647
32648 /* Get eprom data */
32649diff -urNp linux-2.6.32.43/drivers/media/dvb/ttpci/av7110_v4l.c linux-2.6.32.43/drivers/media/dvb/ttpci/av7110_v4l.c
32650--- linux-2.6.32.43/drivers/media/dvb/ttpci/av7110_v4l.c 2011-03-27 14:31:47.000000000 -0400
32651+++ linux-2.6.32.43/drivers/media/dvb/ttpci/av7110_v4l.c 2011-08-05 20:33:55.000000000 -0400
32652@@ -796,18 +796,18 @@ int av7110_init_v4l(struct av7110 *av711
32653 ERR(("cannot init capture device. skipping.\n"));
32654 return -ENODEV;
32655 }
32656- vv_data->ops.vidioc_enum_input = vidioc_enum_input;
32657- vv_data->ops.vidioc_g_input = vidioc_g_input;
32658- vv_data->ops.vidioc_s_input = vidioc_s_input;
32659- vv_data->ops.vidioc_g_tuner = vidioc_g_tuner;
32660- vv_data->ops.vidioc_s_tuner = vidioc_s_tuner;
32661- vv_data->ops.vidioc_g_frequency = vidioc_g_frequency;
32662- vv_data->ops.vidioc_s_frequency = vidioc_s_frequency;
32663- vv_data->ops.vidioc_g_audio = vidioc_g_audio;
32664- vv_data->ops.vidioc_s_audio = vidioc_s_audio;
32665- vv_data->ops.vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap;
32666- vv_data->ops.vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out;
32667- vv_data->ops.vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out;
32668+ *(void **)&vv_data->ops.vidioc_enum_input = vidioc_enum_input;
32669+ *(void **)&vv_data->ops.vidioc_g_input = vidioc_g_input;
32670+ *(void **)&vv_data->ops.vidioc_s_input = vidioc_s_input;
32671+ *(void **)&vv_data->ops.vidioc_g_tuner = vidioc_g_tuner;
32672+ *(void **)&vv_data->ops.vidioc_s_tuner = vidioc_s_tuner;
32673+ *(void **)&vv_data->ops.vidioc_g_frequency = vidioc_g_frequency;
32674+ *(void **)&vv_data->ops.vidioc_s_frequency = vidioc_s_frequency;
32675+ *(void **)&vv_data->ops.vidioc_g_audio = vidioc_g_audio;
32676+ *(void **)&vv_data->ops.vidioc_s_audio = vidioc_s_audio;
32677+ *(void **)&vv_data->ops.vidioc_g_sliced_vbi_cap = vidioc_g_sliced_vbi_cap;
32678+ *(void **)&vv_data->ops.vidioc_g_fmt_sliced_vbi_out = vidioc_g_fmt_sliced_vbi_out;
32679+ *(void **)&vv_data->ops.vidioc_s_fmt_sliced_vbi_out = vidioc_s_fmt_sliced_vbi_out;
32680
32681 if (saa7146_register_device(&av7110->v4l_dev, dev, "av7110", VFL_TYPE_GRABBER)) {
32682 ERR(("cannot register capture device. skipping.\n"));
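The av7110 hunk above shows the other half of the constification story: where an ops structure has become read-only by type but a driver must still patch individual entries during setup, the assignments are rewritten to go through *(void **)& so they compile against the const-qualified member. A condensed sketch of the pattern (demo_ops, demo_device and demo_enum_input are illustrative names):

    struct demo_ops {
            int (*enum_input)(void *priv, int index);
    };

    struct demo_device {
            const struct demo_ops ops;           /* member is const after constification */
    };

    static int demo_enum_input(void *priv, int index) { return index; }

    static void demo_setup(struct demo_device *dev)
    {
            /* "dev->ops.enum_input = demo_enum_input;" no longer compiles;
               the patch writes the pointer slot through a non-const lvalue instead */
            *(void **)&dev->ops.enum_input = demo_enum_input;
    }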
32683diff -urNp linux-2.6.32.43/drivers/media/dvb/ttpci/budget-av.c linux-2.6.32.43/drivers/media/dvb/ttpci/budget-av.c
32684--- linux-2.6.32.43/drivers/media/dvb/ttpci/budget-av.c 2011-03-27 14:31:47.000000000 -0400
32685+++ linux-2.6.32.43/drivers/media/dvb/ttpci/budget-av.c 2011-08-05 20:33:55.000000000 -0400
32686@@ -1477,9 +1477,9 @@ static int budget_av_attach(struct saa71
32687 ERR(("cannot init vv subsystem.\n"));
32688 return err;
32689 }
32690- vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32691- vv_data.ops.vidioc_g_input = vidioc_g_input;
32692- vv_data.ops.vidioc_s_input = vidioc_s_input;
32693+ *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32694+ *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32695+ *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32696
32697 if ((err = saa7146_register_device(&budget_av->vd, dev, "knc1", VFL_TYPE_GRABBER))) {
32698 /* fixme: proper cleanup here */
32699diff -urNp linux-2.6.32.43/drivers/media/radio/radio-cadet.c linux-2.6.32.43/drivers/media/radio/radio-cadet.c
32700--- linux-2.6.32.43/drivers/media/radio/radio-cadet.c 2011-03-27 14:31:47.000000000 -0400
32701+++ linux-2.6.32.43/drivers/media/radio/radio-cadet.c 2011-04-17 15:56:46.000000000 -0400
32702@@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f
32703 while (i < count && dev->rdsin != dev->rdsout)
32704 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
32705
32706- if (copy_to_user(data, readbuf, i))
32707+ if (i > sizeof readbuf || copy_to_user(data, readbuf, i))
32708 return -EFAULT;
32709 return i;
32710 }
32711diff -urNp linux-2.6.32.43/drivers/media/video/cx18/cx18-driver.c linux-2.6.32.43/drivers/media/video/cx18/cx18-driver.c
32712--- linux-2.6.32.43/drivers/media/video/cx18/cx18-driver.c 2011-03-27 14:31:47.000000000 -0400
32713+++ linux-2.6.32.43/drivers/media/video/cx18/cx18-driver.c 2011-05-16 21:46:57.000000000 -0400
32714@@ -56,7 +56,7 @@ static struct pci_device_id cx18_pci_tbl
32715
32716 MODULE_DEVICE_TABLE(pci, cx18_pci_tbl);
32717
32718-static atomic_t cx18_instance = ATOMIC_INIT(0);
32719+static atomic_unchecked_t cx18_instance = ATOMIC_INIT(0);
32720
32721 /* Parameter declarations */
32722 static int cardtype[CX18_MAX_CARDS];
32723@@ -288,6 +288,8 @@ void cx18_read_eeprom(struct cx18 *cx, s
32724 struct i2c_client c;
32725 u8 eedata[256];
32726
32727+ pax_track_stack();
32728+
32729 memset(&c, 0, sizeof(c));
32730 strlcpy(c.name, "cx18 tveeprom tmp", sizeof(c.name));
32731 c.adapter = &cx->i2c_adap[0];
32732@@ -800,7 +802,7 @@ static int __devinit cx18_probe(struct p
32733 struct cx18 *cx;
32734
32735 /* FIXME - module parameter arrays constrain max instances */
32736- i = atomic_inc_return(&cx18_instance) - 1;
32737+ i = atomic_inc_return_unchecked(&cx18_instance) - 1;
32738 if (i >= CX18_MAX_CARDS) {
32739 printk(KERN_ERR "cx18: cannot manage card %d, driver has a "
32740 "limit of 0 - %d\n", i, CX18_MAX_CARDS - 1);
32741diff -urNp linux-2.6.32.43/drivers/media/video/hexium_gemini.c linux-2.6.32.43/drivers/media/video/hexium_gemini.c
32742--- linux-2.6.32.43/drivers/media/video/hexium_gemini.c 2011-03-27 14:31:47.000000000 -0400
32743+++ linux-2.6.32.43/drivers/media/video/hexium_gemini.c 2011-08-05 20:33:55.000000000 -0400
32744@@ -394,12 +394,12 @@ static int hexium_attach(struct saa7146_
32745 hexium->cur_input = 0;
32746
32747 saa7146_vv_init(dev, &vv_data);
32748- vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32749- vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32750- vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32751- vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32752- vv_data.ops.vidioc_g_input = vidioc_g_input;
32753- vv_data.ops.vidioc_s_input = vidioc_s_input;
32754+ *(void **)&vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32755+ *(void **)&vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32756+ *(void **)&vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32757+ *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32758+ *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32759+ *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32760 if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium gemini", VFL_TYPE_GRABBER)) {
32761 printk("hexium_gemini: cannot register capture v4l2 device. skipping.\n");
32762 return -1;
32763diff -urNp linux-2.6.32.43/drivers/media/video/hexium_orion.c linux-2.6.32.43/drivers/media/video/hexium_orion.c
32764--- linux-2.6.32.43/drivers/media/video/hexium_orion.c 2011-03-27 14:31:47.000000000 -0400
32765+++ linux-2.6.32.43/drivers/media/video/hexium_orion.c 2011-08-05 20:33:55.000000000 -0400
32766@@ -369,9 +369,9 @@ static int hexium_attach(struct saa7146_
32767 DEB_EE((".\n"));
32768
32769 saa7146_vv_init(dev, &vv_data);
32770- vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32771- vv_data.ops.vidioc_g_input = vidioc_g_input;
32772- vv_data.ops.vidioc_s_input = vidioc_s_input;
32773+ *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32774+ *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32775+ *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32776 if (0 != saa7146_register_device(&hexium->video_dev, dev, "hexium orion", VFL_TYPE_GRABBER)) {
32777 printk("hexium_orion: cannot register capture v4l2 device. skipping.\n");
32778 return -1;
32779diff -urNp linux-2.6.32.43/drivers/media/video/ivtv/ivtv-driver.c linux-2.6.32.43/drivers/media/video/ivtv/ivtv-driver.c
32780--- linux-2.6.32.43/drivers/media/video/ivtv/ivtv-driver.c 2011-03-27 14:31:47.000000000 -0400
32781+++ linux-2.6.32.43/drivers/media/video/ivtv/ivtv-driver.c 2011-05-04 17:56:28.000000000 -0400
32782@@ -79,7 +79,7 @@ static struct pci_device_id ivtv_pci_tbl
32783 MODULE_DEVICE_TABLE(pci,ivtv_pci_tbl);
32784
32785 /* ivtv instance counter */
32786-static atomic_t ivtv_instance = ATOMIC_INIT(0);
32787+static atomic_unchecked_t ivtv_instance = ATOMIC_INIT(0);
32788
32789 /* Parameter declarations */
32790 static int cardtype[IVTV_MAX_CARDS];
32791diff -urNp linux-2.6.32.43/drivers/media/video/mxb.c linux-2.6.32.43/drivers/media/video/mxb.c
32792--- linux-2.6.32.43/drivers/media/video/mxb.c 2011-03-27 14:31:47.000000000 -0400
32793+++ linux-2.6.32.43/drivers/media/video/mxb.c 2011-08-05 20:33:55.000000000 -0400
32794@@ -703,23 +703,23 @@ static int mxb_attach(struct saa7146_dev
32795 already did this in "mxb_vl42_probe" */
32796
32797 saa7146_vv_init(dev, &vv_data);
32798- vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32799- vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32800- vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32801- vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32802- vv_data.ops.vidioc_g_input = vidioc_g_input;
32803- vv_data.ops.vidioc_s_input = vidioc_s_input;
32804- vv_data.ops.vidioc_g_tuner = vidioc_g_tuner;
32805- vv_data.ops.vidioc_s_tuner = vidioc_s_tuner;
32806- vv_data.ops.vidioc_g_frequency = vidioc_g_frequency;
32807- vv_data.ops.vidioc_s_frequency = vidioc_s_frequency;
32808- vv_data.ops.vidioc_g_audio = vidioc_g_audio;
32809- vv_data.ops.vidioc_s_audio = vidioc_s_audio;
32810+ *(void **)&vv_data.ops.vidioc_queryctrl = vidioc_queryctrl;
32811+ *(void **)&vv_data.ops.vidioc_g_ctrl = vidioc_g_ctrl;
32812+ *(void **)&vv_data.ops.vidioc_s_ctrl = vidioc_s_ctrl;
32813+ *(void **)&vv_data.ops.vidioc_enum_input = vidioc_enum_input;
32814+ *(void **)&vv_data.ops.vidioc_g_input = vidioc_g_input;
32815+ *(void **)&vv_data.ops.vidioc_s_input = vidioc_s_input;
32816+ *(void **)&vv_data.ops.vidioc_g_tuner = vidioc_g_tuner;
32817+ *(void **)&vv_data.ops.vidioc_s_tuner = vidioc_s_tuner;
32818+ *(void **)&vv_data.ops.vidioc_g_frequency = vidioc_g_frequency;
32819+ *(void **)&vv_data.ops.vidioc_s_frequency = vidioc_s_frequency;
32820+ *(void **)&vv_data.ops.vidioc_g_audio = vidioc_g_audio;
32821+ *(void **)&vv_data.ops.vidioc_s_audio = vidioc_s_audio;
32822 #ifdef CONFIG_VIDEO_ADV_DEBUG
32823- vv_data.ops.vidioc_g_register = vidioc_g_register;
32824- vv_data.ops.vidioc_s_register = vidioc_s_register;
32825+ *(void **)&vv_data.ops.vidioc_g_register = vidioc_g_register;
32826+ *(void **)&vv_data.ops.vidioc_s_register = vidioc_s_register;
32827 #endif
32828- vv_data.ops.vidioc_default = vidioc_default;
32829+ *(void **)&vv_data.ops.vidioc_default = vidioc_default;
32830 if (saa7146_register_device(&mxb->video_dev, dev, "mxb", VFL_TYPE_GRABBER)) {
32831 ERR(("cannot register capture v4l2 device. skipping.\n"));
32832 return -1;
32833diff -urNp linux-2.6.32.43/drivers/media/video/omap24xxcam.c linux-2.6.32.43/drivers/media/video/omap24xxcam.c
32834--- linux-2.6.32.43/drivers/media/video/omap24xxcam.c 2011-03-27 14:31:47.000000000 -0400
32835+++ linux-2.6.32.43/drivers/media/video/omap24xxcam.c 2011-05-04 17:56:28.000000000 -0400
32836@@ -401,7 +401,7 @@ static void omap24xxcam_vbq_complete(str
32837 spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);
32838
32839 do_gettimeofday(&vb->ts);
32840- vb->field_count = atomic_add_return(2, &fh->field_count);
32841+ vb->field_count = atomic_add_return_unchecked(2, &fh->field_count);
32842 if (csr & csr_error) {
32843 vb->state = VIDEOBUF_ERROR;
32844 if (!atomic_read(&fh->cam->in_reset)) {
32845diff -urNp linux-2.6.32.43/drivers/media/video/omap24xxcam.h linux-2.6.32.43/drivers/media/video/omap24xxcam.h
32846--- linux-2.6.32.43/drivers/media/video/omap24xxcam.h 2011-03-27 14:31:47.000000000 -0400
32847+++ linux-2.6.32.43/drivers/media/video/omap24xxcam.h 2011-05-04 17:56:28.000000000 -0400
32848@@ -533,7 +533,7 @@ struct omap24xxcam_fh {
32849 spinlock_t vbq_lock; /* spinlock for the videobuf queue */
32850 struct videobuf_queue vbq;
32851 struct v4l2_pix_format pix; /* serialise pix by vbq->lock */
32852- atomic_t field_count; /* field counter for videobuf_buffer */
32853+ atomic_unchecked_t field_count; /* field counter for videobuf_buffer */
32854 /* accessing cam here doesn't need serialisation: it's constant */
32855 struct omap24xxcam_device *cam;
32856 };
32857diff -urNp linux-2.6.32.43/drivers/media/video/pvrusb2/pvrusb2-eeprom.c linux-2.6.32.43/drivers/media/video/pvrusb2/pvrusb2-eeprom.c
32858--- linux-2.6.32.43/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-03-27 14:31:47.000000000 -0400
32859+++ linux-2.6.32.43/drivers/media/video/pvrusb2/pvrusb2-eeprom.c 2011-05-16 21:46:57.000000000 -0400
32860@@ -119,6 +119,8 @@ int pvr2_eeprom_analyze(struct pvr2_hdw
32861 u8 *eeprom;
32862 struct tveeprom tvdata;
32863
32864+ pax_track_stack();
32865+
32866 memset(&tvdata,0,sizeof(tvdata));
32867
32868 eeprom = pvr2_eeprom_fetch(hdw);
32869diff -urNp linux-2.6.32.43/drivers/media/video/saa7134/saa6752hs.c linux-2.6.32.43/drivers/media/video/saa7134/saa6752hs.c
32870--- linux-2.6.32.43/drivers/media/video/saa7134/saa6752hs.c 2011-03-27 14:31:47.000000000 -0400
32871+++ linux-2.6.32.43/drivers/media/video/saa7134/saa6752hs.c 2011-05-16 21:46:57.000000000 -0400
32872@@ -683,6 +683,8 @@ static int saa6752hs_init(struct v4l2_su
32873 unsigned char localPAT[256];
32874 unsigned char localPMT[256];
32875
32876+ pax_track_stack();
32877+
32878 /* Set video format - must be done first as it resets other settings */
32879 set_reg8(client, 0x41, h->video_format);
32880
32881diff -urNp linux-2.6.32.43/drivers/media/video/saa7164/saa7164-cmd.c linux-2.6.32.43/drivers/media/video/saa7164/saa7164-cmd.c
32882--- linux-2.6.32.43/drivers/media/video/saa7164/saa7164-cmd.c 2011-03-27 14:31:47.000000000 -0400
32883+++ linux-2.6.32.43/drivers/media/video/saa7164/saa7164-cmd.c 2011-05-16 21:46:57.000000000 -0400
32884@@ -87,6 +87,8 @@ int saa7164_irq_dequeue(struct saa7164_d
32885 wait_queue_head_t *q = 0;
32886 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32887
32888+ pax_track_stack();
32889+
32890 /* While any outstand message on the bus exists... */
32891 do {
32892
32893@@ -126,6 +128,8 @@ int saa7164_cmd_dequeue(struct saa7164_d
32894 u8 tmp[512];
32895 dprintk(DBGLVL_CMD, "%s()\n", __func__);
32896
32897+ pax_track_stack();
32898+
32899 while (loop) {
32900
32901 tmComResInfo_t tRsp = { 0, 0, 0, 0, 0, 0 };
32902diff -urNp linux-2.6.32.43/drivers/media/video/usbvideo/ibmcam.c linux-2.6.32.43/drivers/media/video/usbvideo/ibmcam.c
32903--- linux-2.6.32.43/drivers/media/video/usbvideo/ibmcam.c 2011-03-27 14:31:47.000000000 -0400
32904+++ linux-2.6.32.43/drivers/media/video/usbvideo/ibmcam.c 2011-08-05 20:33:55.000000000 -0400
32905@@ -3947,15 +3947,15 @@ static struct usb_device_id id_table[] =
32906 static int __init ibmcam_init(void)
32907 {
32908 struct usbvideo_cb cbTbl;
32909- memset(&cbTbl, 0, sizeof(cbTbl));
32910- cbTbl.probe = ibmcam_probe;
32911- cbTbl.setupOnOpen = ibmcam_setup_on_open;
32912- cbTbl.videoStart = ibmcam_video_start;
32913- cbTbl.videoStop = ibmcam_video_stop;
32914- cbTbl.processData = ibmcam_ProcessIsocData;
32915- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32916- cbTbl.adjustPicture = ibmcam_adjust_picture;
32917- cbTbl.getFPS = ibmcam_calculate_fps;
32918+ memset((void *)&cbTbl, 0, sizeof(cbTbl));
32919+ *(void **)&cbTbl.probe = ibmcam_probe;
32920+ *(void **)&cbTbl.setupOnOpen = ibmcam_setup_on_open;
32921+ *(void **)&cbTbl.videoStart = ibmcam_video_start;
32922+ *(void **)&cbTbl.videoStop = ibmcam_video_stop;
32923+ *(void **)&cbTbl.processData = ibmcam_ProcessIsocData;
32924+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32925+ *(void **)&cbTbl.adjustPicture = ibmcam_adjust_picture;
32926+ *(void **)&cbTbl.getFPS = ibmcam_calculate_fps;
32927 return usbvideo_register(
32928 &cams,
32929 MAX_IBMCAM,
32930diff -urNp linux-2.6.32.43/drivers/media/video/usbvideo/konicawc.c linux-2.6.32.43/drivers/media/video/usbvideo/konicawc.c
32931--- linux-2.6.32.43/drivers/media/video/usbvideo/konicawc.c 2011-03-27 14:31:47.000000000 -0400
32932+++ linux-2.6.32.43/drivers/media/video/usbvideo/konicawc.c 2011-08-05 20:33:55.000000000 -0400
32933@@ -225,7 +225,7 @@ static void konicawc_register_input(stru
32934 int error;
32935
32936 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
32937- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32938+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32939
32940 cam->input = input_dev = input_allocate_device();
32941 if (!input_dev) {
32942@@ -935,16 +935,16 @@ static int __init konicawc_init(void)
32943 struct usbvideo_cb cbTbl;
32944 printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":"
32945 DRIVER_DESC "\n");
32946- memset(&cbTbl, 0, sizeof(cbTbl));
32947- cbTbl.probe = konicawc_probe;
32948- cbTbl.setupOnOpen = konicawc_setup_on_open;
32949- cbTbl.processData = konicawc_process_isoc;
32950- cbTbl.getFPS = konicawc_calculate_fps;
32951- cbTbl.setVideoMode = konicawc_set_video_mode;
32952- cbTbl.startDataPump = konicawc_start_data;
32953- cbTbl.stopDataPump = konicawc_stop_data;
32954- cbTbl.adjustPicture = konicawc_adjust_picture;
32955- cbTbl.userFree = konicawc_free_uvd;
32956+ memset((void * )&cbTbl, 0, sizeof(cbTbl));
32957+ *(void **)&cbTbl.probe = konicawc_probe;
32958+ *(void **)&cbTbl.setupOnOpen = konicawc_setup_on_open;
32959+ *(void **)&cbTbl.processData = konicawc_process_isoc;
32960+ *(void **)&cbTbl.getFPS = konicawc_calculate_fps;
32961+ *(void **)&cbTbl.setVideoMode = konicawc_set_video_mode;
32962+ *(void **)&cbTbl.startDataPump = konicawc_start_data;
32963+ *(void **)&cbTbl.stopDataPump = konicawc_stop_data;
32964+ *(void **)&cbTbl.adjustPicture = konicawc_adjust_picture;
32965+ *(void **)&cbTbl.userFree = konicawc_free_uvd;
32966 return usbvideo_register(
32967 &cams,
32968 MAX_CAMERAS,
32969diff -urNp linux-2.6.32.43/drivers/media/video/usbvideo/quickcam_messenger.c linux-2.6.32.43/drivers/media/video/usbvideo/quickcam_messenger.c
32970--- linux-2.6.32.43/drivers/media/video/usbvideo/quickcam_messenger.c 2011-03-27 14:31:47.000000000 -0400
32971+++ linux-2.6.32.43/drivers/media/video/usbvideo/quickcam_messenger.c 2011-04-17 15:56:46.000000000 -0400
32972@@ -89,7 +89,7 @@ static void qcm_register_input(struct qc
32973 int error;
32974
32975 usb_make_path(dev, cam->input_physname, sizeof(cam->input_physname));
32976- strncat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32977+ strlcat(cam->input_physname, "/input0", sizeof(cam->input_physname));
32978
32979 cam->input = input_dev = input_allocate_device();
32980 if (!input_dev) {
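The konicawc and quickcam_messenger hunks replace strncat(dst, src, sizeof(dst)) with strlcat(). strncat's third argument limits how many bytes may be appended, not the total size of the destination, so passing sizeof(dst) can still run off the end of an already partly filled buffer; strlcat takes the full destination size and truncates safely, always leaving a NUL terminator. A small sketch of the difference (kernel strlcat semantics assumed):

    char physname[32] = "usb-0000:00:1d.7-1";     /* buffer already partly filled */

    /* strncat(physname, "/input0", sizeof(physname)) may append up to 32 MORE bytes,
       overflowing the 32-byte array; strlcat() treats 32 as the total capacity */
    strlcat(physname, "/input0", sizeof(physname));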
32981diff -urNp linux-2.6.32.43/drivers/media/video/usbvideo/ultracam.c linux-2.6.32.43/drivers/media/video/usbvideo/ultracam.c
32982--- linux-2.6.32.43/drivers/media/video/usbvideo/ultracam.c 2011-03-27 14:31:47.000000000 -0400
32983+++ linux-2.6.32.43/drivers/media/video/usbvideo/ultracam.c 2011-08-05 20:33:55.000000000 -0400
32984@@ -655,14 +655,14 @@ static int __init ultracam_init(void)
32985 {
32986 struct usbvideo_cb cbTbl;
32987 memset(&cbTbl, 0, sizeof(cbTbl));
32988- cbTbl.probe = ultracam_probe;
32989- cbTbl.setupOnOpen = ultracam_setup_on_open;
32990- cbTbl.videoStart = ultracam_video_start;
32991- cbTbl.videoStop = ultracam_video_stop;
32992- cbTbl.processData = ultracam_ProcessIsocData;
32993- cbTbl.postProcess = usbvideo_DeinterlaceFrame;
32994- cbTbl.adjustPicture = ultracam_adjust_picture;
32995- cbTbl.getFPS = ultracam_calculate_fps;
32996+ *(void **)&cbTbl.probe = ultracam_probe;
32997+ *(void **)&cbTbl.setupOnOpen = ultracam_setup_on_open;
32998+ *(void **)&cbTbl.videoStart = ultracam_video_start;
32999+ *(void **)&cbTbl.videoStop = ultracam_video_stop;
33000+ *(void **)&cbTbl.processData = ultracam_ProcessIsocData;
33001+ *(void **)&cbTbl.postProcess = usbvideo_DeinterlaceFrame;
33002+ *(void **)&cbTbl.adjustPicture = ultracam_adjust_picture;
33003+ *(void **)&cbTbl.getFPS = ultracam_calculate_fps;
33004 return usbvideo_register(
33005 &cams,
33006 MAX_CAMERAS,
33007diff -urNp linux-2.6.32.43/drivers/media/video/usbvideo/usbvideo.c linux-2.6.32.43/drivers/media/video/usbvideo/usbvideo.c
33008--- linux-2.6.32.43/drivers/media/video/usbvideo/usbvideo.c 2011-03-27 14:31:47.000000000 -0400
33009+++ linux-2.6.32.43/drivers/media/video/usbvideo/usbvideo.c 2011-08-05 20:33:55.000000000 -0400
33010@@ -697,15 +697,15 @@ int usbvideo_register(
33011 __func__, cams, base_size, num_cams);
33012
33013 /* Copy callbacks, apply defaults for those that are not set */
33014- memmove(&cams->cb, cbTbl, sizeof(cams->cb));
33015+ memmove((void *)&cams->cb, cbTbl, sizeof(cams->cb));
33016 if (cams->cb.getFrame == NULL)
33017- cams->cb.getFrame = usbvideo_GetFrame;
33018+ *(void **)&cams->cb.getFrame = usbvideo_GetFrame;
33019 if (cams->cb.disconnect == NULL)
33020- cams->cb.disconnect = usbvideo_Disconnect;
33021+ *(void **)&cams->cb.disconnect = usbvideo_Disconnect;
33022 if (cams->cb.startDataPump == NULL)
33023- cams->cb.startDataPump = usbvideo_StartDataPump;
33024+ *(void **)&cams->cb.startDataPump = usbvideo_StartDataPump;
33025 if (cams->cb.stopDataPump == NULL)
33026- cams->cb.stopDataPump = usbvideo_StopDataPump;
33027+ *(void **)&cams->cb.stopDataPump = usbvideo_StopDataPump;
33028
33029 cams->num_cameras = num_cams;
33030 cams->cam = (struct uvd *) &cams[1];
33031diff -urNp linux-2.6.32.43/drivers/media/video/usbvision/usbvision-core.c linux-2.6.32.43/drivers/media/video/usbvision/usbvision-core.c
33032--- linux-2.6.32.43/drivers/media/video/usbvision/usbvision-core.c 2011-03-27 14:31:47.000000000 -0400
33033+++ linux-2.6.32.43/drivers/media/video/usbvision/usbvision-core.c 2011-05-16 21:46:57.000000000 -0400
33034@@ -820,6 +820,8 @@ static enum ParseState usbvision_parse_c
33035 unsigned char rv, gv, bv;
33036 static unsigned char *Y, *U, *V;
33037
33038+ pax_track_stack();
33039+
33040 frame = usbvision->curFrame;
33041 imageSize = frame->frmwidth * frame->frmheight;
33042 if ( (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) ||
33043diff -urNp linux-2.6.32.43/drivers/media/video/v4l2-device.c linux-2.6.32.43/drivers/media/video/v4l2-device.c
33044--- linux-2.6.32.43/drivers/media/video/v4l2-device.c 2011-03-27 14:31:47.000000000 -0400
33045+++ linux-2.6.32.43/drivers/media/video/v4l2-device.c 2011-05-04 17:56:28.000000000 -0400
33046@@ -50,9 +50,9 @@ int v4l2_device_register(struct device *
33047 EXPORT_SYMBOL_GPL(v4l2_device_register);
33048
33049 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
33050- atomic_t *instance)
33051+ atomic_unchecked_t *instance)
33052 {
33053- int num = atomic_inc_return(instance) - 1;
33054+ int num = atomic_inc_return_unchecked(instance) - 1;
33055 int len = strlen(basename);
33056
33057 if (basename[len - 1] >= '0' && basename[len - 1] <= '9')
33058diff -urNp linux-2.6.32.43/drivers/media/video/videobuf-dma-sg.c linux-2.6.32.43/drivers/media/video/videobuf-dma-sg.c
33059--- linux-2.6.32.43/drivers/media/video/videobuf-dma-sg.c 2011-03-27 14:31:47.000000000 -0400
33060+++ linux-2.6.32.43/drivers/media/video/videobuf-dma-sg.c 2011-05-16 21:46:57.000000000 -0400
33061@@ -693,6 +693,8 @@ void *videobuf_sg_alloc(size_t size)
33062 {
33063 struct videobuf_queue q;
33064
33065+ pax_track_stack();
33066+
33067 /* Required to make generic handler to call __videobuf_alloc */
33068 q.int_ops = &sg_ops;
33069
33070diff -urNp linux-2.6.32.43/drivers/message/fusion/mptbase.c linux-2.6.32.43/drivers/message/fusion/mptbase.c
33071--- linux-2.6.32.43/drivers/message/fusion/mptbase.c 2011-03-27 14:31:47.000000000 -0400
33072+++ linux-2.6.32.43/drivers/message/fusion/mptbase.c 2011-04-17 15:56:46.000000000 -0400
33073@@ -6709,8 +6709,14 @@ procmpt_iocinfo_read(char *buf, char **s
33074 len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
33075 len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
33076
33077+#ifdef CONFIG_GRKERNSEC_HIDESYM
33078+ len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
33079+ NULL, NULL);
33080+#else
33081 len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
33082 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
33083+#endif
33084+
33085 /*
33086 * Rounding UP to nearest 4-kB boundary here...
33087 */
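The mptbase procfs hunk is typical of the GRKERNSEC_HIDESYM changes: output that would expose a kernel pointer to userspace (here the request-frame address and its DMA handle) prints NULL instead when the option is enabled, so /proc cannot be harvested for addresses of kernel objects when building an exploit. The recurring shape of the change, as a generic sketch:

    #ifdef CONFIG_GRKERNSEC_HIDESYM
            seq_printf(m, "object @ %p\n", NULL);       /* address withheld */
    #else
            seq_printf(m, "object @ %p\n", obj);
    #endif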
33088diff -urNp linux-2.6.32.43/drivers/message/fusion/mptsas.c linux-2.6.32.43/drivers/message/fusion/mptsas.c
33089--- linux-2.6.32.43/drivers/message/fusion/mptsas.c 2011-03-27 14:31:47.000000000 -0400
33090+++ linux-2.6.32.43/drivers/message/fusion/mptsas.c 2011-04-17 15:56:46.000000000 -0400
33091@@ -436,6 +436,23 @@ mptsas_is_end_device(struct mptsas_devin
33092 return 0;
33093 }
33094
33095+static inline void
33096+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33097+{
33098+ if (phy_info->port_details) {
33099+ phy_info->port_details->rphy = rphy;
33100+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33101+ ioc->name, rphy));
33102+ }
33103+
33104+ if (rphy) {
33105+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33106+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33107+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33108+ ioc->name, rphy, rphy->dev.release));
33109+ }
33110+}
33111+
33112 /* no mutex */
33113 static void
33114 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
33115@@ -474,23 +491,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p
33116 return NULL;
33117 }
33118
33119-static inline void
33120-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
33121-{
33122- if (phy_info->port_details) {
33123- phy_info->port_details->rphy = rphy;
33124- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
33125- ioc->name, rphy));
33126- }
33127-
33128- if (rphy) {
33129- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
33130- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
33131- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
33132- ioc->name, rphy, rphy->dev.release));
33133- }
33134-}
33135-
33136 static inline struct sas_port *
33137 mptsas_get_port(struct mptsas_phyinfo *phy_info)
33138 {
33139diff -urNp linux-2.6.32.43/drivers/message/fusion/mptscsih.c linux-2.6.32.43/drivers/message/fusion/mptscsih.c
33140--- linux-2.6.32.43/drivers/message/fusion/mptscsih.c 2011-03-27 14:31:47.000000000 -0400
33141+++ linux-2.6.32.43/drivers/message/fusion/mptscsih.c 2011-04-17 15:56:46.000000000 -0400
33142@@ -1248,15 +1248,16 @@ mptscsih_info(struct Scsi_Host *SChost)
33143
33144 h = shost_priv(SChost);
33145
33146- if (h) {
33147- if (h->info_kbuf == NULL)
33148- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33149- return h->info_kbuf;
33150- h->info_kbuf[0] = '\0';
33151+ if (!h)
33152+ return NULL;
33153
33154- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33155- h->info_kbuf[size-1] = '\0';
33156- }
33157+ if (h->info_kbuf == NULL)
33158+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
33159+ return h->info_kbuf;
33160+ h->info_kbuf[0] = '\0';
33161+
33162+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
33163+ h->info_kbuf[size-1] = '\0';
33164
33165 return h->info_kbuf;
33166 }
33167diff -urNp linux-2.6.32.43/drivers/message/i2o/i2o_config.c linux-2.6.32.43/drivers/message/i2o/i2o_config.c
33168--- linux-2.6.32.43/drivers/message/i2o/i2o_config.c 2011-03-27 14:31:47.000000000 -0400
33169+++ linux-2.6.32.43/drivers/message/i2o/i2o_config.c 2011-05-16 21:46:57.000000000 -0400
33170@@ -787,6 +787,8 @@ static int i2o_cfg_passthru(unsigned lon
33171 struct i2o_message *msg;
33172 unsigned int iop;
33173
33174+ pax_track_stack();
33175+
33176 if (get_user(iop, &cmd->iop) || get_user(user_msg, &cmd->msg))
33177 return -EFAULT;
33178
33179diff -urNp linux-2.6.32.43/drivers/message/i2o/i2o_proc.c linux-2.6.32.43/drivers/message/i2o/i2o_proc.c
33180--- linux-2.6.32.43/drivers/message/i2o/i2o_proc.c 2011-03-27 14:31:47.000000000 -0400
33181+++ linux-2.6.32.43/drivers/message/i2o/i2o_proc.c 2011-04-17 15:56:46.000000000 -0400
33182@@ -259,13 +259,6 @@ static char *scsi_devices[] = {
33183 "Array Controller Device"
33184 };
33185
33186-static char *chtostr(u8 * chars, int n)
33187-{
33188- char tmp[256];
33189- tmp[0] = 0;
33190- return strncat(tmp, (char *)chars, n);
33191-}
33192-
33193 static int i2o_report_query_status(struct seq_file *seq, int block_status,
33194 char *group)
33195 {
33196@@ -842,8 +835,7 @@ static int i2o_seq_show_ddm_table(struct
33197
33198 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
33199 seq_printf(seq, "%-#8x", ddm_table.module_id);
33200- seq_printf(seq, "%-29s",
33201- chtostr(ddm_table.module_name_version, 28));
33202+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
33203 seq_printf(seq, "%9d ", ddm_table.data_size);
33204 seq_printf(seq, "%8d", ddm_table.code_size);
33205
33206@@ -944,8 +936,8 @@ static int i2o_seq_show_drivers_stored(s
33207
33208 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
33209 seq_printf(seq, "%-#8x", dst->module_id);
33210- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28));
33211- seq_printf(seq, "%-9s", chtostr(dst->date, 8));
33212+ seq_printf(seq, "%-.28s", dst->module_name_version);
33213+ seq_printf(seq, "%-.8s", dst->date);
33214 seq_printf(seq, "%8d ", dst->module_size);
33215 seq_printf(seq, "%8d ", dst->mpb_size);
33216 seq_printf(seq, "0x%04x", dst->module_flags);
33217@@ -1276,14 +1268,10 @@ static int i2o_seq_show_dev_identity(str
33218 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
33219 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
33220 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
33221- seq_printf(seq, "Vendor info : %s\n",
33222- chtostr((u8 *) (work32 + 2), 16));
33223- seq_printf(seq, "Product info : %s\n",
33224- chtostr((u8 *) (work32 + 6), 16));
33225- seq_printf(seq, "Description : %s\n",
33226- chtostr((u8 *) (work32 + 10), 16));
33227- seq_printf(seq, "Product rev. : %s\n",
33228- chtostr((u8 *) (work32 + 14), 8));
33229+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
33230+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
33231+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
33232+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
33233
33234 seq_printf(seq, "Serial number : ");
33235 print_serial_number(seq, (u8 *) (work32 + 16),
33236@@ -1328,10 +1316,8 @@ static int i2o_seq_show_ddm_identity(str
33237 }
33238
33239 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
33240- seq_printf(seq, "Module name : %s\n",
33241- chtostr(result.module_name, 24));
33242- seq_printf(seq, "Module revision : %s\n",
33243- chtostr(result.module_rev, 8));
33244+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
33245+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
33246
33247 seq_printf(seq, "Serial number : ");
33248 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
33249@@ -1362,14 +1348,10 @@ static int i2o_seq_show_uinfo(struct seq
33250 return 0;
33251 }
33252
33253- seq_printf(seq, "Device name : %s\n",
33254- chtostr(result.device_name, 64));
33255- seq_printf(seq, "Service name : %s\n",
33256- chtostr(result.service_name, 64));
33257- seq_printf(seq, "Physical name : %s\n",
33258- chtostr(result.physical_location, 64));
33259- seq_printf(seq, "Instance number : %s\n",
33260- chtostr(result.instance_number, 4));
33261+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
33262+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
33263+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
33264+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
33265
33266 return 0;
33267 }
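The i2o_proc hunk above removes chtostr(), which strncat'ed the field into a 256-byte buffer on its own stack and returned a pointer to that buffer, storage that is already dead by the time the caller uses it. The replacement relies on printf precision instead, which caps how many bytes are read from the fixed-width, not necessarily NUL-terminated descriptor fields and needs no temporary at all. The idiom:

    #include <stdio.h>

    int main(void)
    {
            /* fixed-width field with no terminating NUL, as in the I2O descriptor tables */
            char date[8] = { '2', '0', '1', '1', '0', '8', '0', '7' };

            /* "%.8s" stops after 8 bytes even though no NUL is present */
            printf("Date : %.8s\n", date);
            return 0;
    }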
33268diff -urNp linux-2.6.32.43/drivers/message/i2o/iop.c linux-2.6.32.43/drivers/message/i2o/iop.c
33269--- linux-2.6.32.43/drivers/message/i2o/iop.c 2011-03-27 14:31:47.000000000 -0400
33270+++ linux-2.6.32.43/drivers/message/i2o/iop.c 2011-05-04 17:56:28.000000000 -0400
33271@@ -110,10 +110,10 @@ u32 i2o_cntxt_list_add(struct i2o_contro
33272
33273 spin_lock_irqsave(&c->context_list_lock, flags);
33274
33275- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
33276- atomic_inc(&c->context_list_counter);
33277+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
33278+ atomic_inc_unchecked(&c->context_list_counter);
33279
33280- entry->context = atomic_read(&c->context_list_counter);
33281+ entry->context = atomic_read_unchecked(&c->context_list_counter);
33282
33283 list_add(&entry->list, &c->context_list);
33284
33285@@ -1076,7 +1076,7 @@ struct i2o_controller *i2o_iop_alloc(voi
33286
33287 #if BITS_PER_LONG == 64
33288 spin_lock_init(&c->context_list_lock);
33289- atomic_set(&c->context_list_counter, 0);
33290+ atomic_set_unchecked(&c->context_list_counter, 0);
33291 INIT_LIST_HEAD(&c->context_list);
33292 #endif
33293
33294diff -urNp linux-2.6.32.43/drivers/mfd/wm8350-i2c.c linux-2.6.32.43/drivers/mfd/wm8350-i2c.c
33295--- linux-2.6.32.43/drivers/mfd/wm8350-i2c.c 2011-03-27 14:31:47.000000000 -0400
33296+++ linux-2.6.32.43/drivers/mfd/wm8350-i2c.c 2011-05-16 21:46:57.000000000 -0400
33297@@ -43,6 +43,8 @@ static int wm8350_i2c_write_device(struc
33298 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
33299 int ret;
33300
33301+ pax_track_stack();
33302+
33303 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
33304 return -EINVAL;
33305
33306diff -urNp linux-2.6.32.43/drivers/misc/kgdbts.c linux-2.6.32.43/drivers/misc/kgdbts.c
33307--- linux-2.6.32.43/drivers/misc/kgdbts.c 2011-03-27 14:31:47.000000000 -0400
33308+++ linux-2.6.32.43/drivers/misc/kgdbts.c 2011-04-17 15:56:46.000000000 -0400
33309@@ -118,7 +118,7 @@
33310 } while (0)
33311 #define MAX_CONFIG_LEN 40
33312
33313-static struct kgdb_io kgdbts_io_ops;
33314+static const struct kgdb_io kgdbts_io_ops;
33315 static char get_buf[BUFMAX];
33316 static int get_buf_cnt;
33317 static char put_buf[BUFMAX];
33318@@ -1102,7 +1102,7 @@ static void kgdbts_post_exp_handler(void
33319 module_put(THIS_MODULE);
33320 }
33321
33322-static struct kgdb_io kgdbts_io_ops = {
33323+static const struct kgdb_io kgdbts_io_ops = {
33324 .name = "kgdbts",
33325 .read_char = kgdbts_get_char,
33326 .write_char = kgdbts_put_char,
33327diff -urNp linux-2.6.32.43/drivers/misc/sgi-gru/gruhandles.c linux-2.6.32.43/drivers/misc/sgi-gru/gruhandles.c
33328--- linux-2.6.32.43/drivers/misc/sgi-gru/gruhandles.c 2011-03-27 14:31:47.000000000 -0400
33329+++ linux-2.6.32.43/drivers/misc/sgi-gru/gruhandles.c 2011-04-17 15:56:46.000000000 -0400
33330@@ -39,8 +39,8 @@ struct mcs_op_statistic mcs_op_statistic
33331
33332 static void update_mcs_stats(enum mcs_op op, unsigned long clks)
33333 {
33334- atomic_long_inc(&mcs_op_statistics[op].count);
33335- atomic_long_add(clks, &mcs_op_statistics[op].total);
33336+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
33337+ atomic_long_add_unchecked(clks, &mcs_op_statistics[op].total);
33338 if (mcs_op_statistics[op].max < clks)
33339 mcs_op_statistics[op].max = clks;
33340 }
33341diff -urNp linux-2.6.32.43/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.32.43/drivers/misc/sgi-gru/gruprocfs.c
33342--- linux-2.6.32.43/drivers/misc/sgi-gru/gruprocfs.c 2011-03-27 14:31:47.000000000 -0400
33343+++ linux-2.6.32.43/drivers/misc/sgi-gru/gruprocfs.c 2011-04-17 15:56:46.000000000 -0400
33344@@ -32,9 +32,9 @@
33345
33346 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
33347
33348-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
33349+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
33350 {
33351- unsigned long val = atomic_long_read(v);
33352+ unsigned long val = atomic_long_read_unchecked(v);
33353
33354 if (val)
33355 seq_printf(s, "%16lu %s\n", val, id);
33356@@ -136,8 +136,8 @@ static int mcs_statistics_show(struct se
33357 "cch_interrupt_sync", "cch_deallocate", "tgh_invalidate"};
33358
33359 for (op = 0; op < mcsop_last; op++) {
33360- count = atomic_long_read(&mcs_op_statistics[op].count);
33361- total = atomic_long_read(&mcs_op_statistics[op].total);
33362+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
33363+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
33364 max = mcs_op_statistics[op].max;
33365 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
33366 count ? total / count : 0, max);
33367diff -urNp linux-2.6.32.43/drivers/misc/sgi-gru/grutables.h linux-2.6.32.43/drivers/misc/sgi-gru/grutables.h
33368--- linux-2.6.32.43/drivers/misc/sgi-gru/grutables.h 2011-03-27 14:31:47.000000000 -0400
33369+++ linux-2.6.32.43/drivers/misc/sgi-gru/grutables.h 2011-04-17 15:56:46.000000000 -0400
33370@@ -167,84 +167,84 @@ extern unsigned int gru_max_gids;
33371 * GRU statistics.
33372 */
33373 struct gru_stats_s {
33374- atomic_long_t vdata_alloc;
33375- atomic_long_t vdata_free;
33376- atomic_long_t gts_alloc;
33377- atomic_long_t gts_free;
33378- atomic_long_t vdata_double_alloc;
33379- atomic_long_t gts_double_allocate;
33380- atomic_long_t assign_context;
33381- atomic_long_t assign_context_failed;
33382- atomic_long_t free_context;
33383- atomic_long_t load_user_context;
33384- atomic_long_t load_kernel_context;
33385- atomic_long_t lock_kernel_context;
33386- atomic_long_t unlock_kernel_context;
33387- atomic_long_t steal_user_context;
33388- atomic_long_t steal_kernel_context;
33389- atomic_long_t steal_context_failed;
33390- atomic_long_t nopfn;
33391- atomic_long_t break_cow;
33392- atomic_long_t asid_new;
33393- atomic_long_t asid_next;
33394- atomic_long_t asid_wrap;
33395- atomic_long_t asid_reuse;
33396- atomic_long_t intr;
33397- atomic_long_t intr_mm_lock_failed;
33398- atomic_long_t call_os;
33399- atomic_long_t call_os_offnode_reference;
33400- atomic_long_t call_os_check_for_bug;
33401- atomic_long_t call_os_wait_queue;
33402- atomic_long_t user_flush_tlb;
33403- atomic_long_t user_unload_context;
33404- atomic_long_t user_exception;
33405- atomic_long_t set_context_option;
33406- atomic_long_t migrate_check;
33407- atomic_long_t migrated_retarget;
33408- atomic_long_t migrated_unload;
33409- atomic_long_t migrated_unload_delay;
33410- atomic_long_t migrated_nopfn_retarget;
33411- atomic_long_t migrated_nopfn_unload;
33412- atomic_long_t tlb_dropin;
33413- atomic_long_t tlb_dropin_fail_no_asid;
33414- atomic_long_t tlb_dropin_fail_upm;
33415- atomic_long_t tlb_dropin_fail_invalid;
33416- atomic_long_t tlb_dropin_fail_range_active;
33417- atomic_long_t tlb_dropin_fail_idle;
33418- atomic_long_t tlb_dropin_fail_fmm;
33419- atomic_long_t tlb_dropin_fail_no_exception;
33420- atomic_long_t tlb_dropin_fail_no_exception_war;
33421- atomic_long_t tfh_stale_on_fault;
33422- atomic_long_t mmu_invalidate_range;
33423- atomic_long_t mmu_invalidate_page;
33424- atomic_long_t mmu_clear_flush_young;
33425- atomic_long_t flush_tlb;
33426- atomic_long_t flush_tlb_gru;
33427- atomic_long_t flush_tlb_gru_tgh;
33428- atomic_long_t flush_tlb_gru_zero_asid;
33429-
33430- atomic_long_t copy_gpa;
33431-
33432- atomic_long_t mesq_receive;
33433- atomic_long_t mesq_receive_none;
33434- atomic_long_t mesq_send;
33435- atomic_long_t mesq_send_failed;
33436- atomic_long_t mesq_noop;
33437- atomic_long_t mesq_send_unexpected_error;
33438- atomic_long_t mesq_send_lb_overflow;
33439- atomic_long_t mesq_send_qlimit_reached;
33440- atomic_long_t mesq_send_amo_nacked;
33441- atomic_long_t mesq_send_put_nacked;
33442- atomic_long_t mesq_qf_not_full;
33443- atomic_long_t mesq_qf_locked;
33444- atomic_long_t mesq_qf_noop_not_full;
33445- atomic_long_t mesq_qf_switch_head_failed;
33446- atomic_long_t mesq_qf_unexpected_error;
33447- atomic_long_t mesq_noop_unexpected_error;
33448- atomic_long_t mesq_noop_lb_overflow;
33449- atomic_long_t mesq_noop_qlimit_reached;
33450- atomic_long_t mesq_noop_amo_nacked;
33451- atomic_long_t mesq_noop_put_nacked;
33452+ atomic_long_unchecked_t vdata_alloc;
33453+ atomic_long_unchecked_t vdata_free;
33454+ atomic_long_unchecked_t gts_alloc;
33455+ atomic_long_unchecked_t gts_free;
33456+ atomic_long_unchecked_t vdata_double_alloc;
33457+ atomic_long_unchecked_t gts_double_allocate;
33458+ atomic_long_unchecked_t assign_context;
33459+ atomic_long_unchecked_t assign_context_failed;
33460+ atomic_long_unchecked_t free_context;
33461+ atomic_long_unchecked_t load_user_context;
33462+ atomic_long_unchecked_t load_kernel_context;
33463+ atomic_long_unchecked_t lock_kernel_context;
33464+ atomic_long_unchecked_t unlock_kernel_context;
33465+ atomic_long_unchecked_t steal_user_context;
33466+ atomic_long_unchecked_t steal_kernel_context;
33467+ atomic_long_unchecked_t steal_context_failed;
33468+ atomic_long_unchecked_t nopfn;
33469+ atomic_long_unchecked_t break_cow;
33470+ atomic_long_unchecked_t asid_new;
33471+ atomic_long_unchecked_t asid_next;
33472+ atomic_long_unchecked_t asid_wrap;
33473+ atomic_long_unchecked_t asid_reuse;
33474+ atomic_long_unchecked_t intr;
33475+ atomic_long_unchecked_t intr_mm_lock_failed;
33476+ atomic_long_unchecked_t call_os;
33477+ atomic_long_unchecked_t call_os_offnode_reference;
33478+ atomic_long_unchecked_t call_os_check_for_bug;
33479+ atomic_long_unchecked_t call_os_wait_queue;
33480+ atomic_long_unchecked_t user_flush_tlb;
33481+ atomic_long_unchecked_t user_unload_context;
33482+ atomic_long_unchecked_t user_exception;
33483+ atomic_long_unchecked_t set_context_option;
33484+ atomic_long_unchecked_t migrate_check;
33485+ atomic_long_unchecked_t migrated_retarget;
33486+ atomic_long_unchecked_t migrated_unload;
33487+ atomic_long_unchecked_t migrated_unload_delay;
33488+ atomic_long_unchecked_t migrated_nopfn_retarget;
33489+ atomic_long_unchecked_t migrated_nopfn_unload;
33490+ atomic_long_unchecked_t tlb_dropin;
33491+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
33492+ atomic_long_unchecked_t tlb_dropin_fail_upm;
33493+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
33494+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
33495+ atomic_long_unchecked_t tlb_dropin_fail_idle;
33496+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
33497+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
33498+ atomic_long_unchecked_t tlb_dropin_fail_no_exception_war;
33499+ atomic_long_unchecked_t tfh_stale_on_fault;
33500+ atomic_long_unchecked_t mmu_invalidate_range;
33501+ atomic_long_unchecked_t mmu_invalidate_page;
33502+ atomic_long_unchecked_t mmu_clear_flush_young;
33503+ atomic_long_unchecked_t flush_tlb;
33504+ atomic_long_unchecked_t flush_tlb_gru;
33505+ atomic_long_unchecked_t flush_tlb_gru_tgh;
33506+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
33507+
33508+ atomic_long_unchecked_t copy_gpa;
33509+
33510+ atomic_long_unchecked_t mesq_receive;
33511+ atomic_long_unchecked_t mesq_receive_none;
33512+ atomic_long_unchecked_t mesq_send;
33513+ atomic_long_unchecked_t mesq_send_failed;
33514+ atomic_long_unchecked_t mesq_noop;
33515+ atomic_long_unchecked_t mesq_send_unexpected_error;
33516+ atomic_long_unchecked_t mesq_send_lb_overflow;
33517+ atomic_long_unchecked_t mesq_send_qlimit_reached;
33518+ atomic_long_unchecked_t mesq_send_amo_nacked;
33519+ atomic_long_unchecked_t mesq_send_put_nacked;
33520+ atomic_long_unchecked_t mesq_qf_not_full;
33521+ atomic_long_unchecked_t mesq_qf_locked;
33522+ atomic_long_unchecked_t mesq_qf_noop_not_full;
33523+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
33524+ atomic_long_unchecked_t mesq_qf_unexpected_error;
33525+ atomic_long_unchecked_t mesq_noop_unexpected_error;
33526+ atomic_long_unchecked_t mesq_noop_lb_overflow;
33527+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
33528+ atomic_long_unchecked_t mesq_noop_amo_nacked;
33529+ atomic_long_unchecked_t mesq_noop_put_nacked;
33530
33531 };
33532
33533@@ -252,8 +252,8 @@ enum mcs_op {cchop_allocate, cchop_start
33534 cchop_deallocate, tghop_invalidate, mcsop_last};
33535
33536 struct mcs_op_statistic {
33537- atomic_long_t count;
33538- atomic_long_t total;
33539+ atomic_long_unchecked_t count;
33540+ atomic_long_unchecked_t total;
33541 unsigned long max;
33542 };
33543
33544@@ -276,7 +276,7 @@ extern struct mcs_op_statistic mcs_op_st
33545
33546 #define STAT(id) do { \
33547 if (gru_options & OPT_STATS) \
33548- atomic_long_inc(&gru_stats.id); \
33549+ atomic_long_inc_unchecked(&gru_stats.id); \
33550 } while (0)
33551
33552 #ifdef CONFIG_SGI_GRU_DEBUG
33553diff -urNp linux-2.6.32.43/drivers/misc/sgi-xp/xpc.h linux-2.6.32.43/drivers/misc/sgi-xp/xpc.h
33554--- linux-2.6.32.43/drivers/misc/sgi-xp/xpc.h 2011-03-27 14:31:47.000000000 -0400
33555+++ linux-2.6.32.43/drivers/misc/sgi-xp/xpc.h 2011-08-05 20:33:55.000000000 -0400
33556@@ -876,7 +876,7 @@ extern struct xpc_registration xpc_regis
33557 /* found in xpc_main.c */
33558 extern struct device *xpc_part;
33559 extern struct device *xpc_chan;
33560-extern struct xpc_arch_operations xpc_arch_ops;
33561+extern const struct xpc_arch_operations xpc_arch_ops;
33562 extern int xpc_disengage_timelimit;
33563 extern int xpc_disengage_timedout;
33564 extern int xpc_activate_IRQ_rcvd;
33565diff -urNp linux-2.6.32.43/drivers/misc/sgi-xp/xpc_main.c linux-2.6.32.43/drivers/misc/sgi-xp/xpc_main.c
33566--- linux-2.6.32.43/drivers/misc/sgi-xp/xpc_main.c 2011-03-27 14:31:47.000000000 -0400
33567+++ linux-2.6.32.43/drivers/misc/sgi-xp/xpc_main.c 2011-08-05 20:33:55.000000000 -0400
33568@@ -169,7 +169,7 @@ static struct notifier_block xpc_die_not
33569 .notifier_call = xpc_system_die,
33570 };
33571
33572-struct xpc_arch_operations xpc_arch_ops;
33573+const struct xpc_arch_operations xpc_arch_ops;
33574
33575 /*
33576 * Timer function to enforce the timelimit on the partition disengage.
33577diff -urNp linux-2.6.32.43/drivers/misc/sgi-xp/xpc_sn2.c linux-2.6.32.43/drivers/misc/sgi-xp/xpc_sn2.c
33578--- linux-2.6.32.43/drivers/misc/sgi-xp/xpc_sn2.c 2011-03-27 14:31:47.000000000 -0400
33579+++ linux-2.6.32.43/drivers/misc/sgi-xp/xpc_sn2.c 2011-08-05 20:33:55.000000000 -0400
33580@@ -2350,7 +2350,7 @@ xpc_received_payload_sn2(struct xpc_chan
33581 xpc_acknowledge_msgs_sn2(ch, get, msg->flags);
33582 }
33583
33584-static struct xpc_arch_operations xpc_arch_ops_sn2 = {
33585+static const struct xpc_arch_operations xpc_arch_ops_sn2 = {
33586 .setup_partitions = xpc_setup_partitions_sn2,
33587 .teardown_partitions = xpc_teardown_partitions_sn2,
33588 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_sn2,
33589@@ -2413,7 +2413,9 @@ xpc_init_sn2(void)
33590 int ret;
33591 size_t buf_size;
33592
33593- xpc_arch_ops = xpc_arch_ops_sn2;
33594+ pax_open_kernel();
33595+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_sn2, sizeof(xpc_arch_ops_sn2));
33596+ pax_close_kernel();
33597
33598 if (offsetof(struct xpc_msg_sn2, payload) > XPC_MSG_HDR_MAX_SIZE) {
33599 dev_err(xpc_part, "header portion of struct xpc_msg_sn2 is "
33600diff -urNp linux-2.6.32.43/drivers/misc/sgi-xp/xpc_uv.c linux-2.6.32.43/drivers/misc/sgi-xp/xpc_uv.c
33601--- linux-2.6.32.43/drivers/misc/sgi-xp/xpc_uv.c 2011-03-27 14:31:47.000000000 -0400
33602+++ linux-2.6.32.43/drivers/misc/sgi-xp/xpc_uv.c 2011-08-05 20:33:55.000000000 -0400
33603@@ -1669,7 +1669,7 @@ xpc_received_payload_uv(struct xpc_chann
33604 XPC_DEACTIVATE_PARTITION(&xpc_partitions[ch->partid], ret);
33605 }
33606
33607-static struct xpc_arch_operations xpc_arch_ops_uv = {
33608+static const struct xpc_arch_operations xpc_arch_ops_uv = {
33609 .setup_partitions = xpc_setup_partitions_uv,
33610 .teardown_partitions = xpc_teardown_partitions_uv,
33611 .process_activate_IRQ_rcvd = xpc_process_activate_IRQ_rcvd_uv,
33612@@ -1729,7 +1729,9 @@ static struct xpc_arch_operations xpc_ar
33613 int
33614 xpc_init_uv(void)
33615 {
33616- xpc_arch_ops = xpc_arch_ops_uv;
33617+ pax_open_kernel();
33618+ memcpy((void *)&xpc_arch_ops, &xpc_arch_ops_uv, sizeof(xpc_arch_ops_uv));
33619+ pax_close_kernel();
33620
33621 if (sizeof(struct xpc_notify_mq_msghdr_uv) > XPC_MSG_HDR_MAX_SIZE) {
33622 dev_err(xpc_part, "xpc_notify_mq_msghdr_uv is larger than %d\n",
33623diff -urNp linux-2.6.32.43/drivers/misc/sgi-xp/xp.h linux-2.6.32.43/drivers/misc/sgi-xp/xp.h
33624--- linux-2.6.32.43/drivers/misc/sgi-xp/xp.h 2011-03-27 14:31:47.000000000 -0400
33625+++ linux-2.6.32.43/drivers/misc/sgi-xp/xp.h 2011-08-05 20:33:55.000000000 -0400
33626@@ -289,7 +289,7 @@ struct xpc_interface {
33627 xpc_notify_func, void *);
33628 void (*received) (short, int, void *);
33629 enum xp_retval (*partid_to_nasids) (short, void *);
33630-};
33631+} __no_const;
33632
33633 extern struct xpc_interface xpc_interface;
33634
33635diff -urNp linux-2.6.32.43/drivers/mtd/chips/cfi_cmdset_0001.c linux-2.6.32.43/drivers/mtd/chips/cfi_cmdset_0001.c
33636--- linux-2.6.32.43/drivers/mtd/chips/cfi_cmdset_0001.c 2011-03-27 14:31:47.000000000 -0400
33637+++ linux-2.6.32.43/drivers/mtd/chips/cfi_cmdset_0001.c 2011-05-16 21:46:57.000000000 -0400
33638@@ -743,6 +743,8 @@ static int chip_ready (struct map_info *
33639 struct cfi_pri_intelext *cfip = cfi->cmdset_priv;
33640 unsigned long timeo = jiffies + HZ;
33641
33642+ pax_track_stack();
33643+
33644 /* Prevent setting state FL_SYNCING for chip in suspended state. */
33645 if (mode == FL_SYNCING && chip->oldstate != FL_READY)
33646 goto sleep;
33647@@ -1642,6 +1644,8 @@ static int __xipram do_write_buffer(stru
33648 unsigned long initial_adr;
33649 int initial_len = len;
33650
33651+ pax_track_stack();
33652+
33653 wbufsize = cfi_interleave(cfi) << cfi->cfiq->MaxBufWriteSize;
33654 adr += chip->start;
33655 initial_adr = adr;
33656@@ -1860,6 +1864,8 @@ static int __xipram do_erase_oneblock(st
33657 int retries = 3;
33658 int ret;
33659
33660+ pax_track_stack();
33661+
33662 adr += chip->start;
33663
33664 retry:
33665diff -urNp linux-2.6.32.43/drivers/mtd/chips/cfi_cmdset_0020.c linux-2.6.32.43/drivers/mtd/chips/cfi_cmdset_0020.c
33666--- linux-2.6.32.43/drivers/mtd/chips/cfi_cmdset_0020.c 2011-03-27 14:31:47.000000000 -0400
33667+++ linux-2.6.32.43/drivers/mtd/chips/cfi_cmdset_0020.c 2011-05-16 21:46:57.000000000 -0400
33668@@ -255,6 +255,8 @@ static inline int do_read_onechip(struct
33669 unsigned long cmd_addr;
33670 struct cfi_private *cfi = map->fldrv_priv;
33671
33672+ pax_track_stack();
33673+
33674 adr += chip->start;
33675
33676 /* Ensure cmd read/writes are aligned. */
33677@@ -428,6 +430,8 @@ static inline int do_write_buffer(struct
33678 DECLARE_WAITQUEUE(wait, current);
33679 int wbufsize, z;
33680
33681+ pax_track_stack();
33682+
33683 /* M58LW064A requires bus alignment for buffer writes -- saw */
33684 if (adr & (map_bankwidth(map)-1))
33685 return -EINVAL;
33686@@ -742,6 +746,8 @@ static inline int do_erase_oneblock(stru
33687 DECLARE_WAITQUEUE(wait, current);
33688 int ret = 0;
33689
33690+ pax_track_stack();
33691+
33692 adr += chip->start;
33693
33694 /* Let's determine this according to the interleave only once */
33695@@ -1047,6 +1053,8 @@ static inline int do_lock_oneblock(struc
33696 unsigned long timeo = jiffies + HZ;
33697 DECLARE_WAITQUEUE(wait, current);
33698
33699+ pax_track_stack();
33700+
33701 adr += chip->start;
33702
33703 /* Let's determine this according to the interleave only once */
33704@@ -1196,6 +1204,8 @@ static inline int do_unlock_oneblock(str
33705 unsigned long timeo = jiffies + HZ;
33706 DECLARE_WAITQUEUE(wait, current);
33707
33708+ pax_track_stack();
33709+
33710 adr += chip->start;
33711
33712 /* Let's determine this according to the interleave only once */
33713diff -urNp linux-2.6.32.43/drivers/mtd/devices/doc2000.c linux-2.6.32.43/drivers/mtd/devices/doc2000.c
33714--- linux-2.6.32.43/drivers/mtd/devices/doc2000.c 2011-03-27 14:31:47.000000000 -0400
33715+++ linux-2.6.32.43/drivers/mtd/devices/doc2000.c 2011-04-17 15:56:46.000000000 -0400
33716@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt
33717
33718 /* The ECC will not be calculated correctly if less than 512 is written */
33719 /* DBB-
33720- if (len != 0x200 && eccbuf)
33721+ if (len != 0x200)
33722 printk(KERN_WARNING
33723 "ECC needs a full sector write (adr: %lx size %lx)\n",
33724 (long) to, (long) len);
33725diff -urNp linux-2.6.32.43/drivers/mtd/devices/doc2001.c linux-2.6.32.43/drivers/mtd/devices/doc2001.c
33726--- linux-2.6.32.43/drivers/mtd/devices/doc2001.c 2011-03-27 14:31:47.000000000 -0400
33727+++ linux-2.6.32.43/drivers/mtd/devices/doc2001.c 2011-04-17 15:56:46.000000000 -0400
33728@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt
33729 struct Nand *mychip = &this->chips[from >> (this->chipshift)];
33730
33731 /* Don't allow read past end of device */
33732- if (from >= this->totlen)
33733+ if (from >= this->totlen || !len)
33734 return -EINVAL;
33735
33736 /* Don't allow a single read to cross a 512-byte block boundary */
33737diff -urNp linux-2.6.32.43/drivers/mtd/ftl.c linux-2.6.32.43/drivers/mtd/ftl.c
33738--- linux-2.6.32.43/drivers/mtd/ftl.c 2011-03-27 14:31:47.000000000 -0400
33739+++ linux-2.6.32.43/drivers/mtd/ftl.c 2011-05-16 21:46:57.000000000 -0400
33740@@ -474,6 +474,8 @@ static int copy_erase_unit(partition_t *
33741 loff_t offset;
33742 uint16_t srcunitswap = cpu_to_le16(srcunit);
33743
33744+ pax_track_stack();
33745+
33746 eun = &part->EUNInfo[srcunit];
33747 xfer = &part->XferInfo[xferunit];
33748 DEBUG(2, "ftl_cs: copying block 0x%x to 0x%x\n",
33749diff -urNp linux-2.6.32.43/drivers/mtd/inftlcore.c linux-2.6.32.43/drivers/mtd/inftlcore.c
33750--- linux-2.6.32.43/drivers/mtd/inftlcore.c 2011-03-27 14:31:47.000000000 -0400
33751+++ linux-2.6.32.43/drivers/mtd/inftlcore.c 2011-05-16 21:46:57.000000000 -0400
33752@@ -260,6 +260,8 @@ static u16 INFTL_foldchain(struct INFTLr
33753 struct inftl_oob oob;
33754 size_t retlen;
33755
33756+ pax_track_stack();
33757+
33758 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: INFTL_foldchain(inftl=%p,thisVUC=%d,"
33759 "pending=%d)\n", inftl, thisVUC, pendingblock);
33760
33761diff -urNp linux-2.6.32.43/drivers/mtd/inftlmount.c linux-2.6.32.43/drivers/mtd/inftlmount.c
33762--- linux-2.6.32.43/drivers/mtd/inftlmount.c 2011-03-27 14:31:47.000000000 -0400
33763+++ linux-2.6.32.43/drivers/mtd/inftlmount.c 2011-05-16 21:46:57.000000000 -0400
33764@@ -54,6 +54,8 @@ static int find_boot_record(struct INFTL
33765 struct INFTLPartition *ip;
33766 size_t retlen;
33767
33768+ pax_track_stack();
33769+
33770 DEBUG(MTD_DEBUG_LEVEL3, "INFTL: find_boot_record(inftl=%p)\n", inftl);
33771
33772 /*
33773diff -urNp linux-2.6.32.43/drivers/mtd/lpddr/qinfo_probe.c linux-2.6.32.43/drivers/mtd/lpddr/qinfo_probe.c
33774--- linux-2.6.32.43/drivers/mtd/lpddr/qinfo_probe.c 2011-03-27 14:31:47.000000000 -0400
33775+++ linux-2.6.32.43/drivers/mtd/lpddr/qinfo_probe.c 2011-05-16 21:46:57.000000000 -0400
33776@@ -106,6 +106,8 @@ static int lpddr_pfow_present(struct map
33777 {
33778 map_word pfow_val[4];
33779
33780+ pax_track_stack();
33781+
33782 /* Check identification string */
33783 pfow_val[0] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_P);
33784 pfow_val[1] = map_read(map, map->pfow_base + PFOW_QUERY_STRING_F);
33785diff -urNp linux-2.6.32.43/drivers/mtd/mtdchar.c linux-2.6.32.43/drivers/mtd/mtdchar.c
33786--- linux-2.6.32.43/drivers/mtd/mtdchar.c 2011-03-27 14:31:47.000000000 -0400
33787+++ linux-2.6.32.43/drivers/mtd/mtdchar.c 2011-05-16 21:46:57.000000000 -0400
33788@@ -460,6 +460,8 @@ static int mtd_ioctl(struct inode *inode
33789 u_long size;
33790 struct mtd_info_user info;
33791
33792+ pax_track_stack();
33793+
33794 DEBUG(MTD_DEBUG_LEVEL0, "MTD_ioctl\n");
33795
33796 size = (cmd & IOCSIZE_MASK) >> IOCSIZE_SHIFT;
33797diff -urNp linux-2.6.32.43/drivers/mtd/nftlcore.c linux-2.6.32.43/drivers/mtd/nftlcore.c
33798--- linux-2.6.32.43/drivers/mtd/nftlcore.c 2011-03-27 14:31:47.000000000 -0400
33799+++ linux-2.6.32.43/drivers/mtd/nftlcore.c 2011-05-16 21:46:57.000000000 -0400
33800@@ -254,6 +254,8 @@ static u16 NFTL_foldchain (struct NFTLre
33801 int inplace = 1;
33802 size_t retlen;
33803
33804+ pax_track_stack();
33805+
33806 memset(BlockMap, 0xff, sizeof(BlockMap));
33807 memset(BlockFreeFound, 0, sizeof(BlockFreeFound));
33808
33809diff -urNp linux-2.6.32.43/drivers/mtd/nftlmount.c linux-2.6.32.43/drivers/mtd/nftlmount.c
33810--- linux-2.6.32.43/drivers/mtd/nftlmount.c 2011-03-27 14:31:47.000000000 -0400
33811+++ linux-2.6.32.43/drivers/mtd/nftlmount.c 2011-05-18 20:09:37.000000000 -0400
33812@@ -23,6 +23,7 @@
33813 #include <asm/errno.h>
33814 #include <linux/delay.h>
33815 #include <linux/slab.h>
33816+#include <linux/sched.h>
33817 #include <linux/mtd/mtd.h>
33818 #include <linux/mtd/nand.h>
33819 #include <linux/mtd/nftl.h>
33820@@ -44,6 +45,8 @@ static int find_boot_record(struct NFTLr
33821 struct mtd_info *mtd = nftl->mbd.mtd;
33822 unsigned int i;
33823
33824+ pax_track_stack();
33825+
33826 /* Assume logical EraseSize == physical erasesize for starting the scan.
33827 We'll sort it out later if we find a MediaHeader which says otherwise */
33828 /* Actually, we won't. The new DiskOnChip driver has already scanned
33829diff -urNp linux-2.6.32.43/drivers/mtd/ubi/build.c linux-2.6.32.43/drivers/mtd/ubi/build.c
33830--- linux-2.6.32.43/drivers/mtd/ubi/build.c 2011-03-27 14:31:47.000000000 -0400
33831+++ linux-2.6.32.43/drivers/mtd/ubi/build.c 2011-04-17 15:56:46.000000000 -0400
33832@@ -1255,7 +1255,7 @@ module_exit(ubi_exit);
33833 static int __init bytes_str_to_int(const char *str)
33834 {
33835 char *endp;
33836- unsigned long result;
33837+ unsigned long result, scale = 1;
33838
33839 result = simple_strtoul(str, &endp, 0);
33840 if (str == endp || result >= INT_MAX) {
33841@@ -1266,11 +1266,11 @@ static int __init bytes_str_to_int(const
33842
33843 switch (*endp) {
33844 case 'G':
33845- result *= 1024;
33846+ scale *= 1024;
33847 case 'M':
33848- result *= 1024;
33849+ scale *= 1024;
33850 case 'K':
33851- result *= 1024;
33852+ scale *= 1024;
33853 if (endp[1] == 'i' && endp[2] == 'B')
33854 endp += 2;
33855 case '\0':
33856@@ -1281,7 +1281,13 @@ static int __init bytes_str_to_int(const
33857 return -EINVAL;
33858 }
33859
33860- return result;
33861+ if ((intoverflow_t)result*scale >= INT_MAX) {
33862+ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n",
33863+ str);
33864+ return -EINVAL;
33865+ }
33866+
33867+ return result*scale;
33868 }
33869
33870 /**
33871diff -urNp linux-2.6.32.43/drivers/net/bnx2.c linux-2.6.32.43/drivers/net/bnx2.c
33872--- linux-2.6.32.43/drivers/net/bnx2.c 2011-03-27 14:31:47.000000000 -0400
33873+++ linux-2.6.32.43/drivers/net/bnx2.c 2011-05-16 21:46:57.000000000 -0400
33874@@ -5809,6 +5809,8 @@ bnx2_test_nvram(struct bnx2 *bp)
33875 int rc = 0;
33876 u32 magic, csum;
33877
33878+ pax_track_stack();
33879+
33880 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
33881 goto test_nvram_done;
33882
33883diff -urNp linux-2.6.32.43/drivers/net/cxgb3/l2t.h linux-2.6.32.43/drivers/net/cxgb3/l2t.h
33884--- linux-2.6.32.43/drivers/net/cxgb3/l2t.h 2011-03-27 14:31:47.000000000 -0400
33885+++ linux-2.6.32.43/drivers/net/cxgb3/l2t.h 2011-08-05 20:33:55.000000000 -0400
33886@@ -86,7 +86,7 @@ typedef void (*arp_failure_handler_func)
33887 */
33888 struct l2t_skb_cb {
33889 arp_failure_handler_func arp_failure_handler;
33890-};
33891+} __no_const;
33892
33893 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
33894
33895diff -urNp linux-2.6.32.43/drivers/net/cxgb3/t3_hw.c linux-2.6.32.43/drivers/net/cxgb3/t3_hw.c
33896--- linux-2.6.32.43/drivers/net/cxgb3/t3_hw.c 2011-03-27 14:31:47.000000000 -0400
33897+++ linux-2.6.32.43/drivers/net/cxgb3/t3_hw.c 2011-05-16 21:46:57.000000000 -0400
33898@@ -699,6 +699,8 @@ static int get_vpd_params(struct adapter
33899 int i, addr, ret;
33900 struct t3_vpd vpd;
33901
33902+ pax_track_stack();
33903+
33904 /*
33905 * Card information is normally at VPD_BASE but some early cards had
33906 * it at 0.
33907diff -urNp linux-2.6.32.43/drivers/net/e1000e/82571.c linux-2.6.32.43/drivers/net/e1000e/82571.c
33908--- linux-2.6.32.43/drivers/net/e1000e/82571.c 2011-03-27 14:31:47.000000000 -0400
33909+++ linux-2.6.32.43/drivers/net/e1000e/82571.c 2011-08-05 20:33:55.000000000 -0400
33910@@ -245,22 +245,22 @@ static s32 e1000_init_mac_params_82571(s
33911 /* check for link */
33912 switch (hw->phy.media_type) {
33913 case e1000_media_type_copper:
33914- func->setup_physical_interface = e1000_setup_copper_link_82571;
33915- func->check_for_link = e1000e_check_for_copper_link;
33916- func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
33917+ *(void **)&func->setup_physical_interface = e1000_setup_copper_link_82571;
33918+ *(void **)&func->check_for_link = e1000e_check_for_copper_link;
33919+ *(void **)&func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
33920 break;
33921 case e1000_media_type_fiber:
33922- func->setup_physical_interface =
33923+ *(void **)&func->setup_physical_interface =
33924 e1000_setup_fiber_serdes_link_82571;
33925- func->check_for_link = e1000e_check_for_fiber_link;
33926- func->get_link_up_info =
33927+ *(void **)&func->check_for_link = e1000e_check_for_fiber_link;
33928+ *(void **)&func->get_link_up_info =
33929 e1000e_get_speed_and_duplex_fiber_serdes;
33930 break;
33931 case e1000_media_type_internal_serdes:
33932- func->setup_physical_interface =
33933+ *(void **)&func->setup_physical_interface =
33934 e1000_setup_fiber_serdes_link_82571;
33935- func->check_for_link = e1000_check_for_serdes_link_82571;
33936- func->get_link_up_info =
33937+ *(void **)&func->check_for_link = e1000_check_for_serdes_link_82571;
33938+ *(void **)&func->get_link_up_info =
33939 e1000e_get_speed_and_duplex_fiber_serdes;
33940 break;
33941 default:
33942@@ -271,12 +271,12 @@ static s32 e1000_init_mac_params_82571(s
33943 switch (hw->mac.type) {
33944 case e1000_82574:
33945 case e1000_82583:
33946- func->check_mng_mode = e1000_check_mng_mode_82574;
33947- func->led_on = e1000_led_on_82574;
33948+ *(void **)&func->check_mng_mode = e1000_check_mng_mode_82574;
33949+ *(void **)&func->led_on = e1000_led_on_82574;
33950 break;
33951 default:
33952- func->check_mng_mode = e1000e_check_mng_mode_generic;
33953- func->led_on = e1000e_led_on_generic;
33954+ *(void **)&func->check_mng_mode = e1000e_check_mng_mode_generic;
33955+ *(void **)&func->led_on = e1000e_led_on_generic;
33956 break;
33957 }
33958
33959@@ -1656,7 +1656,7 @@ static void e1000_clear_hw_cntrs_82571(s
33960 temp = er32(ICRXDMTC);
33961 }
33962
33963-static struct e1000_mac_operations e82571_mac_ops = {
33964+static const struct e1000_mac_operations e82571_mac_ops = {
33965 /* .check_mng_mode: mac type dependent */
33966 /* .check_for_link: media type dependent */
33967 .id_led_init = e1000e_id_led_init,
33968@@ -1674,7 +1674,7 @@ static struct e1000_mac_operations e8257
33969 .setup_led = e1000e_setup_led_generic,
33970 };
33971
33972-static struct e1000_phy_operations e82_phy_ops_igp = {
33973+static const struct e1000_phy_operations e82_phy_ops_igp = {
33974 .acquire_phy = e1000_get_hw_semaphore_82571,
33975 .check_reset_block = e1000e_check_reset_block_generic,
33976 .commit_phy = NULL,
33977@@ -1691,7 +1691,7 @@ static struct e1000_phy_operations e82_p
33978 .cfg_on_link_up = NULL,
33979 };
33980
33981-static struct e1000_phy_operations e82_phy_ops_m88 = {
33982+static const struct e1000_phy_operations e82_phy_ops_m88 = {
33983 .acquire_phy = e1000_get_hw_semaphore_82571,
33984 .check_reset_block = e1000e_check_reset_block_generic,
33985 .commit_phy = e1000e_phy_sw_reset,
33986@@ -1708,7 +1708,7 @@ static struct e1000_phy_operations e82_p
33987 .cfg_on_link_up = NULL,
33988 };
33989
33990-static struct e1000_phy_operations e82_phy_ops_bm = {
33991+static const struct e1000_phy_operations e82_phy_ops_bm = {
33992 .acquire_phy = e1000_get_hw_semaphore_82571,
33993 .check_reset_block = e1000e_check_reset_block_generic,
33994 .commit_phy = e1000e_phy_sw_reset,
33995@@ -1725,7 +1725,7 @@ static struct e1000_phy_operations e82_p
33996 .cfg_on_link_up = NULL,
33997 };
33998
33999-static struct e1000_nvm_operations e82571_nvm_ops = {
34000+static const struct e1000_nvm_operations e82571_nvm_ops = {
34001 .acquire_nvm = e1000_acquire_nvm_82571,
34002 .read_nvm = e1000e_read_nvm_eerd,
34003 .release_nvm = e1000_release_nvm_82571,
34004diff -urNp linux-2.6.32.43/drivers/net/e1000e/e1000.h linux-2.6.32.43/drivers/net/e1000e/e1000.h
34005--- linux-2.6.32.43/drivers/net/e1000e/e1000.h 2011-03-27 14:31:47.000000000 -0400
34006+++ linux-2.6.32.43/drivers/net/e1000e/e1000.h 2011-04-17 15:56:46.000000000 -0400
34007@@ -375,9 +375,9 @@ struct e1000_info {
34008 u32 pba;
34009 u32 max_hw_frame_size;
34010 s32 (*get_variants)(struct e1000_adapter *);
34011- struct e1000_mac_operations *mac_ops;
34012- struct e1000_phy_operations *phy_ops;
34013- struct e1000_nvm_operations *nvm_ops;
34014+ const struct e1000_mac_operations *mac_ops;
34015+ const struct e1000_phy_operations *phy_ops;
34016+ const struct e1000_nvm_operations *nvm_ops;
34017 };
34018
34019 /* hardware capability, feature, and workaround flags */
34020diff -urNp linux-2.6.32.43/drivers/net/e1000e/es2lan.c linux-2.6.32.43/drivers/net/e1000e/es2lan.c
34021--- linux-2.6.32.43/drivers/net/e1000e/es2lan.c 2011-03-27 14:31:47.000000000 -0400
34022+++ linux-2.6.32.43/drivers/net/e1000e/es2lan.c 2011-08-05 20:33:55.000000000 -0400
34023@@ -229,16 +229,16 @@ static s32 e1000_init_mac_params_80003es
34024 /* check for link */
34025 switch (hw->phy.media_type) {
34026 case e1000_media_type_copper:
34027- func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
34028- func->check_for_link = e1000e_check_for_copper_link;
34029+ *(void **)&func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
34030+ *(void **)&func->check_for_link = e1000e_check_for_copper_link;
34031 break;
34032 case e1000_media_type_fiber:
34033- func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
34034- func->check_for_link = e1000e_check_for_fiber_link;
34035+ *(void **)&func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
34036+ *(void **)&func->check_for_link = e1000e_check_for_fiber_link;
34037 break;
34038 case e1000_media_type_internal_serdes:
34039- func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
34040- func->check_for_link = e1000e_check_for_serdes_link;
34041+ *(void **)&func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
34042+ *(void **)&func->check_for_link = e1000e_check_for_serdes_link;
34043 break;
34044 default:
34045 return -E1000_ERR_CONFIG;
34046@@ -1365,7 +1365,7 @@ static void e1000_clear_hw_cntrs_80003es
34047 temp = er32(ICRXDMTC);
34048 }
34049
34050-static struct e1000_mac_operations es2_mac_ops = {
34051+static const struct e1000_mac_operations es2_mac_ops = {
34052 .id_led_init = e1000e_id_led_init,
34053 .check_mng_mode = e1000e_check_mng_mode_generic,
34054 /* check_for_link dependent on media type */
34055@@ -1383,7 +1383,7 @@ static struct e1000_mac_operations es2_m
34056 .setup_led = e1000e_setup_led_generic,
34057 };
34058
34059-static struct e1000_phy_operations es2_phy_ops = {
34060+static const struct e1000_phy_operations es2_phy_ops = {
34061 .acquire_phy = e1000_acquire_phy_80003es2lan,
34062 .check_reset_block = e1000e_check_reset_block_generic,
34063 .commit_phy = e1000e_phy_sw_reset,
34064@@ -1400,7 +1400,7 @@ static struct e1000_phy_operations es2_p
34065 .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan,
34066 };
34067
34068-static struct e1000_nvm_operations es2_nvm_ops = {
34069+static const struct e1000_nvm_operations es2_nvm_ops = {
34070 .acquire_nvm = e1000_acquire_nvm_80003es2lan,
34071 .read_nvm = e1000e_read_nvm_eerd,
34072 .release_nvm = e1000_release_nvm_80003es2lan,
34073diff -urNp linux-2.6.32.43/drivers/net/e1000e/hw.h linux-2.6.32.43/drivers/net/e1000e/hw.h
34074--- linux-2.6.32.43/drivers/net/e1000e/hw.h 2011-03-27 14:31:47.000000000 -0400
34075+++ linux-2.6.32.43/drivers/net/e1000e/hw.h 2011-04-17 15:56:46.000000000 -0400
34076@@ -756,34 +756,34 @@ struct e1000_mac_operations {
34077
34078 /* Function pointers for the PHY. */
34079 struct e1000_phy_operations {
34080- s32 (*acquire_phy)(struct e1000_hw *);
34081- s32 (*check_polarity)(struct e1000_hw *);
34082- s32 (*check_reset_block)(struct e1000_hw *);
34083- s32 (*commit_phy)(struct e1000_hw *);
34084- s32 (*force_speed_duplex)(struct e1000_hw *);
34085- s32 (*get_cfg_done)(struct e1000_hw *hw);
34086- s32 (*get_cable_length)(struct e1000_hw *);
34087- s32 (*get_phy_info)(struct e1000_hw *);
34088- s32 (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
34089- s32 (*read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
34090- void (*release_phy)(struct e1000_hw *);
34091- s32 (*reset_phy)(struct e1000_hw *);
34092- s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
34093- s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
34094- s32 (*write_phy_reg)(struct e1000_hw *, u32, u16);
34095- s32 (*write_phy_reg_locked)(struct e1000_hw *, u32, u16);
34096- s32 (*cfg_on_link_up)(struct e1000_hw *);
34097+ s32 (* acquire_phy)(struct e1000_hw *);
34098+ s32 (* check_polarity)(struct e1000_hw *);
34099+ s32 (* check_reset_block)(struct e1000_hw *);
34100+ s32 (* commit_phy)(struct e1000_hw *);
34101+ s32 (* force_speed_duplex)(struct e1000_hw *);
34102+ s32 (* get_cfg_done)(struct e1000_hw *hw);
34103+ s32 (* get_cable_length)(struct e1000_hw *);
34104+ s32 (* get_phy_info)(struct e1000_hw *);
34105+ s32 (* read_phy_reg)(struct e1000_hw *, u32, u16 *);
34106+ s32 (* read_phy_reg_locked)(struct e1000_hw *, u32, u16 *);
34107+ void (* release_phy)(struct e1000_hw *);
34108+ s32 (* reset_phy)(struct e1000_hw *);
34109+ s32 (* set_d0_lplu_state)(struct e1000_hw *, bool);
34110+ s32 (* set_d3_lplu_state)(struct e1000_hw *, bool);
34111+ s32 (* write_phy_reg)(struct e1000_hw *, u32, u16);
34112+ s32 (* write_phy_reg_locked)(struct e1000_hw *, u32, u16);
34113+ s32 (* cfg_on_link_up)(struct e1000_hw *);
34114 };
34115
34116 /* Function pointers for the NVM. */
34117 struct e1000_nvm_operations {
34118- s32 (*acquire_nvm)(struct e1000_hw *);
34119- s32 (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
34120- void (*release_nvm)(struct e1000_hw *);
34121- s32 (*update_nvm)(struct e1000_hw *);
34122- s32 (*valid_led_default)(struct e1000_hw *, u16 *);
34123- s32 (*validate_nvm)(struct e1000_hw *);
34124- s32 (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
34125+ s32 (* const acquire_nvm)(struct e1000_hw *);
34126+ s32 (* const read_nvm)(struct e1000_hw *, u16, u16, u16 *);
34127+ void (* const release_nvm)(struct e1000_hw *);
34128+ s32 (* const update_nvm)(struct e1000_hw *);
34129+ s32 (* const valid_led_default)(struct e1000_hw *, u16 *);
34130+ s32 (* const validate_nvm)(struct e1000_hw *);
34131+ s32 (* const write_nvm)(struct e1000_hw *, u16, u16, u16 *);
34132 };
34133
34134 struct e1000_mac_info {
34135diff -urNp linux-2.6.32.43/drivers/net/e1000e/ich8lan.c linux-2.6.32.43/drivers/net/e1000e/ich8lan.c
34136--- linux-2.6.32.43/drivers/net/e1000e/ich8lan.c 2011-05-10 22:12:01.000000000 -0400
34137+++ linux-2.6.32.43/drivers/net/e1000e/ich8lan.c 2011-08-05 20:33:55.000000000 -0400
34138@@ -265,13 +265,13 @@ static s32 e1000_init_phy_params_pchlan(
34139 phy->addr = 1;
34140 phy->reset_delay_us = 100;
34141
34142- phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
34143- phy->ops.read_phy_reg = e1000_read_phy_reg_hv;
34144- phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked;
34145- phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
34146- phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
34147- phy->ops.write_phy_reg = e1000_write_phy_reg_hv;
34148- phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
34149+ *(void **)&phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
34150+ *(void **)&phy->ops.read_phy_reg = e1000_read_phy_reg_hv;
34151+ *(void **)&phy->ops.read_phy_reg_locked = e1000_read_phy_reg_hv_locked;
34152+ *(void **)&phy->ops.set_d0_lplu_state = e1000_set_lplu_state_pchlan;
34153+ *(void **)&phy->ops.set_d3_lplu_state = e1000_set_lplu_state_pchlan;
34154+ *(void **)&phy->ops.write_phy_reg = e1000_write_phy_reg_hv;
34155+ *(void **)&phy->ops.write_phy_reg_locked = e1000_write_phy_reg_hv_locked;
34156 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
34157
34158 /*
34159@@ -289,12 +289,12 @@ static s32 e1000_init_phy_params_pchlan(
34160 phy->type = e1000e_get_phy_type_from_id(phy->id);
34161
34162 if (phy->type == e1000_phy_82577) {
34163- phy->ops.check_polarity = e1000_check_polarity_82577;
34164- phy->ops.force_speed_duplex =
34165+ *(void **)&phy->ops.check_polarity = e1000_check_polarity_82577;
34166+ *(void **)&phy->ops.force_speed_duplex =
34167 e1000_phy_force_speed_duplex_82577;
34168- phy->ops.get_cable_length = e1000_get_cable_length_82577;
34169- phy->ops.get_phy_info = e1000_get_phy_info_82577;
34170- phy->ops.commit_phy = e1000e_phy_sw_reset;
34171+ *(void **)&phy->ops.get_cable_length = e1000_get_cable_length_82577;
34172+ *(void **)&phy->ops.get_phy_info = e1000_get_phy_info_82577;
34173+ *(void **)&phy->ops.commit_phy = e1000e_phy_sw_reset;
34174 }
34175
34176 out:
34177@@ -322,8 +322,8 @@ static s32 e1000_init_phy_params_ich8lan
34178 */
34179 ret_val = e1000e_determine_phy_address(hw);
34180 if (ret_val) {
34181- hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
34182- hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
34183+ *(void **)&hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
34184+ *(void **)&hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
34185 ret_val = e1000e_determine_phy_address(hw);
34186 if (ret_val)
34187 return ret_val;
34188@@ -343,8 +343,8 @@ static s32 e1000_init_phy_params_ich8lan
34189 case IGP03E1000_E_PHY_ID:
34190 phy->type = e1000_phy_igp_3;
34191 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
34192- phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked;
34193- phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked;
34194+ *(void **)&phy->ops.read_phy_reg_locked = e1000e_read_phy_reg_igp_locked;
34195+ *(void **)&phy->ops.write_phy_reg_locked = e1000e_write_phy_reg_igp_locked;
34196 break;
34197 case IFE_E_PHY_ID:
34198 case IFE_PLUS_E_PHY_ID:
34199@@ -355,16 +355,16 @@ static s32 e1000_init_phy_params_ich8lan
34200 case BME1000_E_PHY_ID:
34201 phy->type = e1000_phy_bm;
34202 phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
34203- hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
34204- hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
34205- hw->phy.ops.commit_phy = e1000e_phy_sw_reset;
34206+ *(void **)&hw->phy.ops.read_phy_reg = e1000e_read_phy_reg_bm;
34207+ *(void **)&hw->phy.ops.write_phy_reg = e1000e_write_phy_reg_bm;
34208+ *(void **)&hw->phy.ops.commit_phy = e1000e_phy_sw_reset;
34209 break;
34210 default:
34211 return -E1000_ERR_PHY;
34212 break;
34213 }
34214
34215- phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
34216+ *(void **)&phy->ops.check_polarity = e1000_check_polarity_ife_ich8lan;
34217
34218 return 0;
34219 }
34220@@ -455,25 +455,25 @@ static s32 e1000_init_mac_params_ich8lan
34221 case e1000_ich9lan:
34222 case e1000_ich10lan:
34223 /* ID LED init */
34224- mac->ops.id_led_init = e1000e_id_led_init;
34225+ *(void **)&mac->ops.id_led_init = e1000e_id_led_init;
34226 /* setup LED */
34227- mac->ops.setup_led = e1000e_setup_led_generic;
34228+ *(void **)&mac->ops.setup_led = e1000e_setup_led_generic;
34229 /* cleanup LED */
34230- mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
34231+ *(void **)&mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
34232 /* turn on/off LED */
34233- mac->ops.led_on = e1000_led_on_ich8lan;
34234- mac->ops.led_off = e1000_led_off_ich8lan;
34235+ *(void **)&mac->ops.led_on = e1000_led_on_ich8lan;
34236+ *(void **)&mac->ops.led_off = e1000_led_off_ich8lan;
34237 break;
34238 case e1000_pchlan:
34239 /* ID LED init */
34240- mac->ops.id_led_init = e1000_id_led_init_pchlan;
34241+ *(void **)&mac->ops.id_led_init = e1000_id_led_init_pchlan;
34242 /* setup LED */
34243- mac->ops.setup_led = e1000_setup_led_pchlan;
34244+ *(void **)&mac->ops.setup_led = e1000_setup_led_pchlan;
34245 /* cleanup LED */
34246- mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
34247+ *(void **)&mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
34248 /* turn on/off LED */
34249- mac->ops.led_on = e1000_led_on_pchlan;
34250- mac->ops.led_off = e1000_led_off_pchlan;
34251+ *(void **)&mac->ops.led_on = e1000_led_on_pchlan;
34252+ *(void **)&mac->ops.led_off = e1000_led_off_pchlan;
34253 break;
34254 default:
34255 break;
34256@@ -3463,7 +3463,7 @@ static void e1000_clear_hw_cntrs_ich8lan
34257 }
34258 }
34259
34260-static struct e1000_mac_operations ich8_mac_ops = {
34261+static const struct e1000_mac_operations ich8_mac_ops = {
34262 .id_led_init = e1000e_id_led_init,
34263 .check_mng_mode = e1000_check_mng_mode_ich8lan,
34264 .check_for_link = e1000_check_for_copper_link_ich8lan,
34265@@ -3481,7 +3481,7 @@ static struct e1000_mac_operations ich8_
34266 /* id_led_init dependent on mac type */
34267 };
34268
34269-static struct e1000_phy_operations ich8_phy_ops = {
34270+static const struct e1000_phy_operations ich8_phy_ops = {
34271 .acquire_phy = e1000_acquire_swflag_ich8lan,
34272 .check_reset_block = e1000_check_reset_block_ich8lan,
34273 .commit_phy = NULL,
34274@@ -3497,7 +3497,7 @@ static struct e1000_phy_operations ich8_
34275 .write_phy_reg = e1000e_write_phy_reg_igp,
34276 };
34277
34278-static struct e1000_nvm_operations ich8_nvm_ops = {
34279+static const struct e1000_nvm_operations ich8_nvm_ops = {
34280 .acquire_nvm = e1000_acquire_nvm_ich8lan,
34281 .read_nvm = e1000_read_nvm_ich8lan,
34282 .release_nvm = e1000_release_nvm_ich8lan,
34283diff -urNp linux-2.6.32.43/drivers/net/e1000e/netdev.c linux-2.6.32.43/drivers/net/e1000e/netdev.c
34284--- linux-2.6.32.43/drivers/net/e1000e/netdev.c 2011-03-27 14:31:47.000000000 -0400
34285+++ linux-2.6.32.43/drivers/net/e1000e/netdev.c 2011-08-05 20:33:55.000000000 -0400
34286@@ -5071,9 +5071,9 @@ static int __devinit e1000_probe(struct
34287
34288 err = -EIO;
34289
34290- memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
34291- memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
34292- memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
34293+ memcpy((void *)&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
34294+ memcpy((void *)&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
34295+ memcpy((void *)&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
34296
34297 err = ei->get_variants(adapter);
34298 if (err)
34299diff -urNp linux-2.6.32.43/drivers/net/hamradio/6pack.c linux-2.6.32.43/drivers/net/hamradio/6pack.c
34300--- linux-2.6.32.43/drivers/net/hamradio/6pack.c 2011-07-13 17:23:04.000000000 -0400
34301+++ linux-2.6.32.43/drivers/net/hamradio/6pack.c 2011-07-13 17:23:18.000000000 -0400
34302@@ -461,6 +461,8 @@ static void sixpack_receive_buf(struct t
34303 unsigned char buf[512];
34304 int count1;
34305
34306+ pax_track_stack();
34307+
34308 if (!count)
34309 return;
34310
34311diff -urNp linux-2.6.32.43/drivers/net/ibmveth.c linux-2.6.32.43/drivers/net/ibmveth.c
34312--- linux-2.6.32.43/drivers/net/ibmveth.c 2011-03-27 14:31:47.000000000 -0400
34313+++ linux-2.6.32.43/drivers/net/ibmveth.c 2011-04-17 15:56:46.000000000 -0400
34314@@ -1577,7 +1577,7 @@ static struct attribute * veth_pool_attr
34315 NULL,
34316 };
34317
34318-static struct sysfs_ops veth_pool_ops = {
34319+static const struct sysfs_ops veth_pool_ops = {
34320 .show = veth_pool_show,
34321 .store = veth_pool_store,
34322 };
34323diff -urNp linux-2.6.32.43/drivers/net/igb/e1000_82575.c linux-2.6.32.43/drivers/net/igb/e1000_82575.c
34324--- linux-2.6.32.43/drivers/net/igb/e1000_82575.c 2011-03-27 14:31:47.000000000 -0400
34325+++ linux-2.6.32.43/drivers/net/igb/e1000_82575.c 2011-08-05 20:33:55.000000000 -0400
34326@@ -135,7 +135,7 @@ static s32 igb_get_invariants_82575(stru
34327 ? true : false;
34328
34329 /* physical interface link setup */
34330- mac->ops.setup_physical_interface =
34331+ *(void **)&mac->ops.setup_physical_interface =
34332 (hw->phy.media_type == e1000_media_type_copper)
34333 ? igb_setup_copper_link_82575
34334 : igb_setup_serdes_link_82575;
34335@@ -191,13 +191,13 @@ static s32 igb_get_invariants_82575(stru
34336
34337 /* PHY function pointers */
34338 if (igb_sgmii_active_82575(hw)) {
34339- phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
34340- phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
34341- phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
34342+ *(void **)&phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
34343+ *(void **)&phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
34344+ *(void **)&phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
34345 } else {
34346- phy->ops.reset = igb_phy_hw_reset;
34347- phy->ops.read_reg = igb_read_phy_reg_igp;
34348- phy->ops.write_reg = igb_write_phy_reg_igp;
34349+ *(void **)&phy->ops.reset = igb_phy_hw_reset;
34350+ *(void **)&phy->ops.read_reg = igb_read_phy_reg_igp;
34351+ *(void **)&phy->ops.write_reg = igb_write_phy_reg_igp;
34352 }
34353
34354 /* set lan id */
34355@@ -213,17 +213,17 @@ static s32 igb_get_invariants_82575(stru
34356 switch (phy->id) {
34357 case M88E1111_I_PHY_ID:
34358 phy->type = e1000_phy_m88;
34359- phy->ops.get_phy_info = igb_get_phy_info_m88;
34360- phy->ops.get_cable_length = igb_get_cable_length_m88;
34361- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
34362+ *(void **)&phy->ops.get_phy_info = igb_get_phy_info_m88;
34363+ *(void **)&phy->ops.get_cable_length = igb_get_cable_length_m88;
34364+ *(void **)&phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
34365 break;
34366 case IGP03E1000_E_PHY_ID:
34367 phy->type = e1000_phy_igp_3;
34368- phy->ops.get_phy_info = igb_get_phy_info_igp;
34369- phy->ops.get_cable_length = igb_get_cable_length_igp_2;
34370- phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
34371- phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
34372- phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
34373+ *(void **)&phy->ops.get_phy_info = igb_get_phy_info_igp;
34374+ *(void **)&phy->ops.get_cable_length = igb_get_cable_length_igp_2;
34375+ *(void **)&phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
34376+ *(void **)&phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
34377+ *(void **)&phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
34378 break;
34379 default:
34380 return -E1000_ERR_PHY;
34381@@ -1410,7 +1410,7 @@ void igb_vmdq_set_replication_pf(struct
34382 wr32(E1000_VT_CTL, vt_ctl);
34383 }
34384
34385-static struct e1000_mac_operations e1000_mac_ops_82575 = {
34386+static const struct e1000_mac_operations e1000_mac_ops_82575 = {
34387 .reset_hw = igb_reset_hw_82575,
34388 .init_hw = igb_init_hw_82575,
34389 .check_for_link = igb_check_for_link_82575,
34390@@ -1419,13 +1419,13 @@ static struct e1000_mac_operations e1000
34391 .get_speed_and_duplex = igb_get_speed_and_duplex_copper,
34392 };
34393
34394-static struct e1000_phy_operations e1000_phy_ops_82575 = {
34395+static const struct e1000_phy_operations e1000_phy_ops_82575 = {
34396 .acquire = igb_acquire_phy_82575,
34397 .get_cfg_done = igb_get_cfg_done_82575,
34398 .release = igb_release_phy_82575,
34399 };
34400
34401-static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
34402+static const struct e1000_nvm_operations e1000_nvm_ops_82575 = {
34403 .acquire = igb_acquire_nvm_82575,
34404 .read = igb_read_nvm_eerd,
34405 .release = igb_release_nvm_82575,
34406diff -urNp linux-2.6.32.43/drivers/net/igb/e1000_hw.h linux-2.6.32.43/drivers/net/igb/e1000_hw.h
34407--- linux-2.6.32.43/drivers/net/igb/e1000_hw.h 2011-03-27 14:31:47.000000000 -0400
34408+++ linux-2.6.32.43/drivers/net/igb/e1000_hw.h 2011-04-17 15:56:46.000000000 -0400
34409@@ -305,17 +305,17 @@ struct e1000_phy_operations {
34410 };
34411
34412 struct e1000_nvm_operations {
34413- s32 (*acquire)(struct e1000_hw *);
34414- s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
34415- void (*release)(struct e1000_hw *);
34416- s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
34417+ s32 (* const acquire)(struct e1000_hw *);
34418+ s32 (* const read)(struct e1000_hw *, u16, u16, u16 *);
34419+ void (* const release)(struct e1000_hw *);
34420+ s32 (* const write)(struct e1000_hw *, u16, u16, u16 *);
34421 };
34422
34423 struct e1000_info {
34424 s32 (*get_invariants)(struct e1000_hw *);
34425- struct e1000_mac_operations *mac_ops;
34426- struct e1000_phy_operations *phy_ops;
34427- struct e1000_nvm_operations *nvm_ops;
34428+ const struct e1000_mac_operations *mac_ops;
34429+ const struct e1000_phy_operations *phy_ops;
34430+ const struct e1000_nvm_operations *nvm_ops;
34431 };
34432
34433 extern const struct e1000_info e1000_82575_info;
34434diff -urNp linux-2.6.32.43/drivers/net/igb/e1000_mbx.c linux-2.6.32.43/drivers/net/igb/e1000_mbx.c
34435--- linux-2.6.32.43/drivers/net/igb/e1000_mbx.c 2011-03-27 14:31:47.000000000 -0400
34436+++ linux-2.6.32.43/drivers/net/igb/e1000_mbx.c 2011-08-05 20:33:55.000000000 -0400
34437@@ -414,13 +414,13 @@ s32 igb_init_mbx_params_pf(struct e1000_
34438
34439 mbx->size = E1000_VFMAILBOX_SIZE;
34440
34441- mbx->ops.read = igb_read_mbx_pf;
34442- mbx->ops.write = igb_write_mbx_pf;
34443- mbx->ops.read_posted = igb_read_posted_mbx;
34444- mbx->ops.write_posted = igb_write_posted_mbx;
34445- mbx->ops.check_for_msg = igb_check_for_msg_pf;
34446- mbx->ops.check_for_ack = igb_check_for_ack_pf;
34447- mbx->ops.check_for_rst = igb_check_for_rst_pf;
34448+ *(void **)&mbx->ops.read = igb_read_mbx_pf;
34449+ *(void **)&mbx->ops.write = igb_write_mbx_pf;
34450+ *(void **)&mbx->ops.read_posted = igb_read_posted_mbx;
34451+ *(void **)&mbx->ops.write_posted = igb_write_posted_mbx;
34452+ *(void **)&mbx->ops.check_for_msg = igb_check_for_msg_pf;
34453+ *(void **)&mbx->ops.check_for_ack = igb_check_for_ack_pf;
34454+ *(void **)&mbx->ops.check_for_rst = igb_check_for_rst_pf;
34455
34456 mbx->stats.msgs_tx = 0;
34457 mbx->stats.msgs_rx = 0;
34458diff -urNp linux-2.6.32.43/drivers/net/igb/igb_main.c linux-2.6.32.43/drivers/net/igb/igb_main.c
34459--- linux-2.6.32.43/drivers/net/igb/igb_main.c 2011-03-27 14:31:47.000000000 -0400
34460+++ linux-2.6.32.43/drivers/net/igb/igb_main.c 2011-08-05 20:33:55.000000000 -0400
34461@@ -1295,9 +1295,9 @@ static int __devinit igb_probe(struct pc
34462 /* setup the private structure */
34463 hw->back = adapter;
34464 /* Copy the default MAC, PHY and NVM function pointers */
34465- memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
34466- memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
34467- memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
34468+ memcpy((void *)&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
34469+ memcpy((void *)&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
34470+ memcpy((void *)&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
34471 /* Initialize skew-specific constants */
34472 err = ei->get_invariants(hw);
34473 if (err)
34474diff -urNp linux-2.6.32.43/drivers/net/igbvf/mbx.c linux-2.6.32.43/drivers/net/igbvf/mbx.c
34475--- linux-2.6.32.43/drivers/net/igbvf/mbx.c 2011-03-27 14:31:47.000000000 -0400
34476+++ linux-2.6.32.43/drivers/net/igbvf/mbx.c 2011-08-05 20:33:55.000000000 -0400
34477@@ -331,13 +331,13 @@ s32 e1000_init_mbx_params_vf(struct e100
34478
34479 mbx->size = E1000_VFMAILBOX_SIZE;
34480
34481- mbx->ops.read = e1000_read_mbx_vf;
34482- mbx->ops.write = e1000_write_mbx_vf;
34483- mbx->ops.read_posted = e1000_read_posted_mbx;
34484- mbx->ops.write_posted = e1000_write_posted_mbx;
34485- mbx->ops.check_for_msg = e1000_check_for_msg_vf;
34486- mbx->ops.check_for_ack = e1000_check_for_ack_vf;
34487- mbx->ops.check_for_rst = e1000_check_for_rst_vf;
34488+ *(void **)&mbx->ops.read = e1000_read_mbx_vf;
34489+ *(void **)&mbx->ops.write = e1000_write_mbx_vf;
34490+ *(void **)&mbx->ops.read_posted = e1000_read_posted_mbx;
34491+ *(void **)&mbx->ops.write_posted = e1000_write_posted_mbx;
34492+ *(void **)&mbx->ops.check_for_msg = e1000_check_for_msg_vf;
34493+ *(void **)&mbx->ops.check_for_ack = e1000_check_for_ack_vf;
34494+ *(void **)&mbx->ops.check_for_rst = e1000_check_for_rst_vf;
34495
34496 mbx->stats.msgs_tx = 0;
34497 mbx->stats.msgs_rx = 0;
34498diff -urNp linux-2.6.32.43/drivers/net/igbvf/vf.c linux-2.6.32.43/drivers/net/igbvf/vf.c
34499--- linux-2.6.32.43/drivers/net/igbvf/vf.c 2011-03-27 14:31:47.000000000 -0400
34500+++ linux-2.6.32.43/drivers/net/igbvf/vf.c 2011-08-05 20:33:55.000000000 -0400
34501@@ -55,21 +55,21 @@ static s32 e1000_init_mac_params_vf(stru
34502
34503 /* Function pointers */
34504 /* reset */
34505- mac->ops.reset_hw = e1000_reset_hw_vf;
34506+ *(void **)&mac->ops.reset_hw = e1000_reset_hw_vf;
34507 /* hw initialization */
34508- mac->ops.init_hw = e1000_init_hw_vf;
34509+ *(void **)&mac->ops.init_hw = e1000_init_hw_vf;
34510 /* check for link */
34511- mac->ops.check_for_link = e1000_check_for_link_vf;
34512+ *(void **)&mac->ops.check_for_link = e1000_check_for_link_vf;
34513 /* link info */
34514- mac->ops.get_link_up_info = e1000_get_link_up_info_vf;
34515+ *(void **)&mac->ops.get_link_up_info = e1000_get_link_up_info_vf;
34516 /* multicast address update */
34517- mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf;
34518+ *(void **)&mac->ops.update_mc_addr_list = e1000_update_mc_addr_list_vf;
34519 /* set mac address */
34520- mac->ops.rar_set = e1000_rar_set_vf;
34521+ *(void **)&mac->ops.rar_set = e1000_rar_set_vf;
34522 /* read mac address */
34523- mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
34524+ *(void **)&mac->ops.read_mac_addr = e1000_read_mac_addr_vf;
34525 /* set vlan filter table array */
34526- mac->ops.set_vfta = e1000_set_vfta_vf;
34527+ *(void **)&mac->ops.set_vfta = e1000_set_vfta_vf;
34528
34529 return E1000_SUCCESS;
34530 }
34531@@ -80,8 +80,8 @@ static s32 e1000_init_mac_params_vf(stru
34532 **/
34533 void e1000_init_function_pointers_vf(struct e1000_hw *hw)
34534 {
34535- hw->mac.ops.init_params = e1000_init_mac_params_vf;
34536- hw->mbx.ops.init_params = e1000_init_mbx_params_vf;
34537+ *(void **)&hw->mac.ops.init_params = e1000_init_mac_params_vf;
34538+ *(void **)&hw->mbx.ops.init_params = e1000_init_mbx_params_vf;
34539 }
34540
34541 /**
34542diff -urNp linux-2.6.32.43/drivers/net/iseries_veth.c linux-2.6.32.43/drivers/net/iseries_veth.c
34543--- linux-2.6.32.43/drivers/net/iseries_veth.c 2011-03-27 14:31:47.000000000 -0400
34544+++ linux-2.6.32.43/drivers/net/iseries_veth.c 2011-04-17 15:56:46.000000000 -0400
34545@@ -384,7 +384,7 @@ static struct attribute *veth_cnx_defaul
34546 NULL
34547 };
34548
34549-static struct sysfs_ops veth_cnx_sysfs_ops = {
34550+static const struct sysfs_ops veth_cnx_sysfs_ops = {
34551 .show = veth_cnx_attribute_show
34552 };
34553
34554@@ -441,7 +441,7 @@ static struct attribute *veth_port_defau
34555 NULL
34556 };
34557
34558-static struct sysfs_ops veth_port_sysfs_ops = {
34559+static const struct sysfs_ops veth_port_sysfs_ops = {
34560 .show = veth_port_attribute_show
34561 };
34562
34563diff -urNp linux-2.6.32.43/drivers/net/ixgb/ixgb_main.c linux-2.6.32.43/drivers/net/ixgb/ixgb_main.c
34564--- linux-2.6.32.43/drivers/net/ixgb/ixgb_main.c 2011-03-27 14:31:47.000000000 -0400
34565+++ linux-2.6.32.43/drivers/net/ixgb/ixgb_main.c 2011-05-16 21:46:57.000000000 -0400
34566@@ -1052,6 +1052,8 @@ ixgb_set_multi(struct net_device *netdev
34567 u32 rctl;
34568 int i;
34569
34570+ pax_track_stack();
34571+
34572 /* Check for Promiscuous and All Multicast modes */
34573
34574 rctl = IXGB_READ_REG(hw, RCTL);
34575diff -urNp linux-2.6.32.43/drivers/net/ixgb/ixgb_param.c linux-2.6.32.43/drivers/net/ixgb/ixgb_param.c
34576--- linux-2.6.32.43/drivers/net/ixgb/ixgb_param.c 2011-03-27 14:31:47.000000000 -0400
34577+++ linux-2.6.32.43/drivers/net/ixgb/ixgb_param.c 2011-05-16 21:46:57.000000000 -0400
34578@@ -260,6 +260,9 @@ void __devinit
34579 ixgb_check_options(struct ixgb_adapter *adapter)
34580 {
34581 int bd = adapter->bd_number;
34582+
34583+ pax_track_stack();
34584+
34585 if (bd >= IXGB_MAX_NIC) {
34586 printk(KERN_NOTICE
34587 "Warning: no configuration for board #%i\n", bd);
34588diff -urNp linux-2.6.32.43/drivers/net/ixgbe/ixgbe_82598.c linux-2.6.32.43/drivers/net/ixgbe/ixgbe_82598.c
34589--- linux-2.6.32.43/drivers/net/ixgbe/ixgbe_82598.c 2011-03-27 14:31:47.000000000 -0400
34590+++ linux-2.6.32.43/drivers/net/ixgbe/ixgbe_82598.c 2011-08-05 20:33:55.000000000 -0400
34591@@ -154,19 +154,19 @@ static s32 ixgbe_init_phy_ops_82598(stru
34592
34593 /* Overwrite the link function pointers if copper PHY */
34594 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
34595- mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
34596- mac->ops.get_link_capabilities =
34597+ *(void **)&mac->ops.setup_link = &ixgbe_setup_copper_link_82598;
34598+ *(void **)&mac->ops.get_link_capabilities =
34599 &ixgbe_get_copper_link_capabilities_82598;
34600 }
34601
34602 switch (hw->phy.type) {
34603 case ixgbe_phy_tn:
34604- phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34605- phy->ops.get_firmware_version =
34606+ *(void **)&phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34607+ *(void **)&phy->ops.get_firmware_version =
34608 &ixgbe_get_phy_firmware_version_tnx;
34609 break;
34610 case ixgbe_phy_nl:
34611- phy->ops.reset = &ixgbe_reset_phy_nl;
34612+ *(void **)&phy->ops.reset = &ixgbe_reset_phy_nl;
34613
34614 /* Call SFP+ identify routine to get the SFP+ module type */
34615 ret_val = phy->ops.identify_sfp(hw);
34616diff -urNp linux-2.6.32.43/drivers/net/ixgbe/ixgbe_82599.c linux-2.6.32.43/drivers/net/ixgbe/ixgbe_82599.c
34617--- linux-2.6.32.43/drivers/net/ixgbe/ixgbe_82599.c 2011-03-27 14:31:47.000000000 -0400
34618+++ linux-2.6.32.43/drivers/net/ixgbe/ixgbe_82599.c 2011-08-05 20:33:55.000000000 -0400
34619@@ -62,9 +62,9 @@ static void ixgbe_init_mac_link_ops_8259
34620 struct ixgbe_mac_info *mac = &hw->mac;
34621 if (hw->phy.multispeed_fiber) {
34622 /* Set up dual speed SFP+ support */
34623- mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
34624+ *(void **)&mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
34625 } else {
34626- mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
34627+ *(void **)&mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
34628 }
34629 }
34630
34631@@ -76,7 +76,7 @@ static s32 ixgbe_setup_sfp_modules_82599
34632 if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) {
34633 ixgbe_init_mac_link_ops_82599(hw);
34634
34635- hw->phy.ops.reset = NULL;
34636+ *(void **)&hw->phy.ops.reset = NULL;
34637
34638 ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset,
34639 &data_offset);
34640@@ -171,16 +171,16 @@ static s32 ixgbe_init_phy_ops_82599(stru
34641
34642 /* If copper media, overwrite with copper function pointers */
34643 if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) {
34644- mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
34645- mac->ops.get_link_capabilities =
34646+ *(void **)&mac->ops.setup_link = &ixgbe_setup_copper_link_82599;
34647+ *(void **)&mac->ops.get_link_capabilities =
34648 &ixgbe_get_copper_link_capabilities_82599;
34649 }
34650
34651 /* Set necessary function pointers based on phy type */
34652 switch (hw->phy.type) {
34653 case ixgbe_phy_tn:
34654- phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34655- phy->ops.get_firmware_version =
34656+ *(void **)&phy->ops.check_link = &ixgbe_check_phy_link_tnx;
34657+ *(void **)&phy->ops.get_firmware_version =
34658 &ixgbe_get_phy_firmware_version_tnx;
34659 break;
34660 default:
34661diff -urNp linux-2.6.32.43/drivers/net/ixgbe/ixgbe_main.c linux-2.6.32.43/drivers/net/ixgbe/ixgbe_main.c
34662--- linux-2.6.32.43/drivers/net/ixgbe/ixgbe_main.c 2011-03-27 14:31:47.000000000 -0400
34663+++ linux-2.6.32.43/drivers/net/ixgbe/ixgbe_main.c 2011-08-05 20:33:55.000000000 -0400
34664@@ -5638,18 +5638,18 @@ static int __devinit ixgbe_probe(struct
34665 adapter->bd_number = cards_found;
34666
34667 /* Setup hw api */
34668- memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
34669+ memcpy((void *)&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops));
34670 hw->mac.type = ii->mac;
34671
34672 /* EEPROM */
34673- memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
34674+ memcpy((void *)&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops));
34675 eec = IXGBE_READ_REG(hw, IXGBE_EEC);
34676 /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */
34677 if (!(eec & (1 << 8)))
34678- hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
34679+ *(void **)&hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic;
34680
34681 /* PHY */
34682- memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
34683+ memcpy((void *)&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops));
34684 hw->phy.sfp_type = ixgbe_sfp_type_unknown;
34685 /* ixgbe_identify_phy_generic will set prtad and mmds properly */
34686 hw->phy.mdio.prtad = MDIO_PRTAD_NONE;
34687diff -urNp linux-2.6.32.43/drivers/net/mlx4/main.c linux-2.6.32.43/drivers/net/mlx4/main.c
34688--- linux-2.6.32.43/drivers/net/mlx4/main.c 2011-03-27 14:31:47.000000000 -0400
34689+++ linux-2.6.32.43/drivers/net/mlx4/main.c 2011-05-18 20:09:37.000000000 -0400
34690@@ -38,6 +38,7 @@
34691 #include <linux/errno.h>
34692 #include <linux/pci.h>
34693 #include <linux/dma-mapping.h>
34694+#include <linux/sched.h>
34695
34696 #include <linux/mlx4/device.h>
34697 #include <linux/mlx4/doorbell.h>
34698@@ -730,6 +731,8 @@ static int mlx4_init_hca(struct mlx4_dev
34699 u64 icm_size;
34700 int err;
34701
34702+ pax_track_stack();
34703+
34704 err = mlx4_QUERY_FW(dev);
34705 if (err) {
34706 if (err == -EACCES)
34707diff -urNp linux-2.6.32.43/drivers/net/niu.c linux-2.6.32.43/drivers/net/niu.c
34708--- linux-2.6.32.43/drivers/net/niu.c 2011-05-10 22:12:01.000000000 -0400
34709+++ linux-2.6.32.43/drivers/net/niu.c 2011-05-16 21:46:57.000000000 -0400
34710@@ -9128,6 +9128,8 @@ static void __devinit niu_try_msix(struc
34711 int i, num_irqs, err;
34712 u8 first_ldg;
34713
34714+ pax_track_stack();
34715+
34716 first_ldg = (NIU_NUM_LDG / parent->num_ports) * np->port;
34717 for (i = 0; i < (NIU_NUM_LDG / parent->num_ports); i++)
34718 ldg_num_map[i] = first_ldg + i;
34719diff -urNp linux-2.6.32.43/drivers/net/pcnet32.c linux-2.6.32.43/drivers/net/pcnet32.c
34720--- linux-2.6.32.43/drivers/net/pcnet32.c 2011-03-27 14:31:47.000000000 -0400
34721+++ linux-2.6.32.43/drivers/net/pcnet32.c 2011-08-05 20:33:55.000000000 -0400
34722@@ -79,7 +79,7 @@ static int cards_found;
34723 /*
34724 * VLB I/O addresses
34725 */
34726-static unsigned int pcnet32_portlist[] __initdata =
34727+static unsigned int pcnet32_portlist[] __devinitdata =
34728 { 0x300, 0x320, 0x340, 0x360, 0 };
34729
34730 static int pcnet32_debug = 0;
34731@@ -267,7 +267,7 @@ struct pcnet32_private {
34732 struct sk_buff **rx_skbuff;
34733 dma_addr_t *tx_dma_addr;
34734 dma_addr_t *rx_dma_addr;
34735- struct pcnet32_access a;
34736+ struct pcnet32_access *a;
34737 spinlock_t lock; /* Guard lock */
34738 unsigned int cur_rx, cur_tx; /* The next free ring entry */
34739 unsigned int rx_ring_size; /* current rx ring size */
34740@@ -457,9 +457,9 @@ static void pcnet32_netif_start(struct n
34741 u16 val;
34742
34743 netif_wake_queue(dev);
34744- val = lp->a.read_csr(ioaddr, CSR3);
34745+ val = lp->a->read_csr(ioaddr, CSR3);
34746 val &= 0x00ff;
34747- lp->a.write_csr(ioaddr, CSR3, val);
34748+ lp->a->write_csr(ioaddr, CSR3, val);
34749 napi_enable(&lp->napi);
34750 }
34751
34752@@ -744,7 +744,7 @@ static u32 pcnet32_get_link(struct net_d
34753 r = mii_link_ok(&lp->mii_if);
34754 } else if (lp->chip_version >= PCNET32_79C970A) {
34755 ulong ioaddr = dev->base_addr; /* card base I/O address */
34756- r = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
34757+ r = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
34758 } else { /* can not detect link on really old chips */
34759 r = 1;
34760 }
34761@@ -806,7 +806,7 @@ static int pcnet32_set_ringparam(struct
34762 pcnet32_netif_stop(dev);
34763
34764 spin_lock_irqsave(&lp->lock, flags);
34765- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34766+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34767
34768 size = min(ering->tx_pending, (unsigned int)TX_MAX_RING_SIZE);
34769
34770@@ -886,7 +886,7 @@ static void pcnet32_ethtool_test(struct
34771 static int pcnet32_loopback_test(struct net_device *dev, uint64_t * data1)
34772 {
34773 struct pcnet32_private *lp = netdev_priv(dev);
34774- struct pcnet32_access *a = &lp->a; /* access to registers */
34775+ struct pcnet32_access *a = lp->a; /* access to registers */
34776 ulong ioaddr = dev->base_addr; /* card base I/O address */
34777 struct sk_buff *skb; /* sk buff */
34778 int x, i; /* counters */
34779@@ -906,21 +906,21 @@ static int pcnet32_loopback_test(struct
34780 pcnet32_netif_stop(dev);
34781
34782 spin_lock_irqsave(&lp->lock, flags);
34783- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34784+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* stop the chip */
34785
34786 numbuffs = min(numbuffs, (int)min(lp->rx_ring_size, lp->tx_ring_size));
34787
34788 /* Reset the PCNET32 */
34789- lp->a.reset(ioaddr);
34790- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34791+ lp->a->reset(ioaddr);
34792+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34793
34794 /* switch pcnet32 to 32bit mode */
34795- lp->a.write_bcr(ioaddr, 20, 2);
34796+ lp->a->write_bcr(ioaddr, 20, 2);
34797
34798 /* purge & init rings but don't actually restart */
34799 pcnet32_restart(dev, 0x0000);
34800
34801- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34802+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34803
34804 /* Initialize Transmit buffers. */
34805 size = data_len + 15;
34806@@ -966,10 +966,10 @@ static int pcnet32_loopback_test(struct
34807
34808 /* set int loopback in CSR15 */
34809 x = a->read_csr(ioaddr, CSR15) & 0xfffc;
34810- lp->a.write_csr(ioaddr, CSR15, x | 0x0044);
34811+ lp->a->write_csr(ioaddr, CSR15, x | 0x0044);
34812
34813 teststatus = cpu_to_le16(0x8000);
34814- lp->a.write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
34815+ lp->a->write_csr(ioaddr, CSR0, CSR0_START); /* Set STRT bit */
34816
34817 /* Check status of descriptors */
34818 for (x = 0; x < numbuffs; x++) {
34819@@ -990,7 +990,7 @@ static int pcnet32_loopback_test(struct
34820 }
34821 }
34822
34823- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34824+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP); /* Set STOP bit */
34825 wmb();
34826 if (netif_msg_hw(lp) && netif_msg_pktdata(lp)) {
34827 printk(KERN_DEBUG "%s: RX loopback packets:\n", dev->name);
34828@@ -1039,7 +1039,7 @@ static int pcnet32_loopback_test(struct
34829 pcnet32_restart(dev, CSR0_NORMAL);
34830 } else {
34831 pcnet32_purge_rx_ring(dev);
34832- lp->a.write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
34833+ lp->a->write_bcr(ioaddr, 20, 4); /* return to 16bit mode */
34834 }
34835 spin_unlock_irqrestore(&lp->lock, flags);
34836
34837@@ -1049,7 +1049,7 @@ static int pcnet32_loopback_test(struct
34838 static void pcnet32_led_blink_callback(struct net_device *dev)
34839 {
34840 struct pcnet32_private *lp = netdev_priv(dev);
34841- struct pcnet32_access *a = &lp->a;
34842+ struct pcnet32_access *a = lp->a;
34843 ulong ioaddr = dev->base_addr;
34844 unsigned long flags;
34845 int i;
34846@@ -1066,7 +1066,7 @@ static void pcnet32_led_blink_callback(s
34847 static int pcnet32_phys_id(struct net_device *dev, u32 data)
34848 {
34849 struct pcnet32_private *lp = netdev_priv(dev);
34850- struct pcnet32_access *a = &lp->a;
34851+ struct pcnet32_access *a = lp->a;
34852 ulong ioaddr = dev->base_addr;
34853 unsigned long flags;
34854 int i, regs[4];
34855@@ -1112,7 +1112,7 @@ static int pcnet32_suspend(struct net_de
34856 {
34857 int csr5;
34858 struct pcnet32_private *lp = netdev_priv(dev);
34859- struct pcnet32_access *a = &lp->a;
34860+ struct pcnet32_access *a = lp->a;
34861 ulong ioaddr = dev->base_addr;
34862 int ticks;
34863
34864@@ -1388,8 +1388,8 @@ static int pcnet32_poll(struct napi_stru
34865 spin_lock_irqsave(&lp->lock, flags);
34866 if (pcnet32_tx(dev)) {
34867 /* reset the chip to clear the error condition, then restart */
34868- lp->a.reset(ioaddr);
34869- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34870+ lp->a->reset(ioaddr);
34871+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
34872 pcnet32_restart(dev, CSR0_START);
34873 netif_wake_queue(dev);
34874 }
34875@@ -1401,12 +1401,12 @@ static int pcnet32_poll(struct napi_stru
34876 __napi_complete(napi);
34877
34878 /* clear interrupt masks */
34879- val = lp->a.read_csr(ioaddr, CSR3);
34880+ val = lp->a->read_csr(ioaddr, CSR3);
34881 val &= 0x00ff;
34882- lp->a.write_csr(ioaddr, CSR3, val);
34883+ lp->a->write_csr(ioaddr, CSR3, val);
34884
34885 /* Set interrupt enable. */
34886- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN);
34887+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN);
34888
34889 spin_unlock_irqrestore(&lp->lock, flags);
34890 }
34891@@ -1429,7 +1429,7 @@ static void pcnet32_get_regs(struct net_
34892 int i, csr0;
34893 u16 *buff = ptr;
34894 struct pcnet32_private *lp = netdev_priv(dev);
34895- struct pcnet32_access *a = &lp->a;
34896+ struct pcnet32_access *a = lp->a;
34897 ulong ioaddr = dev->base_addr;
34898 unsigned long flags;
34899
34900@@ -1466,9 +1466,9 @@ static void pcnet32_get_regs(struct net_
34901 for (j = 0; j < PCNET32_MAX_PHYS; j++) {
34902 if (lp->phymask & (1 << j)) {
34903 for (i = 0; i < PCNET32_REGS_PER_PHY; i++) {
34904- lp->a.write_bcr(ioaddr, 33,
34905+ lp->a->write_bcr(ioaddr, 33,
34906 (j << 5) | i);
34907- *buff++ = lp->a.read_bcr(ioaddr, 34);
34908+ *buff++ = lp->a->read_bcr(ioaddr, 34);
34909 }
34910 }
34911 }
34912@@ -1858,7 +1858,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34913 ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
34914 lp->options |= PCNET32_PORT_FD;
34915
34916- lp->a = *a;
34917+ lp->a = a;
34918
34919 /* prior to register_netdev, dev->name is not yet correct */
34920 if (pcnet32_alloc_ring(dev, pci_name(lp->pci_dev))) {
34921@@ -1917,7 +1917,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34922 if (lp->mii) {
34923 /* lp->phycount and lp->phymask are set to 0 by memset above */
34924
34925- lp->mii_if.phy_id = ((lp->a.read_bcr(ioaddr, 33)) >> 5) & 0x1f;
34926+ lp->mii_if.phy_id = ((lp->a->read_bcr(ioaddr, 33)) >> 5) & 0x1f;
34927 /* scan for PHYs */
34928 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
34929 unsigned short id1, id2;
34930@@ -1938,7 +1938,7 @@ pcnet32_probe1(unsigned long ioaddr, int
34931 "Found PHY %04x:%04x at address %d.\n",
34932 id1, id2, i);
34933 }
34934- lp->a.write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
34935+ lp->a->write_bcr(ioaddr, 33, (lp->mii_if.phy_id) << 5);
34936 if (lp->phycount > 1) {
34937 lp->options |= PCNET32_PORT_MII;
34938 }
34939@@ -2109,10 +2109,10 @@ static int pcnet32_open(struct net_devic
34940 }
34941
34942 /* Reset the PCNET32 */
34943- lp->a.reset(ioaddr);
34944+ lp->a->reset(ioaddr);
34945
34946 /* switch pcnet32 to 32bit mode */
34947- lp->a.write_bcr(ioaddr, 20, 2);
34948+ lp->a->write_bcr(ioaddr, 20, 2);
34949
34950 if (netif_msg_ifup(lp))
34951 printk(KERN_DEBUG
34952@@ -2122,14 +2122,14 @@ static int pcnet32_open(struct net_devic
34953 (u32) (lp->init_dma_addr));
34954
34955 /* set/reset autoselect bit */
34956- val = lp->a.read_bcr(ioaddr, 2) & ~2;
34957+ val = lp->a->read_bcr(ioaddr, 2) & ~2;
34958 if (lp->options & PCNET32_PORT_ASEL)
34959 val |= 2;
34960- lp->a.write_bcr(ioaddr, 2, val);
34961+ lp->a->write_bcr(ioaddr, 2, val);
34962
34963 /* handle full duplex setting */
34964 if (lp->mii_if.full_duplex) {
34965- val = lp->a.read_bcr(ioaddr, 9) & ~3;
34966+ val = lp->a->read_bcr(ioaddr, 9) & ~3;
34967 if (lp->options & PCNET32_PORT_FD) {
34968 val |= 1;
34969 if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
34970@@ -2139,14 +2139,14 @@ static int pcnet32_open(struct net_devic
34971 if (lp->chip_version == 0x2627)
34972 val |= 3;
34973 }
34974- lp->a.write_bcr(ioaddr, 9, val);
34975+ lp->a->write_bcr(ioaddr, 9, val);
34976 }
34977
34978 /* set/reset GPSI bit in test register */
34979- val = lp->a.read_csr(ioaddr, 124) & ~0x10;
34980+ val = lp->a->read_csr(ioaddr, 124) & ~0x10;
34981 if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
34982 val |= 0x10;
34983- lp->a.write_csr(ioaddr, 124, val);
34984+ lp->a->write_csr(ioaddr, 124, val);
34985
34986 /* Allied Telesyn AT 2700/2701 FX are 100Mbit only and do not negotiate */
34987 if (pdev && pdev->subsystem_vendor == PCI_VENDOR_ID_AT &&
34988@@ -2167,24 +2167,24 @@ static int pcnet32_open(struct net_devic
34989 * duplex, and/or enable auto negotiation, and clear DANAS
34990 */
34991 if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
34992- lp->a.write_bcr(ioaddr, 32,
34993- lp->a.read_bcr(ioaddr, 32) | 0x0080);
34994+ lp->a->write_bcr(ioaddr, 32,
34995+ lp->a->read_bcr(ioaddr, 32) | 0x0080);
34996 /* disable Auto Negotiation, set 10Mpbs, HD */
34997- val = lp->a.read_bcr(ioaddr, 32) & ~0xb8;
34998+ val = lp->a->read_bcr(ioaddr, 32) & ~0xb8;
34999 if (lp->options & PCNET32_PORT_FD)
35000 val |= 0x10;
35001 if (lp->options & PCNET32_PORT_100)
35002 val |= 0x08;
35003- lp->a.write_bcr(ioaddr, 32, val);
35004+ lp->a->write_bcr(ioaddr, 32, val);
35005 } else {
35006 if (lp->options & PCNET32_PORT_ASEL) {
35007- lp->a.write_bcr(ioaddr, 32,
35008- lp->a.read_bcr(ioaddr,
35009+ lp->a->write_bcr(ioaddr, 32,
35010+ lp->a->read_bcr(ioaddr,
35011 32) | 0x0080);
35012 /* enable auto negotiate, setup, disable fd */
35013- val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
35014+ val = lp->a->read_bcr(ioaddr, 32) & ~0x98;
35015 val |= 0x20;
35016- lp->a.write_bcr(ioaddr, 32, val);
35017+ lp->a->write_bcr(ioaddr, 32, val);
35018 }
35019 }
35020 } else {
35021@@ -2197,10 +2197,10 @@ static int pcnet32_open(struct net_devic
35022 * There is really no good other way to handle multiple PHYs
35023 * other than turning off all automatics
35024 */
35025- val = lp->a.read_bcr(ioaddr, 2);
35026- lp->a.write_bcr(ioaddr, 2, val & ~2);
35027- val = lp->a.read_bcr(ioaddr, 32);
35028- lp->a.write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
35029+ val = lp->a->read_bcr(ioaddr, 2);
35030+ lp->a->write_bcr(ioaddr, 2, val & ~2);
35031+ val = lp->a->read_bcr(ioaddr, 32);
35032+ lp->a->write_bcr(ioaddr, 32, val & ~(1 << 7)); /* stop MII manager */
35033
35034 if (!(lp->options & PCNET32_PORT_ASEL)) {
35035 /* setup ecmd */
35036@@ -2210,7 +2210,7 @@ static int pcnet32_open(struct net_devic
35037 ecmd.speed =
35038 lp->
35039 options & PCNET32_PORT_100 ? SPEED_100 : SPEED_10;
35040- bcr9 = lp->a.read_bcr(ioaddr, 9);
35041+ bcr9 = lp->a->read_bcr(ioaddr, 9);
35042
35043 if (lp->options & PCNET32_PORT_FD) {
35044 ecmd.duplex = DUPLEX_FULL;
35045@@ -2219,7 +2219,7 @@ static int pcnet32_open(struct net_devic
35046 ecmd.duplex = DUPLEX_HALF;
35047 bcr9 |= ~(1 << 0);
35048 }
35049- lp->a.write_bcr(ioaddr, 9, bcr9);
35050+ lp->a->write_bcr(ioaddr, 9, bcr9);
35051 }
35052
35053 for (i = 0; i < PCNET32_MAX_PHYS; i++) {
35054@@ -2252,9 +2252,9 @@ static int pcnet32_open(struct net_devic
35055
35056 #ifdef DO_DXSUFLO
35057 if (lp->dxsuflo) { /* Disable transmit stop on underflow */
35058- val = lp->a.read_csr(ioaddr, CSR3);
35059+ val = lp->a->read_csr(ioaddr, CSR3);
35060 val |= 0x40;
35061- lp->a.write_csr(ioaddr, CSR3, val);
35062+ lp->a->write_csr(ioaddr, CSR3, val);
35063 }
35064 #endif
35065
35066@@ -2270,11 +2270,11 @@ static int pcnet32_open(struct net_devic
35067 napi_enable(&lp->napi);
35068
35069 /* Re-initialize the PCNET32, and start it when done. */
35070- lp->a.write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
35071- lp->a.write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
35072+ lp->a->write_csr(ioaddr, 1, (lp->init_dma_addr & 0xffff));
35073+ lp->a->write_csr(ioaddr, 2, (lp->init_dma_addr >> 16));
35074
35075- lp->a.write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
35076- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
35077+ lp->a->write_csr(ioaddr, CSR4, 0x0915); /* auto tx pad */
35078+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
35079
35080 netif_start_queue(dev);
35081
35082@@ -2286,20 +2286,20 @@ static int pcnet32_open(struct net_devic
35083
35084 i = 0;
35085 while (i++ < 100)
35086- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
35087+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
35088 break;
35089 /*
35090 * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
35091 * reports that doing so triggers a bug in the '974.
35092 */
35093- lp->a.write_csr(ioaddr, CSR0, CSR0_NORMAL);
35094+ lp->a->write_csr(ioaddr, CSR0, CSR0_NORMAL);
35095
35096 if (netif_msg_ifup(lp))
35097 printk(KERN_DEBUG
35098 "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
35099 dev->name, i,
35100 (u32) (lp->init_dma_addr),
35101- lp->a.read_csr(ioaddr, CSR0));
35102+ lp->a->read_csr(ioaddr, CSR0));
35103
35104 spin_unlock_irqrestore(&lp->lock, flags);
35105
35106@@ -2313,7 +2313,7 @@ static int pcnet32_open(struct net_devic
35107 * Switch back to 16bit mode to avoid problems with dumb
35108 * DOS packet driver after a warm reboot
35109 */
35110- lp->a.write_bcr(ioaddr, 20, 4);
35111+ lp->a->write_bcr(ioaddr, 20, 4);
35112
35113 err_free_irq:
35114 spin_unlock_irqrestore(&lp->lock, flags);
35115@@ -2420,7 +2420,7 @@ static void pcnet32_restart(struct net_d
35116
35117 /* wait for stop */
35118 for (i = 0; i < 100; i++)
35119- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_STOP)
35120+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_STOP)
35121 break;
35122
35123 if (i >= 100 && netif_msg_drv(lp))
35124@@ -2433,13 +2433,13 @@ static void pcnet32_restart(struct net_d
35125 return;
35126
35127 /* ReInit Ring */
35128- lp->a.write_csr(ioaddr, CSR0, CSR0_INIT);
35129+ lp->a->write_csr(ioaddr, CSR0, CSR0_INIT);
35130 i = 0;
35131 while (i++ < 1000)
35132- if (lp->a.read_csr(ioaddr, CSR0) & CSR0_IDON)
35133+ if (lp->a->read_csr(ioaddr, CSR0) & CSR0_IDON)
35134 break;
35135
35136- lp->a.write_csr(ioaddr, CSR0, csr0_bits);
35137+ lp->a->write_csr(ioaddr, CSR0, csr0_bits);
35138 }
35139
35140 static void pcnet32_tx_timeout(struct net_device *dev)
35141@@ -2452,8 +2452,8 @@ static void pcnet32_tx_timeout(struct ne
35142 if (pcnet32_debug & NETIF_MSG_DRV)
35143 printk(KERN_ERR
35144 "%s: transmit timed out, status %4.4x, resetting.\n",
35145- dev->name, lp->a.read_csr(ioaddr, CSR0));
35146- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
35147+ dev->name, lp->a->read_csr(ioaddr, CSR0));
35148+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
35149 dev->stats.tx_errors++;
35150 if (netif_msg_tx_err(lp)) {
35151 int i;
35152@@ -2497,7 +2497,7 @@ static netdev_tx_t pcnet32_start_xmit(st
35153 if (netif_msg_tx_queued(lp)) {
35154 printk(KERN_DEBUG
35155 "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
35156- dev->name, lp->a.read_csr(ioaddr, CSR0));
35157+ dev->name, lp->a->read_csr(ioaddr, CSR0));
35158 }
35159
35160 /* Default status -- will not enable Successful-TxDone
35161@@ -2528,7 +2528,7 @@ static netdev_tx_t pcnet32_start_xmit(st
35162 dev->stats.tx_bytes += skb->len;
35163
35164 /* Trigger an immediate send poll. */
35165- lp->a.write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
35166+ lp->a->write_csr(ioaddr, CSR0, CSR0_INTEN | CSR0_TXPOLL);
35167
35168 dev->trans_start = jiffies;
35169
35170@@ -2555,18 +2555,18 @@ pcnet32_interrupt(int irq, void *dev_id)
35171
35172 spin_lock(&lp->lock);
35173
35174- csr0 = lp->a.read_csr(ioaddr, CSR0);
35175+ csr0 = lp->a->read_csr(ioaddr, CSR0);
35176 while ((csr0 & 0x8f00) && --boguscnt >= 0) {
35177 if (csr0 == 0xffff) {
35178 break; /* PCMCIA remove happened */
35179 }
35180 /* Acknowledge all of the current interrupt sources ASAP. */
35181- lp->a.write_csr(ioaddr, CSR0, csr0 & ~0x004f);
35182+ lp->a->write_csr(ioaddr, CSR0, csr0 & ~0x004f);
35183
35184 if (netif_msg_intr(lp))
35185 printk(KERN_DEBUG
35186 "%s: interrupt csr0=%#2.2x new csr=%#2.2x.\n",
35187- dev->name, csr0, lp->a.read_csr(ioaddr, CSR0));
35188+ dev->name, csr0, lp->a->read_csr(ioaddr, CSR0));
35189
35190 /* Log misc errors. */
35191 if (csr0 & 0x4000)
35192@@ -2595,19 +2595,19 @@ pcnet32_interrupt(int irq, void *dev_id)
35193 if (napi_schedule_prep(&lp->napi)) {
35194 u16 val;
35195 /* set interrupt masks */
35196- val = lp->a.read_csr(ioaddr, CSR3);
35197+ val = lp->a->read_csr(ioaddr, CSR3);
35198 val |= 0x5f00;
35199- lp->a.write_csr(ioaddr, CSR3, val);
35200+ lp->a->write_csr(ioaddr, CSR3, val);
35201
35202 __napi_schedule(&lp->napi);
35203 break;
35204 }
35205- csr0 = lp->a.read_csr(ioaddr, CSR0);
35206+ csr0 = lp->a->read_csr(ioaddr, CSR0);
35207 }
35208
35209 if (netif_msg_intr(lp))
35210 printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
35211- dev->name, lp->a.read_csr(ioaddr, CSR0));
35212+ dev->name, lp->a->read_csr(ioaddr, CSR0));
35213
35214 spin_unlock(&lp->lock);
35215
35216@@ -2627,21 +2627,21 @@ static int pcnet32_close(struct net_devi
35217
35218 spin_lock_irqsave(&lp->lock, flags);
35219
35220- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
35221+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
35222
35223 if (netif_msg_ifdown(lp))
35224 printk(KERN_DEBUG
35225 "%s: Shutting down ethercard, status was %2.2x.\n",
35226- dev->name, lp->a.read_csr(ioaddr, CSR0));
35227+ dev->name, lp->a->read_csr(ioaddr, CSR0));
35228
35229 /* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
35230- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
35231+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
35232
35233 /*
35234 * Switch back to 16bit mode to avoid problems with dumb
35235 * DOS packet driver after a warm reboot
35236 */
35237- lp->a.write_bcr(ioaddr, 20, 4);
35238+ lp->a->write_bcr(ioaddr, 20, 4);
35239
35240 spin_unlock_irqrestore(&lp->lock, flags);
35241
35242@@ -2664,7 +2664,7 @@ static struct net_device_stats *pcnet32_
35243 unsigned long flags;
35244
35245 spin_lock_irqsave(&lp->lock, flags);
35246- dev->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
35247+ dev->stats.rx_missed_errors = lp->a->read_csr(ioaddr, 112);
35248 spin_unlock_irqrestore(&lp->lock, flags);
35249
35250 return &dev->stats;
35251@@ -2686,10 +2686,10 @@ static void pcnet32_load_multicast(struc
35252 if (dev->flags & IFF_ALLMULTI) {
35253 ib->filter[0] = cpu_to_le32(~0U);
35254 ib->filter[1] = cpu_to_le32(~0U);
35255- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
35256- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
35257- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
35258- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
35259+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER, 0xffff);
35260+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+1, 0xffff);
35261+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+2, 0xffff);
35262+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER+3, 0xffff);
35263 return;
35264 }
35265 /* clear the multicast filter */
35266@@ -2710,7 +2710,7 @@ static void pcnet32_load_multicast(struc
35267 mcast_table[crc >> 4] |= cpu_to_le16(1 << (crc & 0xf));
35268 }
35269 for (i = 0; i < 4; i++)
35270- lp->a.write_csr(ioaddr, PCNET32_MC_FILTER + i,
35271+ lp->a->write_csr(ioaddr, PCNET32_MC_FILTER + i,
35272 le16_to_cpu(mcast_table[i]));
35273 return;
35274 }
35275@@ -2726,7 +2726,7 @@ static void pcnet32_set_multicast_list(s
35276
35277 spin_lock_irqsave(&lp->lock, flags);
35278 suspended = pcnet32_suspend(dev, &flags, 0);
35279- csr15 = lp->a.read_csr(ioaddr, CSR15);
35280+ csr15 = lp->a->read_csr(ioaddr, CSR15);
35281 if (dev->flags & IFF_PROMISC) {
35282 /* Log any net taps. */
35283 if (netif_msg_hw(lp))
35284@@ -2735,21 +2735,21 @@ static void pcnet32_set_multicast_list(s
35285 lp->init_block->mode =
35286 cpu_to_le16(0x8000 | (lp->options & PCNET32_PORT_PORTSEL) <<
35287 7);
35288- lp->a.write_csr(ioaddr, CSR15, csr15 | 0x8000);
35289+ lp->a->write_csr(ioaddr, CSR15, csr15 | 0x8000);
35290 } else {
35291 lp->init_block->mode =
35292 cpu_to_le16((lp->options & PCNET32_PORT_PORTSEL) << 7);
35293- lp->a.write_csr(ioaddr, CSR15, csr15 & 0x7fff);
35294+ lp->a->write_csr(ioaddr, CSR15, csr15 & 0x7fff);
35295 pcnet32_load_multicast(dev);
35296 }
35297
35298 if (suspended) {
35299 int csr5;
35300 /* clear SUSPEND (SPND) - CSR5 bit 0 */
35301- csr5 = lp->a.read_csr(ioaddr, CSR5);
35302- lp->a.write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
35303+ csr5 = lp->a->read_csr(ioaddr, CSR5);
35304+ lp->a->write_csr(ioaddr, CSR5, csr5 & (~CSR5_SUSPEND));
35305 } else {
35306- lp->a.write_csr(ioaddr, CSR0, CSR0_STOP);
35307+ lp->a->write_csr(ioaddr, CSR0, CSR0_STOP);
35308 pcnet32_restart(dev, CSR0_NORMAL);
35309 netif_wake_queue(dev);
35310 }
35311@@ -2767,8 +2767,8 @@ static int mdio_read(struct net_device *
35312 if (!lp->mii)
35313 return 0;
35314
35315- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
35316- val_out = lp->a.read_bcr(ioaddr, 34);
35317+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
35318+ val_out = lp->a->read_bcr(ioaddr, 34);
35319
35320 return val_out;
35321 }
35322@@ -2782,8 +2782,8 @@ static void mdio_write(struct net_device
35323 if (!lp->mii)
35324 return;
35325
35326- lp->a.write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
35327- lp->a.write_bcr(ioaddr, 34, val);
35328+ lp->a->write_bcr(ioaddr, 33, ((phy_id & 0x1f) << 5) | (reg_num & 0x1f));
35329+ lp->a->write_bcr(ioaddr, 34, val);
35330 }
35331
35332 static int pcnet32_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
35333@@ -2862,7 +2862,7 @@ static void pcnet32_check_media(struct n
35334 curr_link = mii_link_ok(&lp->mii_if);
35335 } else {
35336 ulong ioaddr = dev->base_addr; /* card base I/O address */
35337- curr_link = (lp->a.read_bcr(ioaddr, 4) != 0xc0);
35338+ curr_link = (lp->a->read_bcr(ioaddr, 4) != 0xc0);
35339 }
35340 if (!curr_link) {
35341 if (prev_link || verbose) {
35342@@ -2887,13 +2887,13 @@ static void pcnet32_check_media(struct n
35343 (ecmd.duplex ==
35344 DUPLEX_FULL) ? "full" : "half");
35345 }
35346- bcr9 = lp->a.read_bcr(dev->base_addr, 9);
35347+ bcr9 = lp->a->read_bcr(dev->base_addr, 9);
35348 if ((bcr9 & (1 << 0)) != lp->mii_if.full_duplex) {
35349 if (lp->mii_if.full_duplex)
35350 bcr9 |= (1 << 0);
35351 else
35352 bcr9 &= ~(1 << 0);
35353- lp->a.write_bcr(dev->base_addr, 9, bcr9);
35354+ lp->a->write_bcr(dev->base_addr, 9, bcr9);
35355 }
35356 } else {
35357 if (netif_msg_link(lp))
35358diff -urNp linux-2.6.32.43/drivers/net/tg3.h linux-2.6.32.43/drivers/net/tg3.h
35359--- linux-2.6.32.43/drivers/net/tg3.h 2011-03-27 14:31:47.000000000 -0400
35360+++ linux-2.6.32.43/drivers/net/tg3.h 2011-04-17 15:56:46.000000000 -0400
35361@@ -95,6 +95,7 @@
35362 #define CHIPREV_ID_5750_A0 0x4000
35363 #define CHIPREV_ID_5750_A1 0x4001
35364 #define CHIPREV_ID_5750_A3 0x4003
35365+#define CHIPREV_ID_5750_C1 0x4201
35366 #define CHIPREV_ID_5750_C2 0x4202
35367 #define CHIPREV_ID_5752_A0_HW 0x5000
35368 #define CHIPREV_ID_5752_A0 0x6000
35369diff -urNp linux-2.6.32.43/drivers/net/tokenring/abyss.c linux-2.6.32.43/drivers/net/tokenring/abyss.c
35370--- linux-2.6.32.43/drivers/net/tokenring/abyss.c 2011-03-27 14:31:47.000000000 -0400
35371+++ linux-2.6.32.43/drivers/net/tokenring/abyss.c 2011-08-05 20:33:55.000000000 -0400
35372@@ -451,10 +451,12 @@ static struct pci_driver abyss_driver =
35373
35374 static int __init abyss_init (void)
35375 {
35376- abyss_netdev_ops = tms380tr_netdev_ops;
35377+ pax_open_kernel();
35378+ memcpy((void *)&abyss_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35379
35380- abyss_netdev_ops.ndo_open = abyss_open;
35381- abyss_netdev_ops.ndo_stop = abyss_close;
35382+ *(void **)&abyss_netdev_ops.ndo_open = abyss_open;
35383+ *(void **)&abyss_netdev_ops.ndo_stop = abyss_close;
35384+ pax_close_kernel();
35385
35386 return pci_register_driver(&abyss_driver);
35387 }
35388diff -urNp linux-2.6.32.43/drivers/net/tokenring/madgemc.c linux-2.6.32.43/drivers/net/tokenring/madgemc.c
35389--- linux-2.6.32.43/drivers/net/tokenring/madgemc.c 2011-03-27 14:31:47.000000000 -0400
35390+++ linux-2.6.32.43/drivers/net/tokenring/madgemc.c 2011-08-05 20:33:55.000000000 -0400
35391@@ -755,9 +755,11 @@ static struct mca_driver madgemc_driver
35392
35393 static int __init madgemc_init (void)
35394 {
35395- madgemc_netdev_ops = tms380tr_netdev_ops;
35396- madgemc_netdev_ops.ndo_open = madgemc_open;
35397- madgemc_netdev_ops.ndo_stop = madgemc_close;
35398+ pax_open_kernel();
35399+ memcpy((void *)&madgemc_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35400+ *(void **)&madgemc_netdev_ops.ndo_open = madgemc_open;
35401+ *(void **)&madgemc_netdev_ops.ndo_stop = madgemc_close;
35402+ pax_close_kernel();
35403
35404 return mca_register_driver (&madgemc_driver);
35405 }
35406diff -urNp linux-2.6.32.43/drivers/net/tokenring/proteon.c linux-2.6.32.43/drivers/net/tokenring/proteon.c
35407--- linux-2.6.32.43/drivers/net/tokenring/proteon.c 2011-03-27 14:31:47.000000000 -0400
35408+++ linux-2.6.32.43/drivers/net/tokenring/proteon.c 2011-08-05 20:33:55.000000000 -0400
35409@@ -353,9 +353,11 @@ static int __init proteon_init(void)
35410 struct platform_device *pdev;
35411 int i, num = 0, err = 0;
35412
35413- proteon_netdev_ops = tms380tr_netdev_ops;
35414- proteon_netdev_ops.ndo_open = proteon_open;
35415- proteon_netdev_ops.ndo_stop = tms380tr_close;
35416+ pax_open_kernel();
35417+ memcpy((void *)&proteon_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35418+ *(void **)&proteon_netdev_ops.ndo_open = proteon_open;
35419+ *(void **)&proteon_netdev_ops.ndo_stop = tms380tr_close;
35420+ pax_close_kernel();
35421
35422 err = platform_driver_register(&proteon_driver);
35423 if (err)
35424diff -urNp linux-2.6.32.43/drivers/net/tokenring/skisa.c linux-2.6.32.43/drivers/net/tokenring/skisa.c
35425--- linux-2.6.32.43/drivers/net/tokenring/skisa.c 2011-03-27 14:31:47.000000000 -0400
35426+++ linux-2.6.32.43/drivers/net/tokenring/skisa.c 2011-08-05 20:33:55.000000000 -0400
35427@@ -363,9 +363,11 @@ static int __init sk_isa_init(void)
35428 struct platform_device *pdev;
35429 int i, num = 0, err = 0;
35430
35431- sk_isa_netdev_ops = tms380tr_netdev_ops;
35432- sk_isa_netdev_ops.ndo_open = sk_isa_open;
35433- sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35434+ pax_open_kernel();
35435+ memcpy((void *)&sk_isa_netdev_ops, &tms380tr_netdev_ops, sizeof(tms380tr_netdev_ops));
35436+ *(void **)&sk_isa_netdev_ops.ndo_open = sk_isa_open;
35437+ *(void **)&sk_isa_netdev_ops.ndo_stop = tms380tr_close;
35438+ pax_close_kernel();
35439
35440 err = platform_driver_register(&sk_isa_driver);
35441 if (err)
35442diff -urNp linux-2.6.32.43/drivers/net/tulip/de2104x.c linux-2.6.32.43/drivers/net/tulip/de2104x.c
35443--- linux-2.6.32.43/drivers/net/tulip/de2104x.c 2011-03-27 14:31:47.000000000 -0400
35444+++ linux-2.6.32.43/drivers/net/tulip/de2104x.c 2011-05-16 21:46:57.000000000 -0400
35445@@ -1785,6 +1785,8 @@ static void __devinit de21041_get_srom_i
35446 struct de_srom_info_leaf *il;
35447 void *bufp;
35448
35449+ pax_track_stack();
35450+
35451 /* download entire eeprom */
35452 for (i = 0; i < DE_EEPROM_WORDS; i++)
35453 ((__le16 *)ee_data)[i] =
35454diff -urNp linux-2.6.32.43/drivers/net/tulip/de4x5.c linux-2.6.32.43/drivers/net/tulip/de4x5.c
35455--- linux-2.6.32.43/drivers/net/tulip/de4x5.c 2011-03-27 14:31:47.000000000 -0400
35456+++ linux-2.6.32.43/drivers/net/tulip/de4x5.c 2011-04-17 15:56:46.000000000 -0400
35457@@ -5472,7 +5472,7 @@ de4x5_ioctl(struct net_device *dev, stru
35458 for (i=0; i<ETH_ALEN; i++) {
35459 tmp.addr[i] = dev->dev_addr[i];
35460 }
35461- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
35462+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
35463 break;
35464
35465 case DE4X5_SET_HWADDR: /* Set the hardware address */
35466@@ -5512,7 +5512,7 @@ de4x5_ioctl(struct net_device *dev, stru
35467 spin_lock_irqsave(&lp->lock, flags);
35468 memcpy(&statbuf, &lp->pktStats, ioc->len);
35469 spin_unlock_irqrestore(&lp->lock, flags);
35470- if (copy_to_user(ioc->data, &statbuf, ioc->len))
35471+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
35472 return -EFAULT;
35473 break;
35474 }
35475diff -urNp linux-2.6.32.43/drivers/net/usb/hso.c linux-2.6.32.43/drivers/net/usb/hso.c
35476--- linux-2.6.32.43/drivers/net/usb/hso.c 2011-03-27 14:31:47.000000000 -0400
35477+++ linux-2.6.32.43/drivers/net/usb/hso.c 2011-04-17 15:56:46.000000000 -0400
35478@@ -71,7 +71,7 @@
35479 #include <asm/byteorder.h>
35480 #include <linux/serial_core.h>
35481 #include <linux/serial.h>
35482-
35483+#include <asm/local.h>
35484
35485 #define DRIVER_VERSION "1.2"
35486 #define MOD_AUTHOR "Option Wireless"
35487@@ -258,7 +258,7 @@ struct hso_serial {
35488
35489 /* from usb_serial_port */
35490 struct tty_struct *tty;
35491- int open_count;
35492+ local_t open_count;
35493 spinlock_t serial_lock;
35494
35495 int (*write_data) (struct hso_serial *serial);
35496@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_
35497 struct urb *urb;
35498
35499 urb = serial->rx_urb[0];
35500- if (serial->open_count > 0) {
35501+ if (local_read(&serial->open_count) > 0) {
35502 count = put_rxbuf_data(urb, serial);
35503 if (count == -1)
35504 return;
35505@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_cal
35506 DUMP1(urb->transfer_buffer, urb->actual_length);
35507
35508 /* Anyone listening? */
35509- if (serial->open_count == 0)
35510+ if (local_read(&serial->open_count) == 0)
35511 return;
35512
35513 if (status == 0) {
35514@@ -1311,8 +1311,7 @@ static int hso_serial_open(struct tty_st
35515 spin_unlock_irq(&serial->serial_lock);
35516
35517 /* check for port already opened, if not set the termios */
35518- serial->open_count++;
35519- if (serial->open_count == 1) {
35520+ if (local_inc_return(&serial->open_count) == 1) {
35521 tty->low_latency = 1;
35522 serial->rx_state = RX_IDLE;
35523 /* Force default termio settings */
35524@@ -1325,7 +1324,7 @@ static int hso_serial_open(struct tty_st
35525 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
35526 if (result) {
35527 hso_stop_serial_device(serial->parent);
35528- serial->open_count--;
35529+ local_dec(&serial->open_count);
35530 kref_put(&serial->parent->ref, hso_serial_ref_free);
35531 }
35532 } else {
35533@@ -1362,10 +1361,10 @@ static void hso_serial_close(struct tty_
35534
35535 /* reset the rts and dtr */
35536 /* do the actual close */
35537- serial->open_count--;
35538+ local_dec(&serial->open_count);
35539
35540- if (serial->open_count <= 0) {
35541- serial->open_count = 0;
35542+ if (local_read(&serial->open_count) <= 0) {
35543+ local_set(&serial->open_count, 0);
35544 spin_lock_irq(&serial->serial_lock);
35545 if (serial->tty == tty) {
35546 serial->tty->driver_data = NULL;
35547@@ -1447,7 +1446,7 @@ static void hso_serial_set_termios(struc
35548
35549 /* the actual setup */
35550 spin_lock_irqsave(&serial->serial_lock, flags);
35551- if (serial->open_count)
35552+ if (local_read(&serial->open_count))
35553 _hso_serial_set_termios(tty, old);
35554 else
35555 tty->termios = old;
35556@@ -3097,7 +3096,7 @@ static int hso_resume(struct usb_interfa
35557 /* Start all serial ports */
35558 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
35559 if (serial_table[i] && (serial_table[i]->interface == iface)) {
35560- if (dev2ser(serial_table[i])->open_count) {
35561+ if (local_read(&dev2ser(serial_table[i])->open_count)) {
35562 result =
35563 hso_start_serial_device(serial_table[i], GFP_NOIO);
35564 hso_kick_transmit(dev2ser(serial_table[i]));
35565diff -urNp linux-2.6.32.43/drivers/net/vxge/vxge-config.h linux-2.6.32.43/drivers/net/vxge/vxge-config.h
35566--- linux-2.6.32.43/drivers/net/vxge/vxge-config.h 2011-03-27 14:31:47.000000000 -0400
35567+++ linux-2.6.32.43/drivers/net/vxge/vxge-config.h 2011-08-05 20:33:55.000000000 -0400
35568@@ -474,7 +474,7 @@ struct vxge_hw_uld_cbs {
35569 void (*link_down)(struct __vxge_hw_device *devh);
35570 void (*crit_err)(struct __vxge_hw_device *devh,
35571 enum vxge_hw_event type, u64 ext_data);
35572-};
35573+} __no_const;
35574
35575 /*
35576 * struct __vxge_hw_blockpool_entry - Block private data structure
35577diff -urNp linux-2.6.32.43/drivers/net/vxge/vxge-main.c linux-2.6.32.43/drivers/net/vxge/vxge-main.c
35578--- linux-2.6.32.43/drivers/net/vxge/vxge-main.c 2011-03-27 14:31:47.000000000 -0400
35579+++ linux-2.6.32.43/drivers/net/vxge/vxge-main.c 2011-05-16 21:46:57.000000000 -0400
35580@@ -93,6 +93,8 @@ static inline void VXGE_COMPLETE_VPATH_T
35581 struct sk_buff *completed[NR_SKB_COMPLETED];
35582 int more;
35583
35584+ pax_track_stack();
35585+
35586 do {
35587 more = 0;
35588 skb_ptr = completed;
35589@@ -1779,6 +1781,8 @@ static enum vxge_hw_status vxge_rth_conf
35590 u8 mtable[256] = {0}; /* CPU to vpath mapping */
35591 int index;
35592
35593+ pax_track_stack();
35594+
35595 /*
35596 * Filling
35597 * - itable with bucket numbers
35598diff -urNp linux-2.6.32.43/drivers/net/vxge/vxge-traffic.h linux-2.6.32.43/drivers/net/vxge/vxge-traffic.h
35599--- linux-2.6.32.43/drivers/net/vxge/vxge-traffic.h 2011-03-27 14:31:47.000000000 -0400
35600+++ linux-2.6.32.43/drivers/net/vxge/vxge-traffic.h 2011-08-05 20:33:55.000000000 -0400
35601@@ -2123,7 +2123,7 @@ struct vxge_hw_mempool_cbs {
35602 struct vxge_hw_mempool_dma *dma_object,
35603 u32 index,
35604 u32 is_last);
35605-};
35606+} __no_const;
35607
35608 void
35609 __vxge_hw_mempool_destroy(
35610diff -urNp linux-2.6.32.43/drivers/net/wan/cycx_x25.c linux-2.6.32.43/drivers/net/wan/cycx_x25.c
35611--- linux-2.6.32.43/drivers/net/wan/cycx_x25.c 2011-03-27 14:31:47.000000000 -0400
35612+++ linux-2.6.32.43/drivers/net/wan/cycx_x25.c 2011-05-16 21:46:57.000000000 -0400
35613@@ -1017,6 +1017,8 @@ static void hex_dump(char *msg, unsigned
35614 unsigned char hex[1024],
35615 * phex = hex;
35616
35617+ pax_track_stack();
35618+
35619 if (len >= (sizeof(hex) / 2))
35620 len = (sizeof(hex) / 2) - 1;
35621
35622diff -urNp linux-2.6.32.43/drivers/net/wan/hdlc_x25.c linux-2.6.32.43/drivers/net/wan/hdlc_x25.c
35623--- linux-2.6.32.43/drivers/net/wan/hdlc_x25.c 2011-03-27 14:31:47.000000000 -0400
35624+++ linux-2.6.32.43/drivers/net/wan/hdlc_x25.c 2011-08-05 20:33:55.000000000 -0400
35625@@ -136,16 +136,16 @@ static netdev_tx_t x25_xmit(struct sk_bu
35626
35627 static int x25_open(struct net_device *dev)
35628 {
35629- struct lapb_register_struct cb;
35630+ static struct lapb_register_struct cb = {
35631+ .connect_confirmation = x25_connected,
35632+ .connect_indication = x25_connected,
35633+ .disconnect_confirmation = x25_disconnected,
35634+ .disconnect_indication = x25_disconnected,
35635+ .data_indication = x25_data_indication,
35636+ .data_transmit = x25_data_transmit
35637+ };
35638 int result;
35639
35640- cb.connect_confirmation = x25_connected;
35641- cb.connect_indication = x25_connected;
35642- cb.disconnect_confirmation = x25_disconnected;
35643- cb.disconnect_indication = x25_disconnected;
35644- cb.data_indication = x25_data_indication;
35645- cb.data_transmit = x25_data_transmit;
35646-
35647 result = lapb_register(dev, &cb);
35648 if (result != LAPB_OK)
35649 return result;
35650diff -urNp linux-2.6.32.43/drivers/net/wimax/i2400m/usb-fw.c linux-2.6.32.43/drivers/net/wimax/i2400m/usb-fw.c
35651--- linux-2.6.32.43/drivers/net/wimax/i2400m/usb-fw.c 2011-03-27 14:31:47.000000000 -0400
35652+++ linux-2.6.32.43/drivers/net/wimax/i2400m/usb-fw.c 2011-05-16 21:46:57.000000000 -0400
35653@@ -263,6 +263,8 @@ ssize_t i2400mu_bus_bm_wait_for_ack(stru
35654 int do_autopm = 1;
35655 DECLARE_COMPLETION_ONSTACK(notif_completion);
35656
35657+ pax_track_stack();
35658+
35659 d_fnstart(8, dev, "(i2400m %p ack %p size %zu)\n",
35660 i2400m, ack, ack_size);
35661 BUG_ON(_ack == i2400m->bm_ack_buf);
35662diff -urNp linux-2.6.32.43/drivers/net/wireless/airo.c linux-2.6.32.43/drivers/net/wireless/airo.c
35663--- linux-2.6.32.43/drivers/net/wireless/airo.c 2011-03-27 14:31:47.000000000 -0400
35664+++ linux-2.6.32.43/drivers/net/wireless/airo.c 2011-05-16 21:46:57.000000000 -0400
35665@@ -3003,6 +3003,8 @@ static void airo_process_scan_results (s
35666 BSSListElement * loop_net;
35667 BSSListElement * tmp_net;
35668
35669+ pax_track_stack();
35670+
35671 /* Blow away current list of scan results */
35672 list_for_each_entry_safe (loop_net, tmp_net, &ai->network_list, list) {
35673 list_move_tail (&loop_net->list, &ai->network_free_list);
35674@@ -3783,6 +3785,8 @@ static u16 setup_card(struct airo_info *
35675 WepKeyRid wkr;
35676 int rc;
35677
35678+ pax_track_stack();
35679+
35680 memset( &mySsid, 0, sizeof( mySsid ) );
35681 kfree (ai->flash);
35682 ai->flash = NULL;
35683@@ -4758,6 +4762,8 @@ static int proc_stats_rid_open( struct i
35684 __le32 *vals = stats.vals;
35685 int len;
35686
35687+ pax_track_stack();
35688+
35689 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
35690 return -ENOMEM;
35691 data = (struct proc_data *)file->private_data;
35692@@ -5487,6 +5493,8 @@ static int proc_BSSList_open( struct ino
35693 /* If doLoseSync is not 1, we won't do a Lose Sync */
35694 int doLoseSync = -1;
35695
35696+ pax_track_stack();
35697+
35698 if ((file->private_data = kzalloc(sizeof(struct proc_data ), GFP_KERNEL)) == NULL)
35699 return -ENOMEM;
35700 data = (struct proc_data *)file->private_data;
35701@@ -7193,6 +7201,8 @@ static int airo_get_aplist(struct net_de
35702 int i;
35703 int loseSync = capable(CAP_NET_ADMIN) ? 1: -1;
35704
35705+ pax_track_stack();
35706+
35707 qual = kmalloc(IW_MAX_AP * sizeof(*qual), GFP_KERNEL);
35708 if (!qual)
35709 return -ENOMEM;
35710@@ -7753,6 +7763,8 @@ static void airo_read_wireless_stats(str
35711 CapabilityRid cap_rid;
35712 __le32 *vals = stats_rid.vals;
35713
35714+ pax_track_stack();
35715+
35716 /* Get stats out of the card */
35717 clear_bit(JOB_WSTATS, &local->jobs);
35718 if (local->power.event) {
35719diff -urNp linux-2.6.32.43/drivers/net/wireless/ath/ath5k/debug.c linux-2.6.32.43/drivers/net/wireless/ath/ath5k/debug.c
35720--- linux-2.6.32.43/drivers/net/wireless/ath/ath5k/debug.c 2011-03-27 14:31:47.000000000 -0400
35721+++ linux-2.6.32.43/drivers/net/wireless/ath/ath5k/debug.c 2011-05-16 21:46:57.000000000 -0400
35722@@ -205,6 +205,8 @@ static ssize_t read_file_beacon(struct f
35723 unsigned int v;
35724 u64 tsf;
35725
35726+ pax_track_stack();
35727+
35728 v = ath5k_hw_reg_read(sc->ah, AR5K_BEACON);
35729 len += snprintf(buf+len, sizeof(buf)-len,
35730 "%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
35731@@ -318,6 +320,8 @@ static ssize_t read_file_debug(struct fi
35732 unsigned int len = 0;
35733 unsigned int i;
35734
35735+ pax_track_stack();
35736+
35737 len += snprintf(buf+len, sizeof(buf)-len,
35738 "DEBUG LEVEL: 0x%08x\n\n", sc->debug.level);
35739
35740diff -urNp linux-2.6.32.43/drivers/net/wireless/ath/ath9k/debug.c linux-2.6.32.43/drivers/net/wireless/ath/ath9k/debug.c
35741--- linux-2.6.32.43/drivers/net/wireless/ath/ath9k/debug.c 2011-03-27 14:31:47.000000000 -0400
35742+++ linux-2.6.32.43/drivers/net/wireless/ath/ath9k/debug.c 2011-05-16 21:46:57.000000000 -0400
35743@@ -220,6 +220,8 @@ static ssize_t read_file_interrupt(struc
35744 char buf[512];
35745 unsigned int len = 0;
35746
35747+ pax_track_stack();
35748+
35749 len += snprintf(buf + len, sizeof(buf) - len,
35750 "%8s: %10u\n", "RX", sc->debug.stats.istats.rxok);
35751 len += snprintf(buf + len, sizeof(buf) - len,
35752@@ -360,6 +362,8 @@ static ssize_t read_file_wiphy(struct fi
35753 int i;
35754 u8 addr[ETH_ALEN];
35755
35756+ pax_track_stack();
35757+
35758 len += snprintf(buf + len, sizeof(buf) - len,
35759 "primary: %s (%s chan=%d ht=%d)\n",
35760 wiphy_name(sc->pri_wiphy->hw->wiphy),
35761diff -urNp linux-2.6.32.43/drivers/net/wireless/b43/debugfs.c linux-2.6.32.43/drivers/net/wireless/b43/debugfs.c
35762--- linux-2.6.32.43/drivers/net/wireless/b43/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35763+++ linux-2.6.32.43/drivers/net/wireless/b43/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35764@@ -43,7 +43,7 @@ static struct dentry *rootdir;
35765 struct b43_debugfs_fops {
35766 ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize);
35767 int (*write)(struct b43_wldev *dev, const char *buf, size_t count);
35768- struct file_operations fops;
35769+ const struct file_operations fops;
35770 /* Offset of struct b43_dfs_file in struct b43_dfsentry */
35771 size_t file_struct_offset;
35772 };
35773diff -urNp linux-2.6.32.43/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.32.43/drivers/net/wireless/b43legacy/debugfs.c
35774--- linux-2.6.32.43/drivers/net/wireless/b43legacy/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35775+++ linux-2.6.32.43/drivers/net/wireless/b43legacy/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35776@@ -44,7 +44,7 @@ static struct dentry *rootdir;
35777 struct b43legacy_debugfs_fops {
35778 ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize);
35779 int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count);
35780- struct file_operations fops;
35781+ const struct file_operations fops;
35782 /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */
35783 size_t file_struct_offset;
35784 /* Take wl->irq_lock before calling read/write? */
35785diff -urNp linux-2.6.32.43/drivers/net/wireless/ipw2x00/ipw2100.c linux-2.6.32.43/drivers/net/wireless/ipw2x00/ipw2100.c
35786--- linux-2.6.32.43/drivers/net/wireless/ipw2x00/ipw2100.c 2011-03-27 14:31:47.000000000 -0400
35787+++ linux-2.6.32.43/drivers/net/wireless/ipw2x00/ipw2100.c 2011-05-16 21:46:57.000000000 -0400
35788@@ -2014,6 +2014,8 @@ static int ipw2100_set_essid(struct ipw2
35789 int err;
35790 DECLARE_SSID_BUF(ssid);
35791
35792+ pax_track_stack();
35793+
35794 IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));
35795
35796 if (ssid_len)
35797@@ -5380,6 +5382,8 @@ static int ipw2100_set_key(struct ipw210
35798 struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters;
35799 int err;
35800
35801+ pax_track_stack();
35802+
35803 IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n",
35804 idx, keylen, len);
35805
35806diff -urNp linux-2.6.32.43/drivers/net/wireless/ipw2x00/libipw_rx.c linux-2.6.32.43/drivers/net/wireless/ipw2x00/libipw_rx.c
35807--- linux-2.6.32.43/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-03-27 14:31:47.000000000 -0400
35808+++ linux-2.6.32.43/drivers/net/wireless/ipw2x00/libipw_rx.c 2011-05-16 21:46:57.000000000 -0400
35809@@ -1566,6 +1566,8 @@ static void libipw_process_probe_respons
35810 unsigned long flags;
35811 DECLARE_SSID_BUF(ssid);
35812
35813+ pax_track_stack();
35814+
35815 LIBIPW_DEBUG_SCAN("'%s' (%pM"
35816 "): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
35817 print_ssid(ssid, info_element->data, info_element->len),
35818diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-1000.c linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-1000.c
35819--- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-03-27 14:31:47.000000000 -0400
35820+++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-1000.c 2011-04-17 15:56:46.000000000 -0400
35821@@ -137,7 +137,7 @@ static struct iwl_lib_ops iwl1000_lib =
35822 },
35823 };
35824
35825-static struct iwl_ops iwl1000_ops = {
35826+static const struct iwl_ops iwl1000_ops = {
35827 .ucode = &iwl5000_ucode,
35828 .lib = &iwl1000_lib,
35829 .hcmd = &iwl5000_hcmd,
35830diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl3945-base.c linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl3945-base.c
35831--- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-03-27 14:31:47.000000000 -0400
35832+++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl3945-base.c 2011-08-05 20:33:55.000000000 -0400
35833@@ -3927,7 +3927,9 @@ static int iwl3945_pci_probe(struct pci_
35834 */
35835 if (iwl3945_mod_params.disable_hw_scan) {
35836 IWL_DEBUG_INFO(priv, "Disabling hw_scan\n");
35837- iwl3945_hw_ops.hw_scan = NULL;
35838+ pax_open_kernel();
35839+ *(void **)&iwl3945_hw_ops.hw_scan = NULL;
35840+ pax_close_kernel();
35841 }
35842
35843
35844diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-3945.c linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-3945.c
35845--- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-03-27 14:31:47.000000000 -0400
35846+++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-3945.c 2011-04-17 15:56:46.000000000 -0400
35847@@ -2874,7 +2874,7 @@ static struct iwl_hcmd_utils_ops iwl3945
35848 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
35849 };
35850
35851-static struct iwl_ops iwl3945_ops = {
35852+static const struct iwl_ops iwl3945_ops = {
35853 .ucode = &iwl3945_ucode,
35854 .lib = &iwl3945_lib,
35855 .hcmd = &iwl3945_hcmd,
35856diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-4965.c linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-4965.c
35857--- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-03-27 14:31:47.000000000 -0400
35858+++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-4965.c 2011-04-17 15:56:46.000000000 -0400
35859@@ -2345,7 +2345,7 @@ static struct iwl_lib_ops iwl4965_lib =
35860 },
35861 };
35862
35863-static struct iwl_ops iwl4965_ops = {
35864+static const struct iwl_ops iwl4965_ops = {
35865 .ucode = &iwl4965_ucode,
35866 .lib = &iwl4965_lib,
35867 .hcmd = &iwl4965_hcmd,
35868diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-5000.c linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-5000.c
35869--- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:55:34.000000000 -0400
35870+++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-5000.c 2011-06-25 12:56:37.000000000 -0400
35871@@ -1633,14 +1633,14 @@ static struct iwl_lib_ops iwl5150_lib =
35872 },
35873 };
35874
35875-struct iwl_ops iwl5000_ops = {
35876+const struct iwl_ops iwl5000_ops = {
35877 .ucode = &iwl5000_ucode,
35878 .lib = &iwl5000_lib,
35879 .hcmd = &iwl5000_hcmd,
35880 .utils = &iwl5000_hcmd_utils,
35881 };
35882
35883-static struct iwl_ops iwl5150_ops = {
35884+static const struct iwl_ops iwl5150_ops = {
35885 .ucode = &iwl5000_ucode,
35886 .lib = &iwl5150_lib,
35887 .hcmd = &iwl5000_hcmd,
35888diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-6000.c linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-6000.c
35889--- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-03-27 14:31:47.000000000 -0400
35890+++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-6000.c 2011-04-17 15:56:46.000000000 -0400
35891@@ -146,7 +146,7 @@ static struct iwl_hcmd_utils_ops iwl6000
35892 .calc_rssi = iwl5000_calc_rssi,
35893 };
35894
35895-static struct iwl_ops iwl6000_ops = {
35896+static const struct iwl_ops iwl6000_ops = {
35897 .ucode = &iwl5000_ucode,
35898 .lib = &iwl6000_lib,
35899 .hcmd = &iwl5000_hcmd,
35900diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-agn.c linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-agn.c
35901--- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-03-27 14:31:47.000000000 -0400
35902+++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-agn.c 2011-08-05 20:33:55.000000000 -0400
35903@@ -2911,7 +2911,9 @@ static int iwl_pci_probe(struct pci_dev
35904 if (iwl_debug_level & IWL_DL_INFO)
35905 dev_printk(KERN_DEBUG, &(pdev->dev),
35906 "Disabling hw_scan\n");
35907- iwl_hw_ops.hw_scan = NULL;
35908+ pax_open_kernel();
35909+ *(void **)&iwl_hw_ops.hw_scan = NULL;
35910+ pax_close_kernel();
35911 }
35912
35913 hw = iwl_alloc_all(cfg, &iwl_hw_ops);
35914diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-agn-rs.c linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
35915--- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-03-27 14:31:47.000000000 -0400
35916+++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-agn-rs.c 2011-05-16 21:46:57.000000000 -0400
35917@@ -857,6 +857,8 @@ static void rs_tx_status(void *priv_r, s
35918 u8 active_index = 0;
35919 s32 tpt = 0;
35920
35921+ pax_track_stack();
35922+
35923 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
35924
35925 if (!ieee80211_is_data(hdr->frame_control) ||
35926@@ -2722,6 +2724,8 @@ static void rs_fill_link_cmd(struct iwl_
35927 u8 valid_tx_ant = 0;
35928 struct iwl_link_quality_cmd *lq_cmd = &lq_sta->lq;
35929
35930+ pax_track_stack();
35931+
35932 /* Override starting rate (index 0) if needed for debug purposes */
35933 rs_dbgfs_set_mcs(lq_sta, &new_rate, index);
35934
35935diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-debugfs.c linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-debugfs.c
35936--- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-03-27 14:31:47.000000000 -0400
35937+++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-debugfs.c 2011-05-16 21:46:57.000000000 -0400
35938@@ -524,6 +524,8 @@ static ssize_t iwl_dbgfs_status_read(str
35939 int pos = 0;
35940 const size_t bufsz = sizeof(buf);
35941
35942+ pax_track_stack();
35943+
35944 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_ACTIVE:\t %d\n",
35945 test_bit(STATUS_HCMD_ACTIVE, &priv->status));
35946 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_HCMD_SYNC_ACTIVE: %d\n",
35947@@ -658,6 +660,8 @@ static ssize_t iwl_dbgfs_qos_read(struct
35948 const size_t bufsz = sizeof(buf);
35949 ssize_t ret;
35950
35951+ pax_track_stack();
35952+
35953 for (i = 0; i < AC_NUM; i++) {
35954 pos += scnprintf(buf + pos, bufsz - pos,
35955 "\tcw_min\tcw_max\taifsn\ttxop\n");
35956diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-debug.h
35957--- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-03-27 14:31:47.000000000 -0400
35958+++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-debug.h 2011-04-17 15:56:46.000000000 -0400
35959@@ -118,8 +118,8 @@ void iwl_dbgfs_unregister(struct iwl_pri
35960 #endif
35961
35962 #else
35963-#define IWL_DEBUG(__priv, level, fmt, args...)
35964-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...)
35965+#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0)
35966+#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) do {} while (0)
35967 static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level,
35968 void *p, u32 len)
35969 {}
35970diff -urNp linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-dev.h linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-dev.h
35971--- linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-03-27 14:31:47.000000000 -0400
35972+++ linux-2.6.32.43/drivers/net/wireless/iwlwifi/iwl-dev.h 2011-04-17 15:56:46.000000000 -0400
35973@@ -68,7 +68,7 @@ struct iwl_tx_queue;
35974
35975 /* shared structures from iwl-5000.c */
35976 extern struct iwl_mod_params iwl50_mod_params;
35977-extern struct iwl_ops iwl5000_ops;
35978+extern const struct iwl_ops iwl5000_ops;
35979 extern struct iwl_ucode_ops iwl5000_ucode;
35980 extern struct iwl_lib_ops iwl5000_lib;
35981 extern struct iwl_hcmd_ops iwl5000_hcmd;
35982diff -urNp linux-2.6.32.43/drivers/net/wireless/iwmc3200wifi/debugfs.c linux-2.6.32.43/drivers/net/wireless/iwmc3200wifi/debugfs.c
35983--- linux-2.6.32.43/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35984+++ linux-2.6.32.43/drivers/net/wireless/iwmc3200wifi/debugfs.c 2011-05-16 21:46:57.000000000 -0400
35985@@ -299,6 +299,8 @@ static ssize_t iwm_debugfs_fw_err_read(s
35986 int buf_len = 512;
35987 size_t len = 0;
35988
35989+ pax_track_stack();
35990+
35991 if (*ppos != 0)
35992 return 0;
35993 if (count < sizeof(buf))
35994diff -urNp linux-2.6.32.43/drivers/net/wireless/libertas/debugfs.c linux-2.6.32.43/drivers/net/wireless/libertas/debugfs.c
35995--- linux-2.6.32.43/drivers/net/wireless/libertas/debugfs.c 2011-03-27 14:31:47.000000000 -0400
35996+++ linux-2.6.32.43/drivers/net/wireless/libertas/debugfs.c 2011-04-17 15:56:46.000000000 -0400
35997@@ -708,7 +708,7 @@ out_unlock:
35998 struct lbs_debugfs_files {
35999 const char *name;
36000 int perm;
36001- struct file_operations fops;
36002+ const struct file_operations fops;
36003 };
36004
36005 static const struct lbs_debugfs_files debugfs_files[] = {
36006diff -urNp linux-2.6.32.43/drivers/net/wireless/rndis_wlan.c linux-2.6.32.43/drivers/net/wireless/rndis_wlan.c
36007--- linux-2.6.32.43/drivers/net/wireless/rndis_wlan.c 2011-03-27 14:31:47.000000000 -0400
36008+++ linux-2.6.32.43/drivers/net/wireless/rndis_wlan.c 2011-04-17 15:56:46.000000000 -0400
36009@@ -1176,7 +1176,7 @@ static int set_rts_threshold(struct usbn
36010
36011 devdbg(usbdev, "set_rts_threshold %i", rts_threshold);
36012
36013- if (rts_threshold < 0 || rts_threshold > 2347)
36014+ if (rts_threshold > 2347)
36015 rts_threshold = 2347;
36016
36017 tmp = cpu_to_le32(rts_threshold);
36018diff -urNp linux-2.6.32.43/drivers/oprofile/buffer_sync.c linux-2.6.32.43/drivers/oprofile/buffer_sync.c
36019--- linux-2.6.32.43/drivers/oprofile/buffer_sync.c 2011-03-27 14:31:47.000000000 -0400
36020+++ linux-2.6.32.43/drivers/oprofile/buffer_sync.c 2011-04-17 15:56:46.000000000 -0400
36021@@ -341,7 +341,7 @@ static void add_data(struct op_entry *en
36022 if (cookie == NO_COOKIE)
36023 offset = pc;
36024 if (cookie == INVALID_COOKIE) {
36025- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
36026+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
36027 offset = pc;
36028 }
36029 if (cookie != last_cookie) {
36030@@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct
36031 /* add userspace sample */
36032
36033 if (!mm) {
36034- atomic_inc(&oprofile_stats.sample_lost_no_mm);
36035+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
36036 return 0;
36037 }
36038
36039 cookie = lookup_dcookie(mm, s->eip, &offset);
36040
36041 if (cookie == INVALID_COOKIE) {
36042- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
36043+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
36044 return 0;
36045 }
36046
36047@@ -561,7 +561,7 @@ void sync_buffer(int cpu)
36048 /* ignore backtraces if failed to add a sample */
36049 if (state == sb_bt_start) {
36050 state = sb_bt_ignore;
36051- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
36052+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
36053 }
36054 }
36055 release_mm(mm);
36056diff -urNp linux-2.6.32.43/drivers/oprofile/event_buffer.c linux-2.6.32.43/drivers/oprofile/event_buffer.c
36057--- linux-2.6.32.43/drivers/oprofile/event_buffer.c 2011-03-27 14:31:47.000000000 -0400
36058+++ linux-2.6.32.43/drivers/oprofile/event_buffer.c 2011-04-17 15:56:46.000000000 -0400
36059@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value
36060 }
36061
36062 if (buffer_pos == buffer_size) {
36063- atomic_inc(&oprofile_stats.event_lost_overflow);
36064+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
36065 return;
36066 }
36067
36068diff -urNp linux-2.6.32.43/drivers/oprofile/oprof.c linux-2.6.32.43/drivers/oprofile/oprof.c
36069--- linux-2.6.32.43/drivers/oprofile/oprof.c 2011-03-27 14:31:47.000000000 -0400
36070+++ linux-2.6.32.43/drivers/oprofile/oprof.c 2011-04-17 15:56:46.000000000 -0400
36071@@ -110,7 +110,7 @@ static void switch_worker(struct work_st
36072 if (oprofile_ops.switch_events())
36073 return;
36074
36075- atomic_inc(&oprofile_stats.multiplex_counter);
36076+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
36077 start_switch_worker();
36078 }
36079
36080diff -urNp linux-2.6.32.43/drivers/oprofile/oprofilefs.c linux-2.6.32.43/drivers/oprofile/oprofilefs.c
36081--- linux-2.6.32.43/drivers/oprofile/oprofilefs.c 2011-03-27 14:31:47.000000000 -0400
36082+++ linux-2.6.32.43/drivers/oprofile/oprofilefs.c 2011-04-17 15:56:46.000000000 -0400
36083@@ -187,7 +187,7 @@ static const struct file_operations atom
36084
36085
36086 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
36087- char const *name, atomic_t *val)
36088+ char const *name, atomic_unchecked_t *val)
36089 {
36090 struct dentry *d = __oprofilefs_create_file(sb, root, name,
36091 &atomic_ro_fops, 0444);
36092diff -urNp linux-2.6.32.43/drivers/oprofile/oprofile_stats.c linux-2.6.32.43/drivers/oprofile/oprofile_stats.c
36093--- linux-2.6.32.43/drivers/oprofile/oprofile_stats.c 2011-03-27 14:31:47.000000000 -0400
36094+++ linux-2.6.32.43/drivers/oprofile/oprofile_stats.c 2011-04-17 15:56:46.000000000 -0400
36095@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
36096 cpu_buf->sample_invalid_eip = 0;
36097 }
36098
36099- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
36100- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
36101- atomic_set(&oprofile_stats.event_lost_overflow, 0);
36102- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
36103- atomic_set(&oprofile_stats.multiplex_counter, 0);
36104+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
36105+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
36106+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
36107+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
36108+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
36109 }
36110
36111
36112diff -urNp linux-2.6.32.43/drivers/oprofile/oprofile_stats.h linux-2.6.32.43/drivers/oprofile/oprofile_stats.h
36113--- linux-2.6.32.43/drivers/oprofile/oprofile_stats.h 2011-03-27 14:31:47.000000000 -0400
36114+++ linux-2.6.32.43/drivers/oprofile/oprofile_stats.h 2011-04-17 15:56:46.000000000 -0400
36115@@ -13,11 +13,11 @@
36116 #include <asm/atomic.h>
36117
36118 struct oprofile_stat_struct {
36119- atomic_t sample_lost_no_mm;
36120- atomic_t sample_lost_no_mapping;
36121- atomic_t bt_lost_no_mapping;
36122- atomic_t event_lost_overflow;
36123- atomic_t multiplex_counter;
36124+ atomic_unchecked_t sample_lost_no_mm;
36125+ atomic_unchecked_t sample_lost_no_mapping;
36126+ atomic_unchecked_t bt_lost_no_mapping;
36127+ atomic_unchecked_t event_lost_overflow;
36128+ atomic_unchecked_t multiplex_counter;
36129 };
36130
36131 extern struct oprofile_stat_struct oprofile_stats;
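The oprofile hunks above convert pure statistics counters from atomic_t to atomic_unchecked_t and switch every accessor to the *_unchecked variant. Under PaX's REFCOUNT hardening, plain atomic_t arithmetic is instrumented to catch overflows (aimed at reference-count wraps); counters that are only statistics and may legitimately wrap are moved to the unchecked type so they bypass that instrumentation. The same conversion recurs later in this section for the SCSI host-number counter, the s390 qdio performance statistics, the libfc exchange-manager statistics and the lpfc debugfs trace counters. A rough userspace illustration of the split, using only standard C11 atomics (the saturation policy below is an approximation of the real instrumentation, not a copy of it):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>

/* "Checked" flavour: refuse to move past INT_MAX, standing in for the
 * overflow detection applied to reference counts. */
static void checked_inc(atomic_int *v)
{
        int old = atomic_load(v);
        while (old != INT_MAX &&
               !atomic_compare_exchange_weak(v, &old, old + 1))
                ;       /* retry; "old" was refreshed by the failed CAS */
}

/* "Unchecked" flavour: plain wrapping increment, fine for statistics. */
static void unchecked_inc(atomic_int *v)
{
        atomic_fetch_add(v, 1);
}

int main(void)
{
        atomic_int refs = ATOMIC_VAR_INIT(INT_MAX);
        atomic_int stat = ATOMIC_VAR_INIT(INT_MAX);

        checked_inc(&refs);     /* stays at INT_MAX             */
        unchecked_inc(&stat);   /* wraps around to INT_MIN      */

        printf("refs=%d stat=%d\n", atomic_load(&refs), atomic_load(&stat));
        return 0;
}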
36132diff -urNp linux-2.6.32.43/drivers/parisc/pdc_stable.c linux-2.6.32.43/drivers/parisc/pdc_stable.c
36133--- linux-2.6.32.43/drivers/parisc/pdc_stable.c 2011-03-27 14:31:47.000000000 -0400
36134+++ linux-2.6.32.43/drivers/parisc/pdc_stable.c 2011-04-17 15:56:46.000000000 -0400
36135@@ -481,7 +481,7 @@ pdcspath_attr_store(struct kobject *kobj
36136 return ret;
36137 }
36138
36139-static struct sysfs_ops pdcspath_attr_ops = {
36140+static const struct sysfs_ops pdcspath_attr_ops = {
36141 .show = pdcspath_attr_show,
36142 .store = pdcspath_attr_store,
36143 };
36144diff -urNp linux-2.6.32.43/drivers/parport/procfs.c linux-2.6.32.43/drivers/parport/procfs.c
36145--- linux-2.6.32.43/drivers/parport/procfs.c 2011-03-27 14:31:47.000000000 -0400
36146+++ linux-2.6.32.43/drivers/parport/procfs.c 2011-04-17 15:56:46.000000000 -0400
36147@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t
36148
36149 *ppos += len;
36150
36151- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
36152+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
36153 }
36154
36155 #ifdef CONFIG_PARPORT_1284
36156@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table
36157
36158 *ppos += len;
36159
36160- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
36161+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
36162 }
36163 #endif /* IEEE1284.3 support. */
36164
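Both parport proc handlers above gain a length check so that a value larger than the on-stack buffer can never reach copy_to_user(): an oversized request now yields -EFAULT instead of reading past the end of the buffer. A small self-contained sketch of the pattern, with memcpy() standing in for copy_to_user() and all names illustrative:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Copy at most sizeof(buffer) bytes of a prepared report to the caller.
 * Rejecting len > sizeof(buffer) up front means a miscomputed length
 * can never read past the end of the local buffer. */
static int report_to_caller(char *dst, size_t len)
{
        char buffer[256];

        int n = snprintf(buffer, sizeof(buffer), "device state: idle\n");
        if (n < 0)
                return -EFAULT;
        if (len > sizeof(buffer))
                return -EFAULT;          /* same policy as the hunks above */
        memcpy(dst, buffer, len);
        return 0;
}

int main(void)
{
        char out[64];
        printf("%d\n", report_to_caller(out, 20));    /* 0             */
        printf("%d\n", report_to_caller(out, 4096));  /* -EFAULT (-14) */
        return 0;
}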
36165diff -urNp linux-2.6.32.43/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.32.43/drivers/pci/hotplug/acpiphp_glue.c
36166--- linux-2.6.32.43/drivers/pci/hotplug/acpiphp_glue.c 2011-03-27 14:31:47.000000000 -0400
36167+++ linux-2.6.32.43/drivers/pci/hotplug/acpiphp_glue.c 2011-04-17 15:56:46.000000000 -0400
36168@@ -111,7 +111,7 @@ static int post_dock_fixups(struct notif
36169 }
36170
36171
36172-static struct acpi_dock_ops acpiphp_dock_ops = {
36173+static const struct acpi_dock_ops acpiphp_dock_ops = {
36174 .handler = handle_hotplug_event_func,
36175 };
36176
36177diff -urNp linux-2.6.32.43/drivers/pci/hotplug/cpci_hotplug.h linux-2.6.32.43/drivers/pci/hotplug/cpci_hotplug.h
36178--- linux-2.6.32.43/drivers/pci/hotplug/cpci_hotplug.h 2011-03-27 14:31:47.000000000 -0400
36179+++ linux-2.6.32.43/drivers/pci/hotplug/cpci_hotplug.h 2011-08-05 20:33:55.000000000 -0400
36180@@ -59,7 +59,7 @@ struct cpci_hp_controller_ops {
36181 int (*hardware_test) (struct slot* slot, u32 value);
36182 u8 (*get_power) (struct slot* slot);
36183 int (*set_power) (struct slot* slot, int value);
36184-};
36185+} __no_const;
36186
36187 struct cpci_hp_controller {
36188 unsigned int irq;
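cpci_hp_controller_ops is tagged __no_const here, as are several other ops-style structures later in the patch (bq27x00_access_methods, adapter_ops, bfa_hwif_s, bfa_ioc_cbfn_s, ips_hw_func_t). grsecurity's constify GCC plugin makes structures consisting only of function pointers read-only by default; __no_const opts a structure out when its members genuinely are assigned at runtime, as these controller drivers do. A compile-only sketch of the idea, with __no_const reduced to an empty macro because the real attribute is interpreted by the plugin, and all names illustrative:

#include <stdio.h>

/* In a grsecurity build this expands to an attribute consumed by the
 * constify plugin; here it is a no-op so the example stands alone. */
#define __no_const

/* All-function-pointer struct: the plugin would normally force
 * instances of this into read-only memory. */
struct probe_ops {
        int (*hardware_test)(int value);
};

/* Same shape, but explicitly opted out because callers fill it in at
 * runtime, as the hotplug drivers above do. */
struct controller_ops {
        int (*set_power)(int value);
} __no_const;

static int fake_set_power(int value) { return value ? 0 : -1; }

int main(void)
{
        struct controller_ops ops;
        ops.set_power = fake_set_power;        /* runtime assignment */
        printf("%d\n", ops.set_power(1));
        return 0;
}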
36189diff -urNp linux-2.6.32.43/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.32.43/drivers/pci/hotplug/cpqphp_nvram.c
36190--- linux-2.6.32.43/drivers/pci/hotplug/cpqphp_nvram.c 2011-03-27 14:31:47.000000000 -0400
36191+++ linux-2.6.32.43/drivers/pci/hotplug/cpqphp_nvram.c 2011-04-17 15:56:46.000000000 -0400
36192@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_
36193
36194 void compaq_nvram_init (void __iomem *rom_start)
36195 {
36196+
36197+#ifndef CONFIG_PAX_KERNEXEC
36198 if (rom_start) {
36199 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
36200 }
36201+#endif
36202+
36203 dbg("int15 entry = %p\n", compaq_int15_entry_point);
36204
36205 /* initialize our int15 lock */
36206diff -urNp linux-2.6.32.43/drivers/pci/hotplug/fakephp.c linux-2.6.32.43/drivers/pci/hotplug/fakephp.c
36207--- linux-2.6.32.43/drivers/pci/hotplug/fakephp.c 2011-03-27 14:31:47.000000000 -0400
36208+++ linux-2.6.32.43/drivers/pci/hotplug/fakephp.c 2011-04-17 15:56:46.000000000 -0400
36209@@ -73,7 +73,7 @@ static void legacy_release(struct kobjec
36210 }
36211
36212 static struct kobj_type legacy_ktype = {
36213- .sysfs_ops = &(struct sysfs_ops){
36214+ .sysfs_ops = &(const struct sysfs_ops){
36215 .store = legacy_store, .show = legacy_show
36216 },
36217 .release = &legacy_release,
36218diff -urNp linux-2.6.32.43/drivers/pci/intel-iommu.c linux-2.6.32.43/drivers/pci/intel-iommu.c
36219--- linux-2.6.32.43/drivers/pci/intel-iommu.c 2011-05-10 22:12:01.000000000 -0400
36220+++ linux-2.6.32.43/drivers/pci/intel-iommu.c 2011-05-10 22:12:33.000000000 -0400
36221@@ -2643,7 +2643,7 @@ error:
36222 return 0;
36223 }
36224
36225-static dma_addr_t intel_map_page(struct device *dev, struct page *page,
36226+dma_addr_t intel_map_page(struct device *dev, struct page *page,
36227 unsigned long offset, size_t size,
36228 enum dma_data_direction dir,
36229 struct dma_attrs *attrs)
36230@@ -2719,7 +2719,7 @@ static void add_unmap(struct dmar_domain
36231 spin_unlock_irqrestore(&async_umap_flush_lock, flags);
36232 }
36233
36234-static void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
36235+void intel_unmap_page(struct device *dev, dma_addr_t dev_addr,
36236 size_t size, enum dma_data_direction dir,
36237 struct dma_attrs *attrs)
36238 {
36239@@ -2768,7 +2768,7 @@ static void intel_unmap_page(struct devi
36240 }
36241 }
36242
36243-static void *intel_alloc_coherent(struct device *hwdev, size_t size,
36244+void *intel_alloc_coherent(struct device *hwdev, size_t size,
36245 dma_addr_t *dma_handle, gfp_t flags)
36246 {
36247 void *vaddr;
36248@@ -2800,7 +2800,7 @@ static void *intel_alloc_coherent(struct
36249 return NULL;
36250 }
36251
36252-static void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
36253+void intel_free_coherent(struct device *hwdev, size_t size, void *vaddr,
36254 dma_addr_t dma_handle)
36255 {
36256 int order;
36257@@ -2812,7 +2812,7 @@ static void intel_free_coherent(struct d
36258 free_pages((unsigned long)vaddr, order);
36259 }
36260
36261-static void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
36262+void intel_unmap_sg(struct device *hwdev, struct scatterlist *sglist,
36263 int nelems, enum dma_data_direction dir,
36264 struct dma_attrs *attrs)
36265 {
36266@@ -2872,7 +2872,7 @@ static int intel_nontranslate_map_sg(str
36267 return nelems;
36268 }
36269
36270-static int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
36271+int intel_map_sg(struct device *hwdev, struct scatterlist *sglist, int nelems,
36272 enum dma_data_direction dir, struct dma_attrs *attrs)
36273 {
36274 int i;
36275@@ -2941,12 +2941,12 @@ static int intel_map_sg(struct device *h
36276 return nelems;
36277 }
36278
36279-static int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
36280+int intel_mapping_error(struct device *dev, dma_addr_t dma_addr)
36281 {
36282 return !dma_addr;
36283 }
36284
36285-struct dma_map_ops intel_dma_ops = {
36286+const struct dma_map_ops intel_dma_ops = {
36287 .alloc_coherent = intel_alloc_coherent,
36288 .free_coherent = intel_free_coherent,
36289 .map_sg = intel_map_sg,
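The intel-iommu hunk, like the many sysfs_ops, backlight_ops, acpi_dock_ops and ata_port_operations conversions around it, makes operation tables const so they are placed in read-only data and their function pointers cannot be retargeted after initialisation. A minimal sketch of a constified ops table (the names are illustrative, not the driver's):

#include <stdio.h>

struct blink_ops {
        int (*get_brightness)(void);
        int (*update_status)(int level);
};

static int demo_get_brightness(void)     { return 7; }
static int demo_update_status(int level) { printf("level=%d\n", level); return 0; }

/* const: the table ends up in .rodata, so a stray or malicious write
 * cannot swap these pointers for something else later. */
static const struct blink_ops demo_ops = {
        .get_brightness = demo_get_brightness,
        .update_status  = demo_update_status,
};

int main(void)
{
        demo_ops.update_status(demo_ops.get_brightness());
        /* demo_ops.update_status = NULL;   <-- would not compile */
        return 0;
}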
36290diff -urNp linux-2.6.32.43/drivers/pci/pcie/aspm.c linux-2.6.32.43/drivers/pci/pcie/aspm.c
36291--- linux-2.6.32.43/drivers/pci/pcie/aspm.c 2011-03-27 14:31:47.000000000 -0400
36292+++ linux-2.6.32.43/drivers/pci/pcie/aspm.c 2011-04-17 15:56:46.000000000 -0400
36293@@ -27,9 +27,9 @@
36294 #define MODULE_PARAM_PREFIX "pcie_aspm."
36295
36296 /* Note: those are not register definitions */
36297-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
36298-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
36299-#define ASPM_STATE_L1 (4) /* L1 state */
36300+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
36301+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
36302+#define ASPM_STATE_L1 (4U) /* L1 state */
36303 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
36304 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
36305
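The aspm.c hunk adds a U suffix to the ASPM state flags, presumably so that the derived masks and any arithmetic on them stay unsigned. The difference becomes visible once a mask is complemented: on a signed int the complement of a small mask is a negative number, which behaves differently in comparisons. A short demonstration, with the flag values taken from the hunk and everything else illustrative:

#include <stdio.h>

#define ASPM_STATE_L0S_UP (1U)   /* values as in the hunk above */
#define ASPM_STATE_L0S_DW (2U)
#define ASPM_STATE_L1     (4U)
#define ASPM_STATE_L0S    (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
#define ASPM_STATE_ALL    (ASPM_STATE_L0S | ASPM_STATE_L1)

int main(void)
{
        unsigned int cleared = ~ASPM_STATE_ALL;   /* large unsigned mask */
        int cleared_signed = ~7;                  /* same bits, but -8   */

        printf("unsigned mask: %u\n", cleared);
        printf("signed form:   %d (negative: %d)\n",
               cleared_signed, cleared_signed < 0);
        return 0;
}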
36306diff -urNp linux-2.6.32.43/drivers/pci/probe.c linux-2.6.32.43/drivers/pci/probe.c
36307--- linux-2.6.32.43/drivers/pci/probe.c 2011-03-27 14:31:47.000000000 -0400
36308+++ linux-2.6.32.43/drivers/pci/probe.c 2011-04-17 15:56:46.000000000 -0400
36309@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity(
36310 return ret;
36311 }
36312
36313-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev,
36314+static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev,
36315 struct device_attribute *attr,
36316 char *buf)
36317 {
36318 return pci_bus_show_cpuaffinity(dev, 0, attr, buf);
36319 }
36320
36321-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev,
36322+static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev,
36323 struct device_attribute *attr,
36324 char *buf)
36325 {
36326diff -urNp linux-2.6.32.43/drivers/pci/proc.c linux-2.6.32.43/drivers/pci/proc.c
36327--- linux-2.6.32.43/drivers/pci/proc.c 2011-03-27 14:31:47.000000000 -0400
36328+++ linux-2.6.32.43/drivers/pci/proc.c 2011-04-17 15:56:46.000000000 -0400
36329@@ -480,7 +480,16 @@ static const struct file_operations proc
36330 static int __init pci_proc_init(void)
36331 {
36332 struct pci_dev *dev = NULL;
36333+
36334+#ifdef CONFIG_GRKERNSEC_PROC_ADD
36335+#ifdef CONFIG_GRKERNSEC_PROC_USER
36336+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
36337+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
36338+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
36339+#endif
36340+#else
36341 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
36342+#endif
36343 proc_create("devices", 0, proc_bus_pci_dir,
36344 &proc_bus_pci_dev_operations);
36345 proc_initialized = 1;
36346diff -urNp linux-2.6.32.43/drivers/pci/slot.c linux-2.6.32.43/drivers/pci/slot.c
36347--- linux-2.6.32.43/drivers/pci/slot.c 2011-03-27 14:31:47.000000000 -0400
36348+++ linux-2.6.32.43/drivers/pci/slot.c 2011-04-17 15:56:46.000000000 -0400
36349@@ -29,7 +29,7 @@ static ssize_t pci_slot_attr_store(struc
36350 return attribute->store ? attribute->store(slot, buf, len) : -EIO;
36351 }
36352
36353-static struct sysfs_ops pci_slot_sysfs_ops = {
36354+static const struct sysfs_ops pci_slot_sysfs_ops = {
36355 .show = pci_slot_attr_show,
36356 .store = pci_slot_attr_store,
36357 };
36358diff -urNp linux-2.6.32.43/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.32.43/drivers/pcmcia/pcmcia_ioctl.c
36359--- linux-2.6.32.43/drivers/pcmcia/pcmcia_ioctl.c 2011-03-27 14:31:47.000000000 -0400
36360+++ linux-2.6.32.43/drivers/pcmcia/pcmcia_ioctl.c 2011-04-17 15:56:46.000000000 -0400
36361@@ -819,7 +819,7 @@ static int ds_ioctl(struct inode * inode
36362 return -EFAULT;
36363 }
36364 }
36365- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
36366+ buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL);
36367 if (!buf)
36368 return -ENOMEM;
36369
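The pcmcia ioctl hunk switches the argument buffer from kmalloc() to kzalloc(). The buffer is only partially filled for many commands and is later copied back to userspace, so zero-initialising it means bytes the handler never writes cannot leak stale kernel heap contents. The userspace analogue of the change is calloc() in place of malloc():

#include <stdio.h>
#include <stdlib.h>

struct ioctl_arg {
        int  status;
        char payload[32];
};

int main(void)
{
        /* calloc() plays the role of kzalloc(): the whole object starts
         * zeroed, so fields a given command never touches cannot carry
         * whatever the allocator handed back. */
        struct ioctl_arg *arg = calloc(1, sizeof(*arg));
        if (!arg)
                return 1;

        arg->status = 0;     /* only part of the struct is filled in...   */
        /* ...arg->payload deliberately left untouched for this "command" */

        /* stands in for the copy_to_user() at the end of the ioctl */
        printf("payload[0..3] = %d %d %d %d\n",
               arg->payload[0], arg->payload[1], arg->payload[2],
               arg->payload[3]);

        free(arg);
        return 0;
}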
36370diff -urNp linux-2.6.32.43/drivers/platform/x86/acer-wmi.c linux-2.6.32.43/drivers/platform/x86/acer-wmi.c
36371--- linux-2.6.32.43/drivers/platform/x86/acer-wmi.c 2011-03-27 14:31:47.000000000 -0400
36372+++ linux-2.6.32.43/drivers/platform/x86/acer-wmi.c 2011-04-17 15:56:46.000000000 -0400
36373@@ -918,7 +918,7 @@ static int update_bl_status(struct backl
36374 return 0;
36375 }
36376
36377-static struct backlight_ops acer_bl_ops = {
36378+static const struct backlight_ops acer_bl_ops = {
36379 .get_brightness = read_brightness,
36380 .update_status = update_bl_status,
36381 };
36382diff -urNp linux-2.6.32.43/drivers/platform/x86/asus_acpi.c linux-2.6.32.43/drivers/platform/x86/asus_acpi.c
36383--- linux-2.6.32.43/drivers/platform/x86/asus_acpi.c 2011-03-27 14:31:47.000000000 -0400
36384+++ linux-2.6.32.43/drivers/platform/x86/asus_acpi.c 2011-04-17 15:56:46.000000000 -0400
36385@@ -1396,7 +1396,7 @@ static int asus_hotk_remove(struct acpi_
36386 return 0;
36387 }
36388
36389-static struct backlight_ops asus_backlight_data = {
36390+static const struct backlight_ops asus_backlight_data = {
36391 .get_brightness = read_brightness,
36392 .update_status = set_brightness_status,
36393 };
36394diff -urNp linux-2.6.32.43/drivers/platform/x86/asus-laptop.c linux-2.6.32.43/drivers/platform/x86/asus-laptop.c
36395--- linux-2.6.32.43/drivers/platform/x86/asus-laptop.c 2011-03-27 14:31:47.000000000 -0400
36396+++ linux-2.6.32.43/drivers/platform/x86/asus-laptop.c 2011-04-17 15:56:46.000000000 -0400
36397@@ -250,7 +250,7 @@ static struct backlight_device *asus_bac
36398 */
36399 static int read_brightness(struct backlight_device *bd);
36400 static int update_bl_status(struct backlight_device *bd);
36401-static struct backlight_ops asusbl_ops = {
36402+static const struct backlight_ops asusbl_ops = {
36403 .get_brightness = read_brightness,
36404 .update_status = update_bl_status,
36405 };
36406diff -urNp linux-2.6.32.43/drivers/platform/x86/compal-laptop.c linux-2.6.32.43/drivers/platform/x86/compal-laptop.c
36407--- linux-2.6.32.43/drivers/platform/x86/compal-laptop.c 2011-03-27 14:31:47.000000000 -0400
36408+++ linux-2.6.32.43/drivers/platform/x86/compal-laptop.c 2011-04-17 15:56:46.000000000 -0400
36409@@ -163,7 +163,7 @@ static int bl_update_status(struct backl
36410 return set_lcd_level(b->props.brightness);
36411 }
36412
36413-static struct backlight_ops compalbl_ops = {
36414+static const struct backlight_ops compalbl_ops = {
36415 .get_brightness = bl_get_brightness,
36416 .update_status = bl_update_status,
36417 };
36418diff -urNp linux-2.6.32.43/drivers/platform/x86/dell-laptop.c linux-2.6.32.43/drivers/platform/x86/dell-laptop.c
36419--- linux-2.6.32.43/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:01.000000000 -0400
36420+++ linux-2.6.32.43/drivers/platform/x86/dell-laptop.c 2011-05-10 22:12:33.000000000 -0400
36421@@ -318,7 +318,7 @@ static int dell_get_intensity(struct bac
36422 return buffer.output[1];
36423 }
36424
36425-static struct backlight_ops dell_ops = {
36426+static const struct backlight_ops dell_ops = {
36427 .get_brightness = dell_get_intensity,
36428 .update_status = dell_send_intensity,
36429 };
36430diff -urNp linux-2.6.32.43/drivers/platform/x86/eeepc-laptop.c linux-2.6.32.43/drivers/platform/x86/eeepc-laptop.c
36431--- linux-2.6.32.43/drivers/platform/x86/eeepc-laptop.c 2011-03-27 14:31:47.000000000 -0400
36432+++ linux-2.6.32.43/drivers/platform/x86/eeepc-laptop.c 2011-04-17 15:56:46.000000000 -0400
36433@@ -245,7 +245,7 @@ static struct device *eeepc_hwmon_device
36434 */
36435 static int read_brightness(struct backlight_device *bd);
36436 static int update_bl_status(struct backlight_device *bd);
36437-static struct backlight_ops eeepcbl_ops = {
36438+static const struct backlight_ops eeepcbl_ops = {
36439 .get_brightness = read_brightness,
36440 .update_status = update_bl_status,
36441 };
36442diff -urNp linux-2.6.32.43/drivers/platform/x86/fujitsu-laptop.c linux-2.6.32.43/drivers/platform/x86/fujitsu-laptop.c
36443--- linux-2.6.32.43/drivers/platform/x86/fujitsu-laptop.c 2011-03-27 14:31:47.000000000 -0400
36444+++ linux-2.6.32.43/drivers/platform/x86/fujitsu-laptop.c 2011-04-17 15:56:46.000000000 -0400
36445@@ -436,7 +436,7 @@ static int bl_update_status(struct backl
36446 return ret;
36447 }
36448
36449-static struct backlight_ops fujitsubl_ops = {
36450+static const struct backlight_ops fujitsubl_ops = {
36451 .get_brightness = bl_get_brightness,
36452 .update_status = bl_update_status,
36453 };
36454diff -urNp linux-2.6.32.43/drivers/platform/x86/msi-laptop.c linux-2.6.32.43/drivers/platform/x86/msi-laptop.c
36455--- linux-2.6.32.43/drivers/platform/x86/msi-laptop.c 2011-03-27 14:31:47.000000000 -0400
36456+++ linux-2.6.32.43/drivers/platform/x86/msi-laptop.c 2011-04-17 15:56:46.000000000 -0400
36457@@ -161,7 +161,7 @@ static int bl_update_status(struct backl
36458 return set_lcd_level(b->props.brightness);
36459 }
36460
36461-static struct backlight_ops msibl_ops = {
36462+static const struct backlight_ops msibl_ops = {
36463 .get_brightness = bl_get_brightness,
36464 .update_status = bl_update_status,
36465 };
36466diff -urNp linux-2.6.32.43/drivers/platform/x86/panasonic-laptop.c linux-2.6.32.43/drivers/platform/x86/panasonic-laptop.c
36467--- linux-2.6.32.43/drivers/platform/x86/panasonic-laptop.c 2011-03-27 14:31:47.000000000 -0400
36468+++ linux-2.6.32.43/drivers/platform/x86/panasonic-laptop.c 2011-04-17 15:56:46.000000000 -0400
36469@@ -352,7 +352,7 @@ static int bl_set_status(struct backligh
36470 return acpi_pcc_write_sset(pcc, SINF_DC_CUR_BRIGHT, bright);
36471 }
36472
36473-static struct backlight_ops pcc_backlight_ops = {
36474+static const struct backlight_ops pcc_backlight_ops = {
36475 .get_brightness = bl_get,
36476 .update_status = bl_set_status,
36477 };
36478diff -urNp linux-2.6.32.43/drivers/platform/x86/sony-laptop.c linux-2.6.32.43/drivers/platform/x86/sony-laptop.c
36479--- linux-2.6.32.43/drivers/platform/x86/sony-laptop.c 2011-03-27 14:31:47.000000000 -0400
36480+++ linux-2.6.32.43/drivers/platform/x86/sony-laptop.c 2011-04-17 15:56:46.000000000 -0400
36481@@ -850,7 +850,7 @@ static int sony_backlight_get_brightness
36482 }
36483
36484 static struct backlight_device *sony_backlight_device;
36485-static struct backlight_ops sony_backlight_ops = {
36486+static const struct backlight_ops sony_backlight_ops = {
36487 .update_status = sony_backlight_update_status,
36488 .get_brightness = sony_backlight_get_brightness,
36489 };
36490diff -urNp linux-2.6.32.43/drivers/platform/x86/thinkpad_acpi.c linux-2.6.32.43/drivers/platform/x86/thinkpad_acpi.c
36491--- linux-2.6.32.43/drivers/platform/x86/thinkpad_acpi.c 2011-03-27 14:31:47.000000000 -0400
36492+++ linux-2.6.32.43/drivers/platform/x86/thinkpad_acpi.c 2011-08-05 20:33:55.000000000 -0400
36493@@ -2137,7 +2137,7 @@ static int hotkey_mask_get(void)
36494 return 0;
36495 }
36496
36497-void static hotkey_mask_warn_incomplete_mask(void)
36498+static void hotkey_mask_warn_incomplete_mask(void)
36499 {
36500 /* log only what the user can fix... */
36501 const u32 wantedmask = hotkey_driver_mask &
36502@@ -6122,7 +6122,7 @@ static void tpacpi_brightness_notify_cha
36503 BACKLIGHT_UPDATE_HOTKEY);
36504 }
36505
36506-static struct backlight_ops ibm_backlight_data = {
36507+static const struct backlight_ops ibm_backlight_data = {
36508 .get_brightness = brightness_get,
36509 .update_status = brightness_update_status,
36510 };
36511diff -urNp linux-2.6.32.43/drivers/platform/x86/toshiba_acpi.c linux-2.6.32.43/drivers/platform/x86/toshiba_acpi.c
36512--- linux-2.6.32.43/drivers/platform/x86/toshiba_acpi.c 2011-03-27 14:31:47.000000000 -0400
36513+++ linux-2.6.32.43/drivers/platform/x86/toshiba_acpi.c 2011-04-17 15:56:46.000000000 -0400
36514@@ -671,7 +671,7 @@ static acpi_status remove_device(void)
36515 return AE_OK;
36516 }
36517
36518-static struct backlight_ops toshiba_backlight_data = {
36519+static const struct backlight_ops toshiba_backlight_data = {
36520 .get_brightness = get_lcd,
36521 .update_status = set_lcd_status,
36522 };
36523diff -urNp linux-2.6.32.43/drivers/pnp/pnpbios/bioscalls.c linux-2.6.32.43/drivers/pnp/pnpbios/bioscalls.c
36524--- linux-2.6.32.43/drivers/pnp/pnpbios/bioscalls.c 2011-03-27 14:31:47.000000000 -0400
36525+++ linux-2.6.32.43/drivers/pnp/pnpbios/bioscalls.c 2011-04-17 15:56:46.000000000 -0400
36526@@ -60,7 +60,7 @@ do { \
36527 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
36528 } while(0)
36529
36530-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
36531+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
36532 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
36533
36534 /*
36535@@ -97,7 +97,10 @@ static inline u16 call_pnp_bios(u16 func
36536
36537 cpu = get_cpu();
36538 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
36539+
36540+ pax_open_kernel();
36541 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
36542+ pax_close_kernel();
36543
36544 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
36545 spin_lock_irqsave(&pnp_bios_lock, flags);
36546@@ -135,7 +138,10 @@ static inline u16 call_pnp_bios(u16 func
36547 :"memory");
36548 spin_unlock_irqrestore(&pnp_bios_lock, flags);
36549
36550+ pax_open_kernel();
36551 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
36552+ pax_close_kernel();
36553+
36554 put_cpu();
36555
36556 /* If we get here and this is set then the PnP BIOS faulted on us. */
36557@@ -469,7 +475,7 @@ int pnp_bios_read_escd(char *data, u32 n
36558 return status;
36559 }
36560
36561-void pnpbios_calls_init(union pnp_bios_install_struct *header)
36562+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
36563 {
36564 int i;
36565
36566@@ -477,6 +483,8 @@ void pnpbios_calls_init(union pnp_bios_i
36567 pnp_bios_callpoint.offset = header->fields.pm16offset;
36568 pnp_bios_callpoint.segment = PNP_CS16;
36569
36570+ pax_open_kernel();
36571+
36572 for_each_possible_cpu(i) {
36573 struct desc_struct *gdt = get_cpu_gdt_table(i);
36574 if (!gdt)
36575@@ -488,4 +496,6 @@ void pnpbios_calls_init(union pnp_bios_i
36576 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
36577 (unsigned long)__va(header->fields.pm16dseg));
36578 }
36579+
36580+ pax_close_kernel();
36581 }
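The pnpbios hunks bracket the temporary GDT rewrites with pax_open_kernel()/pax_close_kernel(): with PaX KERNEXEC the affected kernel data is read-only, so write protection is lifted only for the duration of the update and restored immediately afterwards. The bad_bios_desc descriptor also becomes const, and its type byte changes from 0x4092 to 0x4093, presumably pre-setting the accessed bit so the CPU never has to write it back into a now read-only entry. A userspace sketch of the open/close pattern using mprotect(), offered purely as an analogy and not as the kernel mechanism:

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

/* Flip one page between read-only and read-write around a write, the way
 * pax_open_kernel()/pax_close_kernel() bracket updates to otherwise
 * read-only kernel data. */
static void *page;
static size_t page_size;

static void open_window(void)  { mprotect(page, page_size, PROT_READ | PROT_WRITE); }
static void close_window(void) { mprotect(page, page_size, PROT_READ); }

int main(void)
{
        page_size = (size_t)sysconf(_SC_PAGESIZE);
        page = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (page == MAP_FAILED)
                return 1;

        strcpy(page, "descriptor v1");
        close_window();                      /* normal state: read-only     */

        open_window();                       /* pax_open_kernel() analogue  */
        strcpy(page, "descriptor v2");       /* the one permitted write     */
        close_window();                      /* pax_close_kernel() analogue */

        puts((const char *)page);
        munmap(page, page_size);
        return 0;
}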
36582diff -urNp linux-2.6.32.43/drivers/pnp/resource.c linux-2.6.32.43/drivers/pnp/resource.c
36583--- linux-2.6.32.43/drivers/pnp/resource.c 2011-03-27 14:31:47.000000000 -0400
36584+++ linux-2.6.32.43/drivers/pnp/resource.c 2011-04-17 15:56:46.000000000 -0400
36585@@ -355,7 +355,7 @@ int pnp_check_irq(struct pnp_dev *dev, s
36586 return 1;
36587
36588 /* check if the resource is valid */
36589- if (*irq < 0 || *irq > 15)
36590+ if (*irq > 15)
36591 return 0;
36592
36593 /* check if the resource is reserved */
36594@@ -419,7 +419,7 @@ int pnp_check_dma(struct pnp_dev *dev, s
36595 return 1;
36596
36597 /* check if the resource is valid */
36598- if (*dma < 0 || *dma == 4 || *dma > 7)
36599+ if (*dma == 4 || *dma > 7)
36600 return 0;
36601
36602 /* check if the resource is reserved */
36603diff -urNp linux-2.6.32.43/drivers/power/bq27x00_battery.c linux-2.6.32.43/drivers/power/bq27x00_battery.c
36604--- linux-2.6.32.43/drivers/power/bq27x00_battery.c 2011-03-27 14:31:47.000000000 -0400
36605+++ linux-2.6.32.43/drivers/power/bq27x00_battery.c 2011-08-05 20:33:55.000000000 -0400
36606@@ -44,7 +44,7 @@ struct bq27x00_device_info;
36607 struct bq27x00_access_methods {
36608 int (*read)(u8 reg, int *rt_value, int b_single,
36609 struct bq27x00_device_info *di);
36610-};
36611+} __no_const;
36612
36613 struct bq27x00_device_info {
36614 struct device *dev;
36615diff -urNp linux-2.6.32.43/drivers/rtc/rtc-dev.c linux-2.6.32.43/drivers/rtc/rtc-dev.c
36616--- linux-2.6.32.43/drivers/rtc/rtc-dev.c 2011-03-27 14:31:47.000000000 -0400
36617+++ linux-2.6.32.43/drivers/rtc/rtc-dev.c 2011-04-17 15:56:46.000000000 -0400
36618@@ -14,6 +14,7 @@
36619 #include <linux/module.h>
36620 #include <linux/rtc.h>
36621 #include <linux/sched.h>
36622+#include <linux/grsecurity.h>
36623 #include "rtc-core.h"
36624
36625 static dev_t rtc_devt;
36626@@ -357,6 +358,8 @@ static long rtc_dev_ioctl(struct file *f
36627 if (copy_from_user(&tm, uarg, sizeof(tm)))
36628 return -EFAULT;
36629
36630+ gr_log_timechange();
36631+
36632 return rtc_set_time(rtc, &tm);
36633
36634 case RTC_PIE_ON:
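The rtc-dev hunk pulls in linux/grsecurity.h and calls gr_log_timechange() just before the hardware clock is set, so that RTC_SET_TIME requests are captured by grsecurity's time-change logging in the same way as other clock changes. A trivial sketch of the hook-before-action pattern (the logging function and names are purely illustrative):

#include <stdio.h>
#include <time.h>

/* Illustrative stand-in for gr_log_timechange(): record that a change
 * was requested before the change is actually applied. */
static void log_timechange(void)
{
        fprintf(stderr, "audit: time change requested\n");
}

static int set_hardware_clock(const struct tm *tm)
{
        log_timechange();                         /* hook first ...       */
        printf("clock set to %04d-%02d-%02d\n",   /* ... then do the work */
               tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday);
        return 0;
}

int main(void)
{
        struct tm tm = { .tm_year = 111, .tm_mon = 7, .tm_mday = 7 };
        return set_hardware_clock(&tm);
}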
36635diff -urNp linux-2.6.32.43/drivers/s390/cio/qdio_perf.c linux-2.6.32.43/drivers/s390/cio/qdio_perf.c
36636--- linux-2.6.32.43/drivers/s390/cio/qdio_perf.c 2011-03-27 14:31:47.000000000 -0400
36637+++ linux-2.6.32.43/drivers/s390/cio/qdio_perf.c 2011-04-17 15:56:46.000000000 -0400
36638@@ -31,51 +31,51 @@ static struct proc_dir_entry *qdio_perf_
36639 static int qdio_perf_proc_show(struct seq_file *m, void *v)
36640 {
36641 seq_printf(m, "Number of qdio interrupts\t\t\t: %li\n",
36642- (long)atomic_long_read(&perf_stats.qdio_int));
36643+ (long)atomic_long_read_unchecked(&perf_stats.qdio_int));
36644 seq_printf(m, "Number of PCI interrupts\t\t\t: %li\n",
36645- (long)atomic_long_read(&perf_stats.pci_int));
36646+ (long)atomic_long_read_unchecked(&perf_stats.pci_int));
36647 seq_printf(m, "Number of adapter interrupts\t\t\t: %li\n",
36648- (long)atomic_long_read(&perf_stats.thin_int));
36649+ (long)atomic_long_read_unchecked(&perf_stats.thin_int));
36650 seq_printf(m, "\n");
36651 seq_printf(m, "Inbound tasklet runs\t\t\t\t: %li\n",
36652- (long)atomic_long_read(&perf_stats.tasklet_inbound));
36653+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_inbound));
36654 seq_printf(m, "Outbound tasklet runs\t\t\t\t: %li\n",
36655- (long)atomic_long_read(&perf_stats.tasklet_outbound));
36656+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_outbound));
36657 seq_printf(m, "Adapter interrupt tasklet runs/loops\t\t: %li/%li\n",
36658- (long)atomic_long_read(&perf_stats.tasklet_thinint),
36659- (long)atomic_long_read(&perf_stats.tasklet_thinint_loop));
36660+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint),
36661+ (long)atomic_long_read_unchecked(&perf_stats.tasklet_thinint_loop));
36662 seq_printf(m, "Adapter interrupt inbound tasklet runs/loops\t: %li/%li\n",
36663- (long)atomic_long_read(&perf_stats.thinint_inbound),
36664- (long)atomic_long_read(&perf_stats.thinint_inbound_loop));
36665+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound),
36666+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop));
36667 seq_printf(m, "\n");
36668 seq_printf(m, "Number of SIGA In issued\t\t\t: %li\n",
36669- (long)atomic_long_read(&perf_stats.siga_in));
36670+ (long)atomic_long_read_unchecked(&perf_stats.siga_in));
36671 seq_printf(m, "Number of SIGA Out issued\t\t\t: %li\n",
36672- (long)atomic_long_read(&perf_stats.siga_out));
36673+ (long)atomic_long_read_unchecked(&perf_stats.siga_out));
36674 seq_printf(m, "Number of SIGA Sync issued\t\t\t: %li\n",
36675- (long)atomic_long_read(&perf_stats.siga_sync));
36676+ (long)atomic_long_read_unchecked(&perf_stats.siga_sync));
36677 seq_printf(m, "\n");
36678 seq_printf(m, "Number of inbound transfers\t\t\t: %li\n",
36679- (long)atomic_long_read(&perf_stats.inbound_handler));
36680+ (long)atomic_long_read_unchecked(&perf_stats.inbound_handler));
36681 seq_printf(m, "Number of outbound transfers\t\t\t: %li\n",
36682- (long)atomic_long_read(&perf_stats.outbound_handler));
36683+ (long)atomic_long_read_unchecked(&perf_stats.outbound_handler));
36684 seq_printf(m, "\n");
36685 seq_printf(m, "Number of fast requeues (outg. SBAL w/o SIGA)\t: %li\n",
36686- (long)atomic_long_read(&perf_stats.fast_requeue));
36687+ (long)atomic_long_read_unchecked(&perf_stats.fast_requeue));
36688 seq_printf(m, "Number of outbound target full condition\t: %li\n",
36689- (long)atomic_long_read(&perf_stats.outbound_target_full));
36690+ (long)atomic_long_read_unchecked(&perf_stats.outbound_target_full));
36691 seq_printf(m, "Number of outbound tasklet mod_timer calls\t: %li\n",
36692- (long)atomic_long_read(&perf_stats.debug_tl_out_timer));
36693+ (long)atomic_long_read_unchecked(&perf_stats.debug_tl_out_timer));
36694 seq_printf(m, "Number of stop polling calls\t\t\t: %li\n",
36695- (long)atomic_long_read(&perf_stats.debug_stop_polling));
36696+ (long)atomic_long_read_unchecked(&perf_stats.debug_stop_polling));
36697 seq_printf(m, "AI inbound tasklet loops after stop polling\t: %li\n",
36698- (long)atomic_long_read(&perf_stats.thinint_inbound_loop2));
36699+ (long)atomic_long_read_unchecked(&perf_stats.thinint_inbound_loop2));
36700 seq_printf(m, "QEBSM EQBS total/incomplete\t\t\t: %li/%li\n",
36701- (long)atomic_long_read(&perf_stats.debug_eqbs_all),
36702- (long)atomic_long_read(&perf_stats.debug_eqbs_incomplete));
36703+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_all),
36704+ (long)atomic_long_read_unchecked(&perf_stats.debug_eqbs_incomplete));
36705 seq_printf(m, "QEBSM SQBS total/incomplete\t\t\t: %li/%li\n",
36706- (long)atomic_long_read(&perf_stats.debug_sqbs_all),
36707- (long)atomic_long_read(&perf_stats.debug_sqbs_incomplete));
36708+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_all),
36709+ (long)atomic_long_read_unchecked(&perf_stats.debug_sqbs_incomplete));
36710 seq_printf(m, "\n");
36711 return 0;
36712 }
36713diff -urNp linux-2.6.32.43/drivers/s390/cio/qdio_perf.h linux-2.6.32.43/drivers/s390/cio/qdio_perf.h
36714--- linux-2.6.32.43/drivers/s390/cio/qdio_perf.h 2011-03-27 14:31:47.000000000 -0400
36715+++ linux-2.6.32.43/drivers/s390/cio/qdio_perf.h 2011-04-17 15:56:46.000000000 -0400
36716@@ -13,46 +13,46 @@
36717
36718 struct qdio_perf_stats {
36719 /* interrupt handler calls */
36720- atomic_long_t qdio_int;
36721- atomic_long_t pci_int;
36722- atomic_long_t thin_int;
36723+ atomic_long_unchecked_t qdio_int;
36724+ atomic_long_unchecked_t pci_int;
36725+ atomic_long_unchecked_t thin_int;
36726
36727 /* tasklet runs */
36728- atomic_long_t tasklet_inbound;
36729- atomic_long_t tasklet_outbound;
36730- atomic_long_t tasklet_thinint;
36731- atomic_long_t tasklet_thinint_loop;
36732- atomic_long_t thinint_inbound;
36733- atomic_long_t thinint_inbound_loop;
36734- atomic_long_t thinint_inbound_loop2;
36735+ atomic_long_unchecked_t tasklet_inbound;
36736+ atomic_long_unchecked_t tasklet_outbound;
36737+ atomic_long_unchecked_t tasklet_thinint;
36738+ atomic_long_unchecked_t tasklet_thinint_loop;
36739+ atomic_long_unchecked_t thinint_inbound;
36740+ atomic_long_unchecked_t thinint_inbound_loop;
36741+ atomic_long_unchecked_t thinint_inbound_loop2;
36742
36743 /* signal adapter calls */
36744- atomic_long_t siga_out;
36745- atomic_long_t siga_in;
36746- atomic_long_t siga_sync;
36747+ atomic_long_unchecked_t siga_out;
36748+ atomic_long_unchecked_t siga_in;
36749+ atomic_long_unchecked_t siga_sync;
36750
36751 /* misc */
36752- atomic_long_t inbound_handler;
36753- atomic_long_t outbound_handler;
36754- atomic_long_t fast_requeue;
36755- atomic_long_t outbound_target_full;
36756+ atomic_long_unchecked_t inbound_handler;
36757+ atomic_long_unchecked_t outbound_handler;
36758+ atomic_long_unchecked_t fast_requeue;
36759+ atomic_long_unchecked_t outbound_target_full;
36760
36761 /* for debugging */
36762- atomic_long_t debug_tl_out_timer;
36763- atomic_long_t debug_stop_polling;
36764- atomic_long_t debug_eqbs_all;
36765- atomic_long_t debug_eqbs_incomplete;
36766- atomic_long_t debug_sqbs_all;
36767- atomic_long_t debug_sqbs_incomplete;
36768+ atomic_long_unchecked_t debug_tl_out_timer;
36769+ atomic_long_unchecked_t debug_stop_polling;
36770+ atomic_long_unchecked_t debug_eqbs_all;
36771+ atomic_long_unchecked_t debug_eqbs_incomplete;
36772+ atomic_long_unchecked_t debug_sqbs_all;
36773+ atomic_long_unchecked_t debug_sqbs_incomplete;
36774 };
36775
36776 extern struct qdio_perf_stats perf_stats;
36777 extern int qdio_performance_stats;
36778
36779-static inline void qdio_perf_stat_inc(atomic_long_t *count)
36780+static inline void qdio_perf_stat_inc(atomic_long_unchecked_t *count)
36781 {
36782 if (qdio_performance_stats)
36783- atomic_long_inc(count);
36784+ atomic_long_inc_unchecked(count);
36785 }
36786
36787 int qdio_setup_perf_stats(void);
36788diff -urNp linux-2.6.32.43/drivers/scsi/aacraid/aacraid.h linux-2.6.32.43/drivers/scsi/aacraid/aacraid.h
36789--- linux-2.6.32.43/drivers/scsi/aacraid/aacraid.h 2011-03-27 14:31:47.000000000 -0400
36790+++ linux-2.6.32.43/drivers/scsi/aacraid/aacraid.h 2011-08-05 20:33:55.000000000 -0400
36791@@ -471,7 +471,7 @@ struct adapter_ops
36792 int (*adapter_scsi)(struct fib * fib, struct scsi_cmnd * cmd);
36793 /* Administrative operations */
36794 int (*adapter_comm)(struct aac_dev * dev, int comm);
36795-};
36796+} __no_const;
36797
36798 /*
36799 * Define which interrupt handler needs to be installed
36800diff -urNp linux-2.6.32.43/drivers/scsi/aacraid/commctrl.c linux-2.6.32.43/drivers/scsi/aacraid/commctrl.c
36801--- linux-2.6.32.43/drivers/scsi/aacraid/commctrl.c 2011-03-27 14:31:47.000000000 -0400
36802+++ linux-2.6.32.43/drivers/scsi/aacraid/commctrl.c 2011-05-16 21:46:57.000000000 -0400
36803@@ -481,6 +481,7 @@ static int aac_send_raw_srb(struct aac_d
36804 u32 actual_fibsize64, actual_fibsize = 0;
36805 int i;
36806
36807+ pax_track_stack();
36808
36809 if (dev->in_reset) {
36810 dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
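This hunk, and a number of the SCSI hunks that follow (BusLogic, dpt_i2o, eata, libfcoe, gdth, gdth_proc, lpfc_debugfs), inserts pax_track_stack() at the top of functions with unusually large stack frames. It appears to be the marker used by PaX's stack-usage tracking: a function about to use a deep frame records how far the stack has grown so that region can be accounted for and cleared later. A rough, purely illustrative sketch of the idea; none of these names are the kernel's:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Record the lowest stack address seen so far, the way a deep-frame
 * function would announce itself before using its buffers. */
static uintptr_t lowest_sp = UINTPTR_MAX;

static void track_stack(void)
{
        char probe;                 /* a local's address ~ current stack depth */
        uintptr_t sp = (uintptr_t)&probe;
        if (sp < lowest_sp)
                lowest_sp = sp;
}

static void big_frame_function(void)
{
        char cmnd[4096];            /* the kind of buffer the drivers above declare */
        track_stack();
        memset(cmnd, 0xff, sizeof(cmnd));
}

int main(void)
{
        uintptr_t before = (uintptr_t)&before;
        big_frame_function();
        printf("stack grew by at least %lu bytes\n",
               (unsigned long)(before - lowest_sp));
        return 0;
}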
36811diff -urNp linux-2.6.32.43/drivers/scsi/aic94xx/aic94xx_init.c linux-2.6.32.43/drivers/scsi/aic94xx/aic94xx_init.c
36812--- linux-2.6.32.43/drivers/scsi/aic94xx/aic94xx_init.c 2011-03-27 14:31:47.000000000 -0400
36813+++ linux-2.6.32.43/drivers/scsi/aic94xx/aic94xx_init.c 2011-04-17 15:56:46.000000000 -0400
36814@@ -485,7 +485,7 @@ static ssize_t asd_show_update_bios(stru
36815 flash_error_table[i].reason);
36816 }
36817
36818-static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUGO,
36819+static DEVICE_ATTR(update_bios, S_IRUGO|S_IWUSR,
36820 asd_show_update_bios, asd_store_update_bios);
36821
36822 static int asd_create_dev_attrs(struct asd_ha_struct *asd_ha)
36823diff -urNp linux-2.6.32.43/drivers/scsi/bfa/bfa_iocfc.h linux-2.6.32.43/drivers/scsi/bfa/bfa_iocfc.h
36824--- linux-2.6.32.43/drivers/scsi/bfa/bfa_iocfc.h 2011-03-27 14:31:47.000000000 -0400
36825+++ linux-2.6.32.43/drivers/scsi/bfa/bfa_iocfc.h 2011-08-05 20:33:55.000000000 -0400
36826@@ -61,7 +61,7 @@ struct bfa_hwif_s {
36827 void (*hw_isr_mode_set)(struct bfa_s *bfa, bfa_boolean_t msix);
36828 void (*hw_msix_getvecs)(struct bfa_s *bfa, u32 *vecmap,
36829 u32 *nvecs, u32 *maxvec);
36830-};
36831+} __no_const;
36832 typedef void (*bfa_cb_iocfc_t) (void *cbarg, enum bfa_status status);
36833
36834 struct bfa_iocfc_s {
36835diff -urNp linux-2.6.32.43/drivers/scsi/bfa/bfa_ioc.h linux-2.6.32.43/drivers/scsi/bfa/bfa_ioc.h
36836--- linux-2.6.32.43/drivers/scsi/bfa/bfa_ioc.h 2011-03-27 14:31:47.000000000 -0400
36837+++ linux-2.6.32.43/drivers/scsi/bfa/bfa_ioc.h 2011-08-05 20:33:55.000000000 -0400
36838@@ -127,7 +127,7 @@ struct bfa_ioc_cbfn_s {
36839 bfa_ioc_disable_cbfn_t disable_cbfn;
36840 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
36841 bfa_ioc_reset_cbfn_t reset_cbfn;
36842-};
36843+} __no_const;
36844
36845 /**
36846 * Heartbeat failure notification queue element.
36847diff -urNp linux-2.6.32.43/drivers/scsi/BusLogic.c linux-2.6.32.43/drivers/scsi/BusLogic.c
36848--- linux-2.6.32.43/drivers/scsi/BusLogic.c 2011-03-27 14:31:47.000000000 -0400
36849+++ linux-2.6.32.43/drivers/scsi/BusLogic.c 2011-05-16 21:46:57.000000000 -0400
36850@@ -961,6 +961,8 @@ static int __init BusLogic_InitializeFla
36851 static void __init BusLogic_InitializeProbeInfoList(struct BusLogic_HostAdapter
36852 *PrototypeHostAdapter)
36853 {
36854+ pax_track_stack();
36855+
36856 /*
36857 If a PCI BIOS is present, interrogate it for MultiMaster and FlashPoint
36858 Host Adapters; otherwise, default to the standard ISA MultiMaster probe.
36859diff -urNp linux-2.6.32.43/drivers/scsi/dpt_i2o.c linux-2.6.32.43/drivers/scsi/dpt_i2o.c
36860--- linux-2.6.32.43/drivers/scsi/dpt_i2o.c 2011-03-27 14:31:47.000000000 -0400
36861+++ linux-2.6.32.43/drivers/scsi/dpt_i2o.c 2011-05-16 21:46:57.000000000 -0400
36862@@ -1804,6 +1804,8 @@ static int adpt_i2o_passthru(adpt_hba* p
36863 dma_addr_t addr;
36864 ulong flags = 0;
36865
36866+ pax_track_stack();
36867+
36868 memset(&msg, 0, MAX_MESSAGE_SIZE*4);
36869 // get user msg size in u32s
36870 if(get_user(size, &user_msg[0])){
36871@@ -2297,6 +2299,8 @@ static s32 adpt_scsi_to_i2o(adpt_hba* pH
36872 s32 rcode;
36873 dma_addr_t addr;
36874
36875+ pax_track_stack();
36876+
36877 memset(msg, 0 , sizeof(msg));
36878 len = scsi_bufflen(cmd);
36879 direction = 0x00000000;
36880diff -urNp linux-2.6.32.43/drivers/scsi/eata.c linux-2.6.32.43/drivers/scsi/eata.c
36881--- linux-2.6.32.43/drivers/scsi/eata.c 2011-03-27 14:31:47.000000000 -0400
36882+++ linux-2.6.32.43/drivers/scsi/eata.c 2011-05-16 21:46:57.000000000 -0400
36883@@ -1087,6 +1087,8 @@ static int port_detect(unsigned long por
36884 struct hostdata *ha;
36885 char name[16];
36886
36887+ pax_track_stack();
36888+
36889 sprintf(name, "%s%d", driver_name, j);
36890
36891 if (!request_region(port_base, REGION_SIZE, driver_name)) {
36892diff -urNp linux-2.6.32.43/drivers/scsi/fcoe/libfcoe.c linux-2.6.32.43/drivers/scsi/fcoe/libfcoe.c
36893--- linux-2.6.32.43/drivers/scsi/fcoe/libfcoe.c 2011-03-27 14:31:47.000000000 -0400
36894+++ linux-2.6.32.43/drivers/scsi/fcoe/libfcoe.c 2011-05-16 21:46:57.000000000 -0400
36895@@ -809,6 +809,8 @@ static void fcoe_ctlr_recv_els(struct fc
36896 size_t rlen;
36897 size_t dlen;
36898
36899+ pax_track_stack();
36900+
36901 fiph = (struct fip_header *)skb->data;
36902 sub = fiph->fip_subcode;
36903 if (sub != FIP_SC_REQ && sub != FIP_SC_REP)
36904diff -urNp linux-2.6.32.43/drivers/scsi/fnic/fnic_main.c linux-2.6.32.43/drivers/scsi/fnic/fnic_main.c
36905--- linux-2.6.32.43/drivers/scsi/fnic/fnic_main.c 2011-03-27 14:31:47.000000000 -0400
36906+++ linux-2.6.32.43/drivers/scsi/fnic/fnic_main.c 2011-08-05 20:33:55.000000000 -0400
36907@@ -669,7 +669,7 @@ static int __devinit fnic_probe(struct p
36908 /* Start local port initiatialization */
36909
36910 lp->link_up = 0;
36911- lp->tt = fnic_transport_template;
36912+ memcpy((void *)&lp->tt, &fnic_transport_template, sizeof(fnic_transport_template));
36913
36914 lp->max_retry_count = fnic->config.flogi_retries;
36915 lp->max_rport_retry_count = fnic->config.plogi_retries;
36916diff -urNp linux-2.6.32.43/drivers/scsi/gdth.c linux-2.6.32.43/drivers/scsi/gdth.c
36917--- linux-2.6.32.43/drivers/scsi/gdth.c 2011-03-27 14:31:47.000000000 -0400
36918+++ linux-2.6.32.43/drivers/scsi/gdth.c 2011-05-16 21:46:57.000000000 -0400
36919@@ -4102,6 +4102,8 @@ static int ioc_lockdrv(void __user *arg)
36920 ulong flags;
36921 gdth_ha_str *ha;
36922
36923+ pax_track_stack();
36924+
36925 if (copy_from_user(&ldrv, arg, sizeof(gdth_ioctl_lockdrv)))
36926 return -EFAULT;
36927 ha = gdth_find_ha(ldrv.ionode);
36928@@ -4134,6 +4136,8 @@ static int ioc_resetdrv(void __user *arg
36929 gdth_ha_str *ha;
36930 int rval;
36931
36932+ pax_track_stack();
36933+
36934 if (copy_from_user(&res, arg, sizeof(gdth_ioctl_reset)) ||
36935 res.number >= MAX_HDRIVES)
36936 return -EFAULT;
36937@@ -4169,6 +4173,8 @@ static int ioc_general(void __user *arg,
36938 gdth_ha_str *ha;
36939 int rval;
36940
36941+ pax_track_stack();
36942+
36943 if (copy_from_user(&gen, arg, sizeof(gdth_ioctl_general)))
36944 return -EFAULT;
36945 ha = gdth_find_ha(gen.ionode);
36946@@ -4625,6 +4631,9 @@ static void gdth_flush(gdth_ha_str *ha)
36947 int i;
36948 gdth_cmd_str gdtcmd;
36949 char cmnd[MAX_COMMAND_SIZE];
36950+
36951+ pax_track_stack();
36952+
36953 memset(cmnd, 0xff, MAX_COMMAND_SIZE);
36954
36955 TRACE2(("gdth_flush() hanum %d\n", ha->hanum));
36956diff -urNp linux-2.6.32.43/drivers/scsi/gdth_proc.c linux-2.6.32.43/drivers/scsi/gdth_proc.c
36957--- linux-2.6.32.43/drivers/scsi/gdth_proc.c 2011-03-27 14:31:47.000000000 -0400
36958+++ linux-2.6.32.43/drivers/scsi/gdth_proc.c 2011-05-16 21:46:57.000000000 -0400
36959@@ -46,6 +46,9 @@ static int gdth_set_asc_info(struct Scsi
36960 ulong64 paddr;
36961
36962 char cmnd[MAX_COMMAND_SIZE];
36963+
36964+ pax_track_stack();
36965+
36966 memset(cmnd, 0xff, 12);
36967 memset(&gdtcmd, 0, sizeof(gdth_cmd_str));
36968
36969@@ -174,6 +177,8 @@ static int gdth_get_info(char *buffer,ch
36970 gdth_hget_str *phg;
36971 char cmnd[MAX_COMMAND_SIZE];
36972
36973+ pax_track_stack();
36974+
36975 gdtcmd = kmalloc(sizeof(*gdtcmd), GFP_KERNEL);
36976 estr = kmalloc(sizeof(*estr), GFP_KERNEL);
36977 if (!gdtcmd || !estr)
36978diff -urNp linux-2.6.32.43/drivers/scsi/hosts.c linux-2.6.32.43/drivers/scsi/hosts.c
36979--- linux-2.6.32.43/drivers/scsi/hosts.c 2011-03-27 14:31:47.000000000 -0400
36980+++ linux-2.6.32.43/drivers/scsi/hosts.c 2011-05-04 17:56:28.000000000 -0400
36981@@ -40,7 +40,7 @@
36982 #include "scsi_logging.h"
36983
36984
36985-static atomic_t scsi_host_next_hn; /* host_no for next new host */
36986+static atomic_unchecked_t scsi_host_next_hn; /* host_no for next new host */
36987
36988
36989 static void scsi_host_cls_release(struct device *dev)
36990@@ -344,7 +344,7 @@ struct Scsi_Host *scsi_host_alloc(struct
36991 * subtract one because we increment first then return, but we need to
36992 * know what the next host number was before increment
36993 */
36994- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
36995+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
36996 shost->dma_channel = 0xff;
36997
36998 /* These three are default values which can be overridden */
36999diff -urNp linux-2.6.32.43/drivers/scsi/ipr.c linux-2.6.32.43/drivers/scsi/ipr.c
37000--- linux-2.6.32.43/drivers/scsi/ipr.c 2011-03-27 14:31:47.000000000 -0400
37001+++ linux-2.6.32.43/drivers/scsi/ipr.c 2011-04-17 15:56:46.000000000 -0400
37002@@ -5286,7 +5286,7 @@ static bool ipr_qc_fill_rtf(struct ata_q
37003 return true;
37004 }
37005
37006-static struct ata_port_operations ipr_sata_ops = {
37007+static const struct ata_port_operations ipr_sata_ops = {
37008 .phy_reset = ipr_ata_phy_reset,
37009 .hardreset = ipr_sata_reset,
37010 .post_internal_cmd = ipr_ata_post_internal,
37011diff -urNp linux-2.6.32.43/drivers/scsi/ips.h linux-2.6.32.43/drivers/scsi/ips.h
37012--- linux-2.6.32.43/drivers/scsi/ips.h 2011-03-27 14:31:47.000000000 -0400
37013+++ linux-2.6.32.43/drivers/scsi/ips.h 2011-08-05 20:33:55.000000000 -0400
37014@@ -1027,7 +1027,7 @@ typedef struct {
37015 int (*intr)(struct ips_ha *);
37016 void (*enableint)(struct ips_ha *);
37017 uint32_t (*statupd)(struct ips_ha *);
37018-} ips_hw_func_t;
37019+} __no_const ips_hw_func_t;
37020
37021 typedef struct ips_ha {
37022 uint8_t ha_id[IPS_MAX_CHANNELS+1];
37023diff -urNp linux-2.6.32.43/drivers/scsi/libfc/fc_disc.c linux-2.6.32.43/drivers/scsi/libfc/fc_disc.c
37024--- linux-2.6.32.43/drivers/scsi/libfc/fc_disc.c 2011-03-27 14:31:47.000000000 -0400
37025+++ linux-2.6.32.43/drivers/scsi/libfc/fc_disc.c 2011-08-05 20:33:55.000000000 -0400
37026@@ -715,16 +715,16 @@ int fc_disc_init(struct fc_lport *lport)
37027 struct fc_disc *disc;
37028
37029 if (!lport->tt.disc_start)
37030- lport->tt.disc_start = fc_disc_start;
37031+ *(void **)&lport->tt.disc_start = fc_disc_start;
37032
37033 if (!lport->tt.disc_stop)
37034- lport->tt.disc_stop = fc_disc_stop;
37035+ *(void **)&lport->tt.disc_stop = fc_disc_stop;
37036
37037 if (!lport->tt.disc_stop_final)
37038- lport->tt.disc_stop_final = fc_disc_stop_final;
37039+ *(void **)&lport->tt.disc_stop_final = fc_disc_stop_final;
37040
37041 if (!lport->tt.disc_recv_req)
37042- lport->tt.disc_recv_req = fc_disc_recv_req;
37043+ *(void **)&lport->tt.disc_recv_req = fc_disc_recv_req;
37044
37045 disc = &lport->disc;
37046 INIT_DELAYED_WORK(&disc->disc_work, fc_disc_timeout);
37047diff -urNp linux-2.6.32.43/drivers/scsi/libfc/fc_elsct.c linux-2.6.32.43/drivers/scsi/libfc/fc_elsct.c
37048--- linux-2.6.32.43/drivers/scsi/libfc/fc_elsct.c 2011-03-27 14:31:47.000000000 -0400
37049+++ linux-2.6.32.43/drivers/scsi/libfc/fc_elsct.c 2011-08-05 20:33:55.000000000 -0400
37050@@ -67,7 +67,7 @@ static struct fc_seq *fc_elsct_send(stru
37051 int fc_elsct_init(struct fc_lport *lport)
37052 {
37053 if (!lport->tt.elsct_send)
37054- lport->tt.elsct_send = fc_elsct_send;
37055+ *(void **)&lport->tt.elsct_send = fc_elsct_send;
37056
37057 return 0;
37058 }
37059diff -urNp linux-2.6.32.43/drivers/scsi/libfc/fc_exch.c linux-2.6.32.43/drivers/scsi/libfc/fc_exch.c
37060--- linux-2.6.32.43/drivers/scsi/libfc/fc_exch.c 2011-03-27 14:31:47.000000000 -0400
37061+++ linux-2.6.32.43/drivers/scsi/libfc/fc_exch.c 2011-08-05 20:33:55.000000000 -0400
37062@@ -86,12 +86,12 @@ struct fc_exch_mgr {
37063 * all together if not used XXX
37064 */
37065 struct {
37066- atomic_t no_free_exch;
37067- atomic_t no_free_exch_xid;
37068- atomic_t xid_not_found;
37069- atomic_t xid_busy;
37070- atomic_t seq_not_found;
37071- atomic_t non_bls_resp;
37072+ atomic_unchecked_t no_free_exch;
37073+ atomic_unchecked_t no_free_exch_xid;
37074+ atomic_unchecked_t xid_not_found;
37075+ atomic_unchecked_t xid_busy;
37076+ atomic_unchecked_t seq_not_found;
37077+ atomic_unchecked_t non_bls_resp;
37078 } stats;
37079 };
37080 #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq)
37081@@ -510,7 +510,7 @@ static struct fc_exch *fc_exch_em_alloc(
37082 /* allocate memory for exchange */
37083 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
37084 if (!ep) {
37085- atomic_inc(&mp->stats.no_free_exch);
37086+ atomic_inc_unchecked(&mp->stats.no_free_exch);
37087 goto out;
37088 }
37089 memset(ep, 0, sizeof(*ep));
37090@@ -557,7 +557,7 @@ out:
37091 return ep;
37092 err:
37093 spin_unlock_bh(&pool->lock);
37094- atomic_inc(&mp->stats.no_free_exch_xid);
37095+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
37096 mempool_free(ep, mp->ep_pool);
37097 return NULL;
37098 }
37099@@ -690,7 +690,7 @@ static enum fc_pf_rjt_reason fc_seq_look
37100 xid = ntohs(fh->fh_ox_id); /* we originated exch */
37101 ep = fc_exch_find(mp, xid);
37102 if (!ep) {
37103- atomic_inc(&mp->stats.xid_not_found);
37104+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37105 reject = FC_RJT_OX_ID;
37106 goto out;
37107 }
37108@@ -720,7 +720,7 @@ static enum fc_pf_rjt_reason fc_seq_look
37109 ep = fc_exch_find(mp, xid);
37110 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
37111 if (ep) {
37112- atomic_inc(&mp->stats.xid_busy);
37113+ atomic_inc_unchecked(&mp->stats.xid_busy);
37114 reject = FC_RJT_RX_ID;
37115 goto rel;
37116 }
37117@@ -731,7 +731,7 @@ static enum fc_pf_rjt_reason fc_seq_look
37118 }
37119 xid = ep->xid; /* get our XID */
37120 } else if (!ep) {
37121- atomic_inc(&mp->stats.xid_not_found);
37122+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37123 reject = FC_RJT_RX_ID; /* XID not found */
37124 goto out;
37125 }
37126@@ -752,7 +752,7 @@ static enum fc_pf_rjt_reason fc_seq_look
37127 } else {
37128 sp = &ep->seq;
37129 if (sp->id != fh->fh_seq_id) {
37130- atomic_inc(&mp->stats.seq_not_found);
37131+ atomic_inc_unchecked(&mp->stats.seq_not_found);
37132 reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */
37133 goto rel;
37134 }
37135@@ -1163,22 +1163,22 @@ static void fc_exch_recv_seq_resp(struct
37136
37137 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
37138 if (!ep) {
37139- atomic_inc(&mp->stats.xid_not_found);
37140+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37141 goto out;
37142 }
37143 if (ep->esb_stat & ESB_ST_COMPLETE) {
37144- atomic_inc(&mp->stats.xid_not_found);
37145+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37146 goto out;
37147 }
37148 if (ep->rxid == FC_XID_UNKNOWN)
37149 ep->rxid = ntohs(fh->fh_rx_id);
37150 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
37151- atomic_inc(&mp->stats.xid_not_found);
37152+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37153 goto rel;
37154 }
37155 if (ep->did != ntoh24(fh->fh_s_id) &&
37156 ep->did != FC_FID_FLOGI) {
37157- atomic_inc(&mp->stats.xid_not_found);
37158+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37159 goto rel;
37160 }
37161 sof = fr_sof(fp);
37162@@ -1189,7 +1189,7 @@ static void fc_exch_recv_seq_resp(struct
37163 } else {
37164 sp = &ep->seq;
37165 if (sp->id != fh->fh_seq_id) {
37166- atomic_inc(&mp->stats.seq_not_found);
37167+ atomic_inc_unchecked(&mp->stats.seq_not_found);
37168 goto rel;
37169 }
37170 }
37171@@ -1249,9 +1249,9 @@ static void fc_exch_recv_resp(struct fc_
37172 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
37173
37174 if (!sp)
37175- atomic_inc(&mp->stats.xid_not_found);
37176+ atomic_inc_unchecked(&mp->stats.xid_not_found);
37177 else
37178- atomic_inc(&mp->stats.non_bls_resp);
37179+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
37180
37181 fc_frame_free(fp);
37182 }
37183@@ -2027,25 +2027,25 @@ EXPORT_SYMBOL(fc_exch_recv);
37184 int fc_exch_init(struct fc_lport *lp)
37185 {
37186 if (!lp->tt.seq_start_next)
37187- lp->tt.seq_start_next = fc_seq_start_next;
37188+ *(void **)&lp->tt.seq_start_next = fc_seq_start_next;
37189
37190 if (!lp->tt.exch_seq_send)
37191- lp->tt.exch_seq_send = fc_exch_seq_send;
37192+ *(void **)&lp->tt.exch_seq_send = fc_exch_seq_send;
37193
37194 if (!lp->tt.seq_send)
37195- lp->tt.seq_send = fc_seq_send;
37196+ *(void **)&lp->tt.seq_send = fc_seq_send;
37197
37198 if (!lp->tt.seq_els_rsp_send)
37199- lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
37200+ *(void **)&lp->tt.seq_els_rsp_send = fc_seq_els_rsp_send;
37201
37202 if (!lp->tt.exch_done)
37203- lp->tt.exch_done = fc_exch_done;
37204+ *(void **)&lp->tt.exch_done = fc_exch_done;
37205
37206 if (!lp->tt.exch_mgr_reset)
37207- lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
37208+ *(void **)&lp->tt.exch_mgr_reset = fc_exch_mgr_reset;
37209
37210 if (!lp->tt.seq_exch_abort)
37211- lp->tt.seq_exch_abort = fc_seq_exch_abort;
37212+ *(void **)&lp->tt.seq_exch_abort = fc_seq_exch_abort;
37213
37214 /*
37215 * Initialize fc_cpu_mask and fc_cpu_order. The
37216diff -urNp linux-2.6.32.43/drivers/scsi/libfc/fc_fcp.c linux-2.6.32.43/drivers/scsi/libfc/fc_fcp.c
37217--- linux-2.6.32.43/drivers/scsi/libfc/fc_fcp.c 2011-03-27 14:31:47.000000000 -0400
37218+++ linux-2.6.32.43/drivers/scsi/libfc/fc_fcp.c 2011-08-05 20:33:55.000000000 -0400
37219@@ -2105,13 +2105,13 @@ int fc_fcp_init(struct fc_lport *lp)
37220 struct fc_fcp_internal *si;
37221
37222 if (!lp->tt.fcp_cmd_send)
37223- lp->tt.fcp_cmd_send = fc_fcp_cmd_send;
37224+ *(void **)&lp->tt.fcp_cmd_send = fc_fcp_cmd_send;
37225
37226 if (!lp->tt.fcp_cleanup)
37227- lp->tt.fcp_cleanup = fc_fcp_cleanup;
37228+ *(void **)&lp->tt.fcp_cleanup = fc_fcp_cleanup;
37229
37230 if (!lp->tt.fcp_abort_io)
37231- lp->tt.fcp_abort_io = fc_fcp_abort_io;
37232+ *(void **)&lp->tt.fcp_abort_io = fc_fcp_abort_io;
37233
37234 si = kzalloc(sizeof(struct fc_fcp_internal), GFP_KERNEL);
37235 if (!si)
37236diff -urNp linux-2.6.32.43/drivers/scsi/libfc/fc_lport.c linux-2.6.32.43/drivers/scsi/libfc/fc_lport.c
37237--- linux-2.6.32.43/drivers/scsi/libfc/fc_lport.c 2011-03-27 14:31:47.000000000 -0400
37238+++ linux-2.6.32.43/drivers/scsi/libfc/fc_lport.c 2011-08-05 20:33:55.000000000 -0400
37239@@ -569,7 +569,7 @@ int fc_lport_destroy(struct fc_lport *lp
37240 mutex_lock(&lport->lp_mutex);
37241 lport->state = LPORT_ST_DISABLED;
37242 lport->link_up = 0;
37243- lport->tt.frame_send = fc_frame_drop;
37244+ *(void **)&lport->tt.frame_send = fc_frame_drop;
37245 mutex_unlock(&lport->lp_mutex);
37246
37247 lport->tt.fcp_abort_io(lport);
37248@@ -1477,10 +1477,10 @@ EXPORT_SYMBOL(fc_lport_config);
37249 int fc_lport_init(struct fc_lport *lport)
37250 {
37251 if (!lport->tt.lport_recv)
37252- lport->tt.lport_recv = fc_lport_recv_req;
37253+ *(void **)&lport->tt.lport_recv = fc_lport_recv_req;
37254
37255 if (!lport->tt.lport_reset)
37256- lport->tt.lport_reset = fc_lport_reset;
37257+ *(void **)&lport->tt.lport_reset = fc_lport_reset;
37258
37259 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
37260 fc_host_node_name(lport->host) = lport->wwnn;
37261diff -urNp linux-2.6.32.43/drivers/scsi/libfc/fc_rport.c linux-2.6.32.43/drivers/scsi/libfc/fc_rport.c
37262--- linux-2.6.32.43/drivers/scsi/libfc/fc_rport.c 2011-03-27 14:31:47.000000000 -0400
37263+++ linux-2.6.32.43/drivers/scsi/libfc/fc_rport.c 2011-08-05 20:33:55.000000000 -0400
37264@@ -1566,25 +1566,25 @@ static void fc_rport_flush_queue(void)
37265 int fc_rport_init(struct fc_lport *lport)
37266 {
37267 if (!lport->tt.rport_lookup)
37268- lport->tt.rport_lookup = fc_rport_lookup;
37269+ *(void **)&lport->tt.rport_lookup = fc_rport_lookup;
37270
37271 if (!lport->tt.rport_create)
37272- lport->tt.rport_create = fc_rport_create;
37273+ *(void **)&lport->tt.rport_create = fc_rport_create;
37274
37275 if (!lport->tt.rport_login)
37276- lport->tt.rport_login = fc_rport_login;
37277+ *(void **)&lport->tt.rport_login = fc_rport_login;
37278
37279 if (!lport->tt.rport_logoff)
37280- lport->tt.rport_logoff = fc_rport_logoff;
37281+ *(void **)&lport->tt.rport_logoff = fc_rport_logoff;
37282
37283 if (!lport->tt.rport_recv_req)
37284- lport->tt.rport_recv_req = fc_rport_recv_req;
37285+ *(void **)&lport->tt.rport_recv_req = fc_rport_recv_req;
37286
37287 if (!lport->tt.rport_flush_queue)
37288- lport->tt.rport_flush_queue = fc_rport_flush_queue;
37289+ *(void **)&lport->tt.rport_flush_queue = fc_rport_flush_queue;
37290
37291 if (!lport->tt.rport_destroy)
37292- lport->tt.rport_destroy = fc_rport_destroy;
37293+ *(void **)&lport->tt.rport_destroy = fc_rport_destroy;
37294
37295 return 0;
37296 }
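The libfc hunks above (fc_disc, fc_elsct, fc_exch, fc_fcp, fc_lport, fc_rport) rewrite the default-handler assignments from `lport->tt.foo = bar` to `*(void **)&lport->tt.foo = bar`. With the libfc template structure constified elsewhere in the patch, its members can no longer be assigned directly; writing through the member's address bypasses the const qualifier for these one-time initialisations while the containing object still lives in writable memory. A small sketch of the pattern, with illustrative types and names; it is only well-behaved because the underlying object is not itself defined const, and it relies on function and object pointers sharing a representation, as the kernel assumes:

#include <stdio.h>
#include <stdlib.h>

struct libfc_function_template {
        /* const-qualified member: ordinary assignment is rejected by the
         * compiler once the template has been constified. */
        void (* const frame_send)(const char *msg);
};

struct lport {
        struct libfc_function_template tt;
};

static void default_frame_send(const char *msg) { printf("send: %s\n", msg); }

int main(void)
{
        struct lport *lp = calloc(1, sizeof(*lp));   /* writable storage */
        if (!lp)
                return 1;

        /* lp->tt.frame_send = default_frame_send;   <-- does not compile */
        *(void **)&lp->tt.frame_send = (void *)default_frame_send;

        lp->tt.frame_send("hello");
        free(lp);
        return 0;
}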
37297diff -urNp linux-2.6.32.43/drivers/scsi/libsas/sas_ata.c linux-2.6.32.43/drivers/scsi/libsas/sas_ata.c
37298--- linux-2.6.32.43/drivers/scsi/libsas/sas_ata.c 2011-03-27 14:31:47.000000000 -0400
37299+++ linux-2.6.32.43/drivers/scsi/libsas/sas_ata.c 2011-04-23 12:56:11.000000000 -0400
37300@@ -343,7 +343,7 @@ static int sas_ata_scr_read(struct ata_l
37301 }
37302 }
37303
37304-static struct ata_port_operations sas_sata_ops = {
37305+static const struct ata_port_operations sas_sata_ops = {
37306 .phy_reset = sas_ata_phy_reset,
37307 .post_internal_cmd = sas_ata_post_internal,
37308 .qc_defer = ata_std_qc_defer,
37309diff -urNp linux-2.6.32.43/drivers/scsi/lpfc/lpfc_debugfs.c linux-2.6.32.43/drivers/scsi/lpfc/lpfc_debugfs.c
37310--- linux-2.6.32.43/drivers/scsi/lpfc/lpfc_debugfs.c 2011-03-27 14:31:47.000000000 -0400
37311+++ linux-2.6.32.43/drivers/scsi/lpfc/lpfc_debugfs.c 2011-05-16 21:46:57.000000000 -0400
37312@@ -124,7 +124,7 @@ struct lpfc_debug {
37313 int len;
37314 };
37315
37316-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
37317+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
37318 static unsigned long lpfc_debugfs_start_time = 0L;
37319
37320 /**
37321@@ -158,7 +158,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_v
37322 lpfc_debugfs_enable = 0;
37323
37324 len = 0;
37325- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
37326+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
37327 (lpfc_debugfs_max_disc_trc - 1);
37328 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
37329 dtp = vport->disc_trc + i;
37330@@ -219,7 +219,7 @@ lpfc_debugfs_slow_ring_trc_data(struct l
37331 lpfc_debugfs_enable = 0;
37332
37333 len = 0;
37334- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
37335+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
37336 (lpfc_debugfs_max_slow_ring_trc - 1);
37337 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
37338 dtp = phba->slow_ring_trc + i;
37339@@ -397,6 +397,8 @@ lpfc_debugfs_dumpHBASlim_data(struct lpf
37340 uint32_t *ptr;
37341 char buffer[1024];
37342
37343+ pax_track_stack();
37344+
37345 off = 0;
37346 spin_lock_irq(&phba->hbalock);
37347
37348@@ -634,14 +636,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport
37349 !vport || !vport->disc_trc)
37350 return;
37351
37352- index = atomic_inc_return(&vport->disc_trc_cnt) &
37353+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
37354 (lpfc_debugfs_max_disc_trc - 1);
37355 dtp = vport->disc_trc + index;
37356 dtp->fmt = fmt;
37357 dtp->data1 = data1;
37358 dtp->data2 = data2;
37359 dtp->data3 = data3;
37360- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
37361+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
37362 dtp->jif = jiffies;
37363 #endif
37364 return;
37365@@ -672,14 +674,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_h
37366 !phba || !phba->slow_ring_trc)
37367 return;
37368
37369- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
37370+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
37371 (lpfc_debugfs_max_slow_ring_trc - 1);
37372 dtp = phba->slow_ring_trc + index;
37373 dtp->fmt = fmt;
37374 dtp->data1 = data1;
37375 dtp->data2 = data2;
37376 dtp->data3 = data3;
37377- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
37378+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
37379 dtp->jif = jiffies;
37380 #endif
37381 return;
37382@@ -1364,7 +1366,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
37383 "slow_ring buffer\n");
37384 goto debug_failed;
37385 }
37386- atomic_set(&phba->slow_ring_trc_cnt, 0);
37387+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
37388 memset(phba->slow_ring_trc, 0,
37389 (sizeof(struct lpfc_debugfs_trc) *
37390 lpfc_debugfs_max_slow_ring_trc));
37391@@ -1410,7 +1412,7 @@ lpfc_debugfs_initialize(struct lpfc_vpor
37392 "buffer\n");
37393 goto debug_failed;
37394 }
37395- atomic_set(&vport->disc_trc_cnt, 0);
37396+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
37397
37398 snprintf(name, sizeof(name), "discovery_trace");
37399 vport->debug_disc_trc =
37400diff -urNp linux-2.6.32.43/drivers/scsi/lpfc/lpfc.h linux-2.6.32.43/drivers/scsi/lpfc/lpfc.h
37401--- linux-2.6.32.43/drivers/scsi/lpfc/lpfc.h 2011-03-27 14:31:47.000000000 -0400
37402+++ linux-2.6.32.43/drivers/scsi/lpfc/lpfc.h 2011-05-04 17:56:28.000000000 -0400
37403@@ -400,7 +400,7 @@ struct lpfc_vport {
37404 struct dentry *debug_nodelist;
37405 struct dentry *vport_debugfs_root;
37406 struct lpfc_debugfs_trc *disc_trc;
37407- atomic_t disc_trc_cnt;
37408+ atomic_unchecked_t disc_trc_cnt;
37409 #endif
37410 uint8_t stat_data_enabled;
37411 uint8_t stat_data_blocked;
37412@@ -725,8 +725,8 @@ struct lpfc_hba {
37413 struct timer_list fabric_block_timer;
37414 unsigned long bit_flags;
37415 #define FABRIC_COMANDS_BLOCKED 0
37416- atomic_t num_rsrc_err;
37417- atomic_t num_cmd_success;
37418+ atomic_unchecked_t num_rsrc_err;
37419+ atomic_unchecked_t num_cmd_success;
37420 unsigned long last_rsrc_error_time;
37421 unsigned long last_ramp_down_time;
37422 unsigned long last_ramp_up_time;
37423@@ -740,7 +740,7 @@ struct lpfc_hba {
37424 struct dentry *debug_dumpDif; /* BlockGuard BPL*/
37425 struct dentry *debug_slow_ring_trc;
37426 struct lpfc_debugfs_trc *slow_ring_trc;
37427- atomic_t slow_ring_trc_cnt;
37428+ atomic_unchecked_t slow_ring_trc_cnt;
37429 #endif
37430
37431 /* Used for deferred freeing of ELS data buffers */
37432diff -urNp linux-2.6.32.43/drivers/scsi/lpfc/lpfc_init.c linux-2.6.32.43/drivers/scsi/lpfc/lpfc_init.c
37433--- linux-2.6.32.43/drivers/scsi/lpfc/lpfc_init.c 2011-03-27 14:31:47.000000000 -0400
37434+++ linux-2.6.32.43/drivers/scsi/lpfc/lpfc_init.c 2011-08-05 20:33:55.000000000 -0400
37435@@ -8021,8 +8021,10 @@ lpfc_init(void)
37436 printk(LPFC_COPYRIGHT "\n");
37437
37438 if (lpfc_enable_npiv) {
37439- lpfc_transport_functions.vport_create = lpfc_vport_create;
37440- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
37441+ pax_open_kernel();
37442+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
37443+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
37444+ pax_close_kernel();
37445 }
37446 lpfc_transport_template =
37447 fc_attach_transport(&lpfc_transport_functions);
37448diff -urNp linux-2.6.32.43/drivers/scsi/lpfc/lpfc_scsi.c linux-2.6.32.43/drivers/scsi/lpfc/lpfc_scsi.c
37449--- linux-2.6.32.43/drivers/scsi/lpfc/lpfc_scsi.c 2011-03-27 14:31:47.000000000 -0400
37450+++ linux-2.6.32.43/drivers/scsi/lpfc/lpfc_scsi.c 2011-05-04 17:56:28.000000000 -0400
37451@@ -259,7 +259,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hb
37452 uint32_t evt_posted;
37453
37454 spin_lock_irqsave(&phba->hbalock, flags);
37455- atomic_inc(&phba->num_rsrc_err);
37456+ atomic_inc_unchecked(&phba->num_rsrc_err);
37457 phba->last_rsrc_error_time = jiffies;
37458
37459 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
37460@@ -300,7 +300,7 @@ lpfc_rampup_queue_depth(struct lpfc_vpor
37461 unsigned long flags;
37462 struct lpfc_hba *phba = vport->phba;
37463 uint32_t evt_posted;
37464- atomic_inc(&phba->num_cmd_success);
37465+ atomic_inc_unchecked(&phba->num_cmd_success);
37466
37467 if (vport->cfg_lun_queue_depth <= queue_depth)
37468 return;
37469@@ -343,8 +343,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
37470 int i;
37471 struct lpfc_rport_data *rdata;
37472
37473- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
37474- num_cmd_success = atomic_read(&phba->num_cmd_success);
37475+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
37476+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
37477
37478 vports = lpfc_create_vport_work_array(phba);
37479 if (vports != NULL)
37480@@ -378,8 +378,8 @@ lpfc_ramp_down_queue_handler(struct lpfc
37481 }
37482 }
37483 lpfc_destroy_vport_work_array(phba, vports);
37484- atomic_set(&phba->num_rsrc_err, 0);
37485- atomic_set(&phba->num_cmd_success, 0);
37486+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
37487+ atomic_set_unchecked(&phba->num_cmd_success, 0);
37488 }
37489
37490 /**
37491@@ -427,8 +427,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_h
37492 }
37493 }
37494 lpfc_destroy_vport_work_array(phba, vports);
37495- atomic_set(&phba->num_rsrc_err, 0);
37496- atomic_set(&phba->num_cmd_success, 0);
37497+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
37498+ atomic_set_unchecked(&phba->num_cmd_success, 0);
37499 }
37500
37501 /**
37502diff -urNp linux-2.6.32.43/drivers/scsi/megaraid/megaraid_mbox.c linux-2.6.32.43/drivers/scsi/megaraid/megaraid_mbox.c
37503--- linux-2.6.32.43/drivers/scsi/megaraid/megaraid_mbox.c 2011-03-27 14:31:47.000000000 -0400
37504+++ linux-2.6.32.43/drivers/scsi/megaraid/megaraid_mbox.c 2011-05-16 21:46:57.000000000 -0400
37505@@ -3503,6 +3503,8 @@ megaraid_cmm_register(adapter_t *adapter
37506 int rval;
37507 int i;
37508
37509+ pax_track_stack();
37510+
37511 // Allocate memory for the base list of scb for management module.
37512 adapter->uscb_list = kcalloc(MBOX_MAX_USER_CMDS, sizeof(scb_t), GFP_KERNEL);
37513
37514diff -urNp linux-2.6.32.43/drivers/scsi/osd/osd_initiator.c linux-2.6.32.43/drivers/scsi/osd/osd_initiator.c
37515--- linux-2.6.32.43/drivers/scsi/osd/osd_initiator.c 2011-03-27 14:31:47.000000000 -0400
37516+++ linux-2.6.32.43/drivers/scsi/osd/osd_initiator.c 2011-05-16 21:46:57.000000000 -0400
37517@@ -94,6 +94,8 @@ static int _osd_print_system_info(struct
37518 int nelem = ARRAY_SIZE(get_attrs), a = 0;
37519 int ret;
37520
37521+ pax_track_stack();
37522+
37523 or = osd_start_request(od, GFP_KERNEL);
37524 if (!or)
37525 return -ENOMEM;
37526diff -urNp linux-2.6.32.43/drivers/scsi/pmcraid.c linux-2.6.32.43/drivers/scsi/pmcraid.c
37527--- linux-2.6.32.43/drivers/scsi/pmcraid.c 2011-05-10 22:12:01.000000000 -0400
37528+++ linux-2.6.32.43/drivers/scsi/pmcraid.c 2011-05-10 22:12:33.000000000 -0400
37529@@ -189,8 +189,8 @@ static int pmcraid_slave_alloc(struct sc
37530 res->scsi_dev = scsi_dev;
37531 scsi_dev->hostdata = res;
37532 res->change_detected = 0;
37533- atomic_set(&res->read_failures, 0);
37534- atomic_set(&res->write_failures, 0);
37535+ atomic_set_unchecked(&res->read_failures, 0);
37536+ atomic_set_unchecked(&res->write_failures, 0);
37537 rc = 0;
37538 }
37539 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
37540@@ -2396,9 +2396,9 @@ static int pmcraid_error_handler(struct
37541
37542 /* If this was a SCSI read/write command keep count of errors */
37543 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
37544- atomic_inc(&res->read_failures);
37545+ atomic_inc_unchecked(&res->read_failures);
37546 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
37547- atomic_inc(&res->write_failures);
37548+ atomic_inc_unchecked(&res->write_failures);
37549
37550 if (!RES_IS_GSCSI(res->cfg_entry) &&
37551 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
37552@@ -4113,7 +4113,7 @@ static void pmcraid_worker_function(stru
37553
37554 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
37555 /* add resources only after host is added into system */
37556- if (!atomic_read(&pinstance->expose_resources))
37557+ if (!atomic_read_unchecked(&pinstance->expose_resources))
37558 return;
37559
37560 spin_lock_irqsave(&pinstance->resource_lock, lock_flags);
37561@@ -4847,7 +4847,7 @@ static int __devinit pmcraid_init_instan
37562 init_waitqueue_head(&pinstance->reset_wait_q);
37563
37564 atomic_set(&pinstance->outstanding_cmds, 0);
37565- atomic_set(&pinstance->expose_resources, 0);
37566+ atomic_set_unchecked(&pinstance->expose_resources, 0);
37567
37568 INIT_LIST_HEAD(&pinstance->free_res_q);
37569 INIT_LIST_HEAD(&pinstance->used_res_q);
37570@@ -5499,7 +5499,7 @@ static int __devinit pmcraid_probe(
37571 /* Schedule worker thread to handle CCN and take care of adding and
37572 * removing devices to OS
37573 */
37574- atomic_set(&pinstance->expose_resources, 1);
37575+ atomic_set_unchecked(&pinstance->expose_resources, 1);
37576 schedule_work(&pinstance->worker_q);
37577 return rc;
37578
37579diff -urNp linux-2.6.32.43/drivers/scsi/pmcraid.h linux-2.6.32.43/drivers/scsi/pmcraid.h
37580--- linux-2.6.32.43/drivers/scsi/pmcraid.h 2011-03-27 14:31:47.000000000 -0400
37581+++ linux-2.6.32.43/drivers/scsi/pmcraid.h 2011-05-04 17:56:28.000000000 -0400
37582@@ -690,7 +690,7 @@ struct pmcraid_instance {
37583 atomic_t outstanding_cmds;
37584
37585 /* should add/delete resources to mid-layer now ?*/
37586- atomic_t expose_resources;
37587+ atomic_unchecked_t expose_resources;
37588
37589 /* Tasklet to handle deferred processing */
37590 struct tasklet_struct isr_tasklet[PMCRAID_NUM_MSIX_VECTORS];
37591@@ -727,8 +727,8 @@ struct pmcraid_resource_entry {
37592 struct list_head queue; /* link to "to be exposed" resources */
37593 struct pmcraid_config_table_entry cfg_entry;
37594 struct scsi_device *scsi_dev; /* Link scsi_device structure */
37595- atomic_t read_failures; /* count of failed READ commands */
37596- atomic_t write_failures; /* count of failed WRITE commands */
37597+ atomic_unchecked_t read_failures; /* count of failed READ commands */
37598+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
37599
37600 /* To indicate add/delete/modify during CCN */
37601 u8 change_detected;
37602diff -urNp linux-2.6.32.43/drivers/scsi/qla2xxx/qla_def.h linux-2.6.32.43/drivers/scsi/qla2xxx/qla_def.h
37603--- linux-2.6.32.43/drivers/scsi/qla2xxx/qla_def.h 2011-03-27 14:31:47.000000000 -0400
37604+++ linux-2.6.32.43/drivers/scsi/qla2xxx/qla_def.h 2011-08-05 20:33:55.000000000 -0400
37605@@ -2089,7 +2089,7 @@ struct isp_operations {
37606
37607 int (*get_flash_version) (struct scsi_qla_host *, void *);
37608 int (*start_scsi) (srb_t *);
37609-};
37610+} __no_const;
37611
37612 /* MSI-X Support *************************************************************/
37613
37614diff -urNp linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_def.h linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_def.h
37615--- linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_def.h 2011-03-27 14:31:47.000000000 -0400
37616+++ linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_def.h 2011-05-04 17:56:28.000000000 -0400
37617@@ -240,7 +240,7 @@ struct ddb_entry {
37618 atomic_t retry_relogin_timer; /* Min Time between relogins
37619 * (4000 only) */
37620 atomic_t relogin_timer; /* Max Time to wait for relogin to complete */
37621- atomic_t relogin_retry_count; /* Num of times relogin has been
37622+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
37623 * retried */
37624
37625 uint16_t port;
37626diff -urNp linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_init.c linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_init.c
37627--- linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_init.c 2011-03-27 14:31:47.000000000 -0400
37628+++ linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_init.c 2011-05-04 17:56:28.000000000 -0400
37629@@ -482,7 +482,7 @@ static struct ddb_entry * qla4xxx_alloc_
37630 atomic_set(&ddb_entry->port_down_timer, ha->port_down_retry_count);
37631 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
37632 atomic_set(&ddb_entry->relogin_timer, 0);
37633- atomic_set(&ddb_entry->relogin_retry_count, 0);
37634+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37635 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
37636 list_add_tail(&ddb_entry->list, &ha->ddb_list);
37637 ha->fw_ddb_index_map[fw_ddb_index] = ddb_entry;
37638@@ -1308,7 +1308,7 @@ int qla4xxx_process_ddb_changed(struct s
37639 atomic_set(&ddb_entry->state, DDB_STATE_ONLINE);
37640 atomic_set(&ddb_entry->port_down_timer,
37641 ha->port_down_retry_count);
37642- atomic_set(&ddb_entry->relogin_retry_count, 0);
37643+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
37644 atomic_set(&ddb_entry->relogin_timer, 0);
37645 clear_bit(DF_RELOGIN, &ddb_entry->flags);
37646 clear_bit(DF_NO_RELOGIN, &ddb_entry->flags);
37647diff -urNp linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_os.c linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_os.c
37648--- linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_os.c 2011-03-27 14:31:47.000000000 -0400
37649+++ linux-2.6.32.43/drivers/scsi/qla4xxx/ql4_os.c 2011-05-04 17:56:28.000000000 -0400
37650@@ -641,13 +641,13 @@ static void qla4xxx_timer(struct scsi_ql
37651 ddb_entry->fw_ddb_device_state ==
37652 DDB_DS_SESSION_FAILED) {
37653 /* Reset retry relogin timer */
37654- atomic_inc(&ddb_entry->relogin_retry_count);
37655+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
37656 DEBUG2(printk("scsi%ld: index[%d] relogin"
37657 " timed out-retrying"
37658 " relogin (%d)\n",
37659 ha->host_no,
37660 ddb_entry->fw_ddb_index,
37661- atomic_read(&ddb_entry->
37662+ atomic_read_unchecked(&ddb_entry->
37663 relogin_retry_count))
37664 );
37665 start_dpc++;
37666diff -urNp linux-2.6.32.43/drivers/scsi/scsi.c linux-2.6.32.43/drivers/scsi/scsi.c
37667--- linux-2.6.32.43/drivers/scsi/scsi.c 2011-03-27 14:31:47.000000000 -0400
37668+++ linux-2.6.32.43/drivers/scsi/scsi.c 2011-05-04 17:56:28.000000000 -0400
37669@@ -652,7 +652,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *
37670 unsigned long timeout;
37671 int rtn = 0;
37672
37673- atomic_inc(&cmd->device->iorequest_cnt);
37674+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37675
37676 /* check if the device is still usable */
37677 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
37678diff -urNp linux-2.6.32.43/drivers/scsi/scsi_debug.c linux-2.6.32.43/drivers/scsi/scsi_debug.c
37679--- linux-2.6.32.43/drivers/scsi/scsi_debug.c 2011-03-27 14:31:47.000000000 -0400
37680+++ linux-2.6.32.43/drivers/scsi/scsi_debug.c 2011-05-16 21:46:57.000000000 -0400
37681@@ -1395,6 +1395,8 @@ static int resp_mode_select(struct scsi_
37682 unsigned char arr[SDEBUG_MAX_MSELECT_SZ];
37683 unsigned char *cmd = (unsigned char *)scp->cmnd;
37684
37685+ pax_track_stack();
37686+
37687 if ((errsts = check_readiness(scp, 1, devip)))
37688 return errsts;
37689 memset(arr, 0, sizeof(arr));
37690@@ -1492,6 +1494,8 @@ static int resp_log_sense(struct scsi_cm
37691 unsigned char arr[SDEBUG_MAX_LSENSE_SZ];
37692 unsigned char *cmd = (unsigned char *)scp->cmnd;
37693
37694+ pax_track_stack();
37695+
37696 if ((errsts = check_readiness(scp, 1, devip)))
37697 return errsts;
37698 memset(arr, 0, sizeof(arr));
37699diff -urNp linux-2.6.32.43/drivers/scsi/scsi_lib.c linux-2.6.32.43/drivers/scsi/scsi_lib.c
37700--- linux-2.6.32.43/drivers/scsi/scsi_lib.c 2011-05-10 22:12:01.000000000 -0400
37701+++ linux-2.6.32.43/drivers/scsi/scsi_lib.c 2011-05-10 22:12:33.000000000 -0400
37702@@ -1384,7 +1384,7 @@ static void scsi_kill_request(struct req
37703
37704 scsi_init_cmd_errh(cmd);
37705 cmd->result = DID_NO_CONNECT << 16;
37706- atomic_inc(&cmd->device->iorequest_cnt);
37707+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
37708
37709 /*
37710 * SCSI request completion path will do scsi_device_unbusy(),
37711@@ -1415,9 +1415,9 @@ static void scsi_softirq_done(struct req
37712 */
37713 cmd->serial_number = 0;
37714
37715- atomic_inc(&cmd->device->iodone_cnt);
37716+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
37717 if (cmd->result)
37718- atomic_inc(&cmd->device->ioerr_cnt);
37719+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
37720
37721 disposition = scsi_decide_disposition(cmd);
37722 if (disposition != SUCCESS &&
37723diff -urNp linux-2.6.32.43/drivers/scsi/scsi_sysfs.c linux-2.6.32.43/drivers/scsi/scsi_sysfs.c
37724--- linux-2.6.32.43/drivers/scsi/scsi_sysfs.c 2011-06-25 12:55:34.000000000 -0400
37725+++ linux-2.6.32.43/drivers/scsi/scsi_sysfs.c 2011-06-25 12:56:37.000000000 -0400
37726@@ -662,7 +662,7 @@ show_iostat_##field(struct device *dev,
37727 char *buf) \
37728 { \
37729 struct scsi_device *sdev = to_scsi_device(dev); \
37730- unsigned long long count = atomic_read(&sdev->field); \
37731+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
37732 return snprintf(buf, 20, "0x%llx\n", count); \
37733 } \
37734 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
37735diff -urNp linux-2.6.32.43/drivers/scsi/scsi_transport_fc.c linux-2.6.32.43/drivers/scsi/scsi_transport_fc.c
37736--- linux-2.6.32.43/drivers/scsi/scsi_transport_fc.c 2011-03-27 14:31:47.000000000 -0400
37737+++ linux-2.6.32.43/drivers/scsi/scsi_transport_fc.c 2011-05-04 17:56:28.000000000 -0400
37738@@ -480,7 +480,7 @@ MODULE_PARM_DESC(dev_loss_tmo,
37739 * Netlink Infrastructure
37740 */
37741
37742-static atomic_t fc_event_seq;
37743+static atomic_unchecked_t fc_event_seq;
37744
37745 /**
37746 * fc_get_event_number - Obtain the next sequential FC event number
37747@@ -493,7 +493,7 @@ static atomic_t fc_event_seq;
37748 u32
37749 fc_get_event_number(void)
37750 {
37751- return atomic_add_return(1, &fc_event_seq);
37752+ return atomic_add_return_unchecked(1, &fc_event_seq);
37753 }
37754 EXPORT_SYMBOL(fc_get_event_number);
37755
37756@@ -641,7 +641,7 @@ static __init int fc_transport_init(void
37757 {
37758 int error;
37759
37760- atomic_set(&fc_event_seq, 0);
37761+ atomic_set_unchecked(&fc_event_seq, 0);
37762
37763 error = transport_class_register(&fc_host_class);
37764 if (error)
37765diff -urNp linux-2.6.32.43/drivers/scsi/scsi_transport_iscsi.c linux-2.6.32.43/drivers/scsi/scsi_transport_iscsi.c
37766--- linux-2.6.32.43/drivers/scsi/scsi_transport_iscsi.c 2011-03-27 14:31:47.000000000 -0400
37767+++ linux-2.6.32.43/drivers/scsi/scsi_transport_iscsi.c 2011-05-04 17:56:28.000000000 -0400
37768@@ -81,7 +81,7 @@ struct iscsi_internal {
37769 struct device_attribute *session_attrs[ISCSI_SESSION_ATTRS + 1];
37770 };
37771
37772-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
37773+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
37774 static struct workqueue_struct *iscsi_eh_timer_workq;
37775
37776 /*
37777@@ -728,7 +728,7 @@ int iscsi_add_session(struct iscsi_cls_s
37778 int err;
37779
37780 ihost = shost->shost_data;
37781- session->sid = atomic_add_return(1, &iscsi_session_nr);
37782+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
37783
37784 if (id == ISCSI_MAX_TARGET) {
37785 for (id = 0; id < ISCSI_MAX_TARGET; id++) {
37786@@ -2060,7 +2060,7 @@ static __init int iscsi_transport_init(v
37787 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
37788 ISCSI_TRANSPORT_VERSION);
37789
37790- atomic_set(&iscsi_session_nr, 0);
37791+ atomic_set_unchecked(&iscsi_session_nr, 0);
37792
37793 err = class_register(&iscsi_transport_class);
37794 if (err)
37795diff -urNp linux-2.6.32.43/drivers/scsi/scsi_transport_srp.c linux-2.6.32.43/drivers/scsi/scsi_transport_srp.c
37796--- linux-2.6.32.43/drivers/scsi/scsi_transport_srp.c 2011-03-27 14:31:47.000000000 -0400
37797+++ linux-2.6.32.43/drivers/scsi/scsi_transport_srp.c 2011-05-04 17:56:28.000000000 -0400
37798@@ -33,7 +33,7 @@
37799 #include "scsi_transport_srp_internal.h"
37800
37801 struct srp_host_attrs {
37802- atomic_t next_port_id;
37803+ atomic_unchecked_t next_port_id;
37804 };
37805 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
37806
37807@@ -62,7 +62,7 @@ static int srp_host_setup(struct transpo
37808 struct Scsi_Host *shost = dev_to_shost(dev);
37809 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
37810
37811- atomic_set(&srp_host->next_port_id, 0);
37812+ atomic_set_unchecked(&srp_host->next_port_id, 0);
37813 return 0;
37814 }
37815
37816@@ -211,7 +211,7 @@ struct srp_rport *srp_rport_add(struct S
37817 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
37818 rport->roles = ids->roles;
37819
37820- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
37821+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
37822 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
37823
37824 transport_setup_device(&rport->dev);
37825diff -urNp linux-2.6.32.43/drivers/scsi/sg.c linux-2.6.32.43/drivers/scsi/sg.c
37826--- linux-2.6.32.43/drivers/scsi/sg.c 2011-03-27 14:31:47.000000000 -0400
37827+++ linux-2.6.32.43/drivers/scsi/sg.c 2011-04-17 15:56:46.000000000 -0400
37828@@ -2292,7 +2292,7 @@ struct sg_proc_leaf {
37829 const struct file_operations * fops;
37830 };
37831
37832-static struct sg_proc_leaf sg_proc_leaf_arr[] = {
37833+static const struct sg_proc_leaf sg_proc_leaf_arr[] = {
37834 {"allow_dio", &adio_fops},
37835 {"debug", &debug_fops},
37836 {"def_reserved_size", &dressz_fops},
37837@@ -2307,7 +2307,7 @@ sg_proc_init(void)
37838 {
37839 int k, mask;
37840 int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr);
37841- struct sg_proc_leaf * leaf;
37842+ const struct sg_proc_leaf * leaf;
37843
37844 sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL);
37845 if (!sg_proc_sgp)
37846diff -urNp linux-2.6.32.43/drivers/scsi/sym53c8xx_2/sym_glue.c linux-2.6.32.43/drivers/scsi/sym53c8xx_2/sym_glue.c
37847--- linux-2.6.32.43/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-03-27 14:31:47.000000000 -0400
37848+++ linux-2.6.32.43/drivers/scsi/sym53c8xx_2/sym_glue.c 2011-05-16 21:46:57.000000000 -0400
37849@@ -1754,6 +1754,8 @@ static int __devinit sym2_probe(struct p
37850 int do_iounmap = 0;
37851 int do_disable_device = 1;
37852
37853+ pax_track_stack();
37854+
37855 memset(&sym_dev, 0, sizeof(sym_dev));
37856 memset(&nvram, 0, sizeof(nvram));
37857 sym_dev.pdev = pdev;
37858diff -urNp linux-2.6.32.43/drivers/serial/kgdboc.c linux-2.6.32.43/drivers/serial/kgdboc.c
37859--- linux-2.6.32.43/drivers/serial/kgdboc.c 2011-03-27 14:31:47.000000000 -0400
37860+++ linux-2.6.32.43/drivers/serial/kgdboc.c 2011-04-17 15:56:46.000000000 -0400
37861@@ -18,7 +18,7 @@
37862
37863 #define MAX_CONFIG_LEN 40
37864
37865-static struct kgdb_io kgdboc_io_ops;
37866+static const struct kgdb_io kgdboc_io_ops;
37867
37868 /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
37869 static int configured = -1;
37870@@ -154,7 +154,7 @@ static void kgdboc_post_exp_handler(void
37871 module_put(THIS_MODULE);
37872 }
37873
37874-static struct kgdb_io kgdboc_io_ops = {
37875+static const struct kgdb_io kgdboc_io_ops = {
37876 .name = "kgdboc",
37877 .read_char = kgdboc_get_char,
37878 .write_char = kgdboc_put_char,
37879diff -urNp linux-2.6.32.43/drivers/spi/spi.c linux-2.6.32.43/drivers/spi/spi.c
37880--- linux-2.6.32.43/drivers/spi/spi.c 2011-03-27 14:31:47.000000000 -0400
37881+++ linux-2.6.32.43/drivers/spi/spi.c 2011-05-04 17:56:28.000000000 -0400
37882@@ -774,7 +774,7 @@ int spi_sync(struct spi_device *spi, str
37883 EXPORT_SYMBOL_GPL(spi_sync);
37884
37885 /* portable code must never pass more than 32 bytes */
37886-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
37887+#define SPI_BUFSIZ max(32U,SMP_CACHE_BYTES)
37888
37889 static u8 *buf;
37890
37891diff -urNp linux-2.6.32.43/drivers/ssb/driver_gige.c linux-2.6.32.43/drivers/ssb/driver_gige.c
37892--- linux-2.6.32.43/drivers/ssb/driver_gige.c 2011-03-27 14:31:47.000000000 -0400
37893+++ linux-2.6.32.43/drivers/ssb/driver_gige.c 2011-08-05 20:33:55.000000000 -0400
37894@@ -180,8 +180,8 @@ static int ssb_gige_probe(struct ssb_dev
37895 dev->pci_controller.io_resource = &dev->io_resource;
37896 dev->pci_controller.mem_resource = &dev->mem_resource;
37897 dev->pci_controller.io_map_base = 0x800;
37898- dev->pci_ops.read = ssb_gige_pci_read_config;
37899- dev->pci_ops.write = ssb_gige_pci_write_config;
37900+ *(void **)&dev->pci_ops.read = ssb_gige_pci_read_config;
37901+ *(void **)&dev->pci_ops.write = ssb_gige_pci_write_config;
37902
37903 dev->io_resource.name = SSB_GIGE_IO_RES_NAME;
37904 dev->io_resource.start = 0x800;
37905diff -urNp linux-2.6.32.43/drivers/staging/android/binder.c linux-2.6.32.43/drivers/staging/android/binder.c
37906--- linux-2.6.32.43/drivers/staging/android/binder.c 2011-03-27 14:31:47.000000000 -0400
37907+++ linux-2.6.32.43/drivers/staging/android/binder.c 2011-04-17 15:56:46.000000000 -0400
37908@@ -2756,7 +2756,7 @@ static void binder_vma_close(struct vm_a
37909 binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
37910 }
37911
37912-static struct vm_operations_struct binder_vm_ops = {
37913+static const struct vm_operations_struct binder_vm_ops = {
37914 .open = binder_vma_open,
37915 .close = binder_vma_close,
37916 };
37917diff -urNp linux-2.6.32.43/drivers/staging/b3dfg/b3dfg.c linux-2.6.32.43/drivers/staging/b3dfg/b3dfg.c
37918--- linux-2.6.32.43/drivers/staging/b3dfg/b3dfg.c 2011-03-27 14:31:47.000000000 -0400
37919+++ linux-2.6.32.43/drivers/staging/b3dfg/b3dfg.c 2011-04-17 15:56:46.000000000 -0400
37920@@ -455,7 +455,7 @@ static int b3dfg_vma_fault(struct vm_are
37921 return VM_FAULT_NOPAGE;
37922 }
37923
37924-static struct vm_operations_struct b3dfg_vm_ops = {
37925+static const struct vm_operations_struct b3dfg_vm_ops = {
37926 .fault = b3dfg_vma_fault,
37927 };
37928
37929@@ -848,7 +848,7 @@ static int b3dfg_mmap(struct file *filp,
37930 return r;
37931 }
37932
37933-static struct file_operations b3dfg_fops = {
37934+static const struct file_operations b3dfg_fops = {
37935 .owner = THIS_MODULE,
37936 .open = b3dfg_open,
37937 .release = b3dfg_release,
37938diff -urNp linux-2.6.32.43/drivers/staging/comedi/comedi_fops.c linux-2.6.32.43/drivers/staging/comedi/comedi_fops.c
37939--- linux-2.6.32.43/drivers/staging/comedi/comedi_fops.c 2011-03-27 14:31:47.000000000 -0400
37940+++ linux-2.6.32.43/drivers/staging/comedi/comedi_fops.c 2011-04-17 15:56:46.000000000 -0400
37941@@ -1389,7 +1389,7 @@ void comedi_unmap(struct vm_area_struct
37942 mutex_unlock(&dev->mutex);
37943 }
37944
37945-static struct vm_operations_struct comedi_vm_ops = {
37946+static const struct vm_operations_struct comedi_vm_ops = {
37947 .close = comedi_unmap,
37948 };
37949
37950diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.32.43/drivers/staging/dream/qdsp5/adsp_driver.c
37951--- linux-2.6.32.43/drivers/staging/dream/qdsp5/adsp_driver.c 2011-03-27 14:31:47.000000000 -0400
37952+++ linux-2.6.32.43/drivers/staging/dream/qdsp5/adsp_driver.c 2011-04-17 15:56:46.000000000 -0400
37953@@ -576,7 +576,7 @@ static struct adsp_device *inode_to_devi
37954 static dev_t adsp_devno;
37955 static struct class *adsp_class;
37956
37957-static struct file_operations adsp_fops = {
37958+static const struct file_operations adsp_fops = {
37959 .owner = THIS_MODULE,
37960 .open = adsp_open,
37961 .unlocked_ioctl = adsp_ioctl,
37962diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_aac.c
37963--- linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_aac.c 2011-03-27 14:31:47.000000000 -0400
37964+++ linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_aac.c 2011-04-17 15:56:46.000000000 -0400
37965@@ -1022,7 +1022,7 @@ done:
37966 return rc;
37967 }
37968
37969-static struct file_operations audio_aac_fops = {
37970+static const struct file_operations audio_aac_fops = {
37971 .owner = THIS_MODULE,
37972 .open = audio_open,
37973 .release = audio_release,
37974diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_amrnb.c
37975--- linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-03-27 14:31:47.000000000 -0400
37976+++ linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_amrnb.c 2011-04-17 15:56:46.000000000 -0400
37977@@ -833,7 +833,7 @@ done:
37978 return rc;
37979 }
37980
37981-static struct file_operations audio_amrnb_fops = {
37982+static const struct file_operations audio_amrnb_fops = {
37983 .owner = THIS_MODULE,
37984 .open = audamrnb_open,
37985 .release = audamrnb_release,
37986diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_evrc.c
37987--- linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_evrc.c 2011-03-27 14:31:47.000000000 -0400
37988+++ linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_evrc.c 2011-04-17 15:56:46.000000000 -0400
37989@@ -805,7 +805,7 @@ dma_fail:
37990 return rc;
37991 }
37992
37993-static struct file_operations audio_evrc_fops = {
37994+static const struct file_operations audio_evrc_fops = {
37995 .owner = THIS_MODULE,
37996 .open = audevrc_open,
37997 .release = audevrc_release,
37998diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_in.c
37999--- linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_in.c 2011-03-27 14:31:47.000000000 -0400
38000+++ linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_in.c 2011-04-17 15:56:46.000000000 -0400
38001@@ -913,7 +913,7 @@ static int audpre_open(struct inode *ino
38002 return 0;
38003 }
38004
38005-static struct file_operations audio_fops = {
38006+static const struct file_operations audio_fops = {
38007 .owner = THIS_MODULE,
38008 .open = audio_in_open,
38009 .release = audio_in_release,
38010@@ -922,7 +922,7 @@ static struct file_operations audio_fops
38011 .unlocked_ioctl = audio_in_ioctl,
38012 };
38013
38014-static struct file_operations audpre_fops = {
38015+static const struct file_operations audpre_fops = {
38016 .owner = THIS_MODULE,
38017 .open = audpre_open,
38018 .unlocked_ioctl = audpre_ioctl,
38019diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_mp3.c
38020--- linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_mp3.c 2011-03-27 14:31:47.000000000 -0400
38021+++ linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_mp3.c 2011-04-17 15:56:46.000000000 -0400
38022@@ -941,7 +941,7 @@ done:
38023 return rc;
38024 }
38025
38026-static struct file_operations audio_mp3_fops = {
38027+static const struct file_operations audio_mp3_fops = {
38028 .owner = THIS_MODULE,
38029 .open = audio_open,
38030 .release = audio_release,
38031diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_out.c
38032--- linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_out.c 2011-03-27 14:31:47.000000000 -0400
38033+++ linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_out.c 2011-04-17 15:56:46.000000000 -0400
38034@@ -810,7 +810,7 @@ static int audpp_open(struct inode *inod
38035 return 0;
38036 }
38037
38038-static struct file_operations audio_fops = {
38039+static const struct file_operations audio_fops = {
38040 .owner = THIS_MODULE,
38041 .open = audio_open,
38042 .release = audio_release,
38043@@ -819,7 +819,7 @@ static struct file_operations audio_fops
38044 .unlocked_ioctl = audio_ioctl,
38045 };
38046
38047-static struct file_operations audpp_fops = {
38048+static const struct file_operations audpp_fops = {
38049 .owner = THIS_MODULE,
38050 .open = audpp_open,
38051 .unlocked_ioctl = audpp_ioctl,
38052diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_qcelp.c
38053--- linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-03-27 14:31:47.000000000 -0400
38054+++ linux-2.6.32.43/drivers/staging/dream/qdsp5/audio_qcelp.c 2011-04-17 15:56:46.000000000 -0400
38055@@ -816,7 +816,7 @@ err:
38056 return rc;
38057 }
38058
38059-static struct file_operations audio_qcelp_fops = {
38060+static const struct file_operations audio_qcelp_fops = {
38061 .owner = THIS_MODULE,
38062 .open = audqcelp_open,
38063 .release = audqcelp_release,
38064diff -urNp linux-2.6.32.43/drivers/staging/dream/qdsp5/snd.c linux-2.6.32.43/drivers/staging/dream/qdsp5/snd.c
38065--- linux-2.6.32.43/drivers/staging/dream/qdsp5/snd.c 2011-03-27 14:31:47.000000000 -0400
38066+++ linux-2.6.32.43/drivers/staging/dream/qdsp5/snd.c 2011-04-17 15:56:46.000000000 -0400
38067@@ -242,7 +242,7 @@ err:
38068 return rc;
38069 }
38070
38071-static struct file_operations snd_fops = {
38072+static const struct file_operations snd_fops = {
38073 .owner = THIS_MODULE,
38074 .open = snd_open,
38075 .release = snd_release,
38076diff -urNp linux-2.6.32.43/drivers/staging/dream/smd/smd_qmi.c linux-2.6.32.43/drivers/staging/dream/smd/smd_qmi.c
38077--- linux-2.6.32.43/drivers/staging/dream/smd/smd_qmi.c 2011-03-27 14:31:47.000000000 -0400
38078+++ linux-2.6.32.43/drivers/staging/dream/smd/smd_qmi.c 2011-04-17 15:56:46.000000000 -0400
38079@@ -793,7 +793,7 @@ static int qmi_release(struct inode *ip,
38080 return 0;
38081 }
38082
38083-static struct file_operations qmi_fops = {
38084+static const struct file_operations qmi_fops = {
38085 .owner = THIS_MODULE,
38086 .read = qmi_read,
38087 .write = qmi_write,
38088diff -urNp linux-2.6.32.43/drivers/staging/dream/smd/smd_rpcrouter_device.c linux-2.6.32.43/drivers/staging/dream/smd/smd_rpcrouter_device.c
38089--- linux-2.6.32.43/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-03-27 14:31:47.000000000 -0400
38090+++ linux-2.6.32.43/drivers/staging/dream/smd/smd_rpcrouter_device.c 2011-04-17 15:56:46.000000000 -0400
38091@@ -214,7 +214,7 @@ static long rpcrouter_ioctl(struct file
38092 return rc;
38093 }
38094
38095-static struct file_operations rpcrouter_server_fops = {
38096+static const struct file_operations rpcrouter_server_fops = {
38097 .owner = THIS_MODULE,
38098 .open = rpcrouter_open,
38099 .release = rpcrouter_release,
38100@@ -224,7 +224,7 @@ static struct file_operations rpcrouter_
38101 .unlocked_ioctl = rpcrouter_ioctl,
38102 };
38103
38104-static struct file_operations rpcrouter_router_fops = {
38105+static const struct file_operations rpcrouter_router_fops = {
38106 .owner = THIS_MODULE,
38107 .open = rpcrouter_open,
38108 .release = rpcrouter_release,
38109diff -urNp linux-2.6.32.43/drivers/staging/dst/dcore.c linux-2.6.32.43/drivers/staging/dst/dcore.c
38110--- linux-2.6.32.43/drivers/staging/dst/dcore.c 2011-03-27 14:31:47.000000000 -0400
38111+++ linux-2.6.32.43/drivers/staging/dst/dcore.c 2011-04-17 15:56:46.000000000 -0400
38112@@ -149,7 +149,7 @@ static int dst_bdev_release(struct gendi
38113 return 0;
38114 }
38115
38116-static struct block_device_operations dst_blk_ops = {
38117+static const struct block_device_operations dst_blk_ops = {
38118 .open = dst_bdev_open,
38119 .release = dst_bdev_release,
38120 .owner = THIS_MODULE,
38121@@ -588,7 +588,7 @@ static struct dst_node *dst_alloc_node(s
38122 n->size = ctl->size;
38123
38124 atomic_set(&n->refcnt, 1);
38125- atomic_long_set(&n->gen, 0);
38126+ atomic_long_set_unchecked(&n->gen, 0);
38127 snprintf(n->name, sizeof(n->name), "%s", ctl->name);
38128
38129 err = dst_node_sysfs_init(n);
38130diff -urNp linux-2.6.32.43/drivers/staging/dst/trans.c linux-2.6.32.43/drivers/staging/dst/trans.c
38131--- linux-2.6.32.43/drivers/staging/dst/trans.c 2011-03-27 14:31:47.000000000 -0400
38132+++ linux-2.6.32.43/drivers/staging/dst/trans.c 2011-04-17 15:56:46.000000000 -0400
38133@@ -169,7 +169,7 @@ int dst_process_bio(struct dst_node *n,
38134 t->error = 0;
38135 t->retries = 0;
38136 atomic_set(&t->refcnt, 1);
38137- t->gen = atomic_long_inc_return(&n->gen);
38138+ t->gen = atomic_long_inc_return_unchecked(&n->gen);
38139
38140 t->enc = bio_data_dir(bio);
38141 dst_bio_to_cmd(bio, &t->cmd, DST_IO, t->gen);
38142diff -urNp linux-2.6.32.43/drivers/staging/et131x/et1310_tx.c linux-2.6.32.43/drivers/staging/et131x/et1310_tx.c
38143--- linux-2.6.32.43/drivers/staging/et131x/et1310_tx.c 2011-03-27 14:31:47.000000000 -0400
38144+++ linux-2.6.32.43/drivers/staging/et131x/et1310_tx.c 2011-05-04 17:56:28.000000000 -0400
38145@@ -710,11 +710,11 @@ inline void et131x_free_send_packet(stru
38146 struct net_device_stats *stats = &etdev->net_stats;
38147
38148 if (pMpTcb->Flags & fMP_DEST_BROAD)
38149- atomic_inc(&etdev->Stats.brdcstxmt);
38150+ atomic_inc_unchecked(&etdev->Stats.brdcstxmt);
38151 else if (pMpTcb->Flags & fMP_DEST_MULTI)
38152- atomic_inc(&etdev->Stats.multixmt);
38153+ atomic_inc_unchecked(&etdev->Stats.multixmt);
38154 else
38155- atomic_inc(&etdev->Stats.unixmt);
38156+ atomic_inc_unchecked(&etdev->Stats.unixmt);
38157
38158 if (pMpTcb->Packet) {
38159 stats->tx_bytes += pMpTcb->Packet->len;
38160diff -urNp linux-2.6.32.43/drivers/staging/et131x/et131x_adapter.h linux-2.6.32.43/drivers/staging/et131x/et131x_adapter.h
38161--- linux-2.6.32.43/drivers/staging/et131x/et131x_adapter.h 2011-03-27 14:31:47.000000000 -0400
38162+++ linux-2.6.32.43/drivers/staging/et131x/et131x_adapter.h 2011-05-04 17:56:28.000000000 -0400
38163@@ -145,11 +145,11 @@ typedef struct _ce_stats_t {
38164 * operations
38165 */
38166 u32 unircv; /* # multicast packets received */
38167- atomic_t unixmt; /* # multicast packets for Tx */
38168+ atomic_unchecked_t unixmt; /* # multicast packets for Tx */
38169 u32 multircv; /* # multicast packets received */
38170- atomic_t multixmt; /* # multicast packets for Tx */
38171+ atomic_unchecked_t multixmt; /* # multicast packets for Tx */
38172 u32 brdcstrcv; /* # broadcast packets received */
38173- atomic_t brdcstxmt; /* # broadcast packets for Tx */
38174+ atomic_unchecked_t brdcstxmt; /* # broadcast packets for Tx */
38175 u32 norcvbuf; /* # Rx packets discarded */
38176 u32 noxmtbuf; /* # Tx packets discarded */
38177
38178diff -urNp linux-2.6.32.43/drivers/staging/go7007/go7007-v4l2.c linux-2.6.32.43/drivers/staging/go7007/go7007-v4l2.c
38179--- linux-2.6.32.43/drivers/staging/go7007/go7007-v4l2.c 2011-03-27 14:31:47.000000000 -0400
38180+++ linux-2.6.32.43/drivers/staging/go7007/go7007-v4l2.c 2011-04-17 15:56:46.000000000 -0400
38181@@ -1700,7 +1700,7 @@ static int go7007_vm_fault(struct vm_are
38182 return 0;
38183 }
38184
38185-static struct vm_operations_struct go7007_vm_ops = {
38186+static const struct vm_operations_struct go7007_vm_ops = {
38187 .open = go7007_vm_open,
38188 .close = go7007_vm_close,
38189 .fault = go7007_vm_fault,
38190diff -urNp linux-2.6.32.43/drivers/staging/hv/blkvsc_drv.c linux-2.6.32.43/drivers/staging/hv/blkvsc_drv.c
38191--- linux-2.6.32.43/drivers/staging/hv/blkvsc_drv.c 2011-03-27 14:31:47.000000000 -0400
38192+++ linux-2.6.32.43/drivers/staging/hv/blkvsc_drv.c 2011-04-17 15:56:46.000000000 -0400
38193@@ -153,7 +153,7 @@ static int blkvsc_ringbuffer_size = BLKV
38194 /* The one and only one */
38195 static struct blkvsc_driver_context g_blkvsc_drv;
38196
38197-static struct block_device_operations block_ops = {
38198+static const struct block_device_operations block_ops = {
38199 .owner = THIS_MODULE,
38200 .open = blkvsc_open,
38201 .release = blkvsc_release,
38202diff -urNp linux-2.6.32.43/drivers/staging/hv/Channel.c linux-2.6.32.43/drivers/staging/hv/Channel.c
38203--- linux-2.6.32.43/drivers/staging/hv/Channel.c 2011-04-17 17:00:52.000000000 -0400
38204+++ linux-2.6.32.43/drivers/staging/hv/Channel.c 2011-05-04 17:56:28.000000000 -0400
38205@@ -464,8 +464,8 @@ int VmbusChannelEstablishGpadl(struct vm
38206
38207 DPRINT_ENTER(VMBUS);
38208
38209- nextGpadlHandle = atomic_read(&gVmbusConnection.NextGpadlHandle);
38210- atomic_inc(&gVmbusConnection.NextGpadlHandle);
38211+ nextGpadlHandle = atomic_read_unchecked(&gVmbusConnection.NextGpadlHandle);
38212+ atomic_inc_unchecked(&gVmbusConnection.NextGpadlHandle);
38213
38214 VmbusChannelCreateGpadlHeader(Kbuffer, Size, &msgInfo, &msgCount);
38215 ASSERT(msgInfo != NULL);
38216diff -urNp linux-2.6.32.43/drivers/staging/hv/Hv.c linux-2.6.32.43/drivers/staging/hv/Hv.c
38217--- linux-2.6.32.43/drivers/staging/hv/Hv.c 2011-03-27 14:31:47.000000000 -0400
38218+++ linux-2.6.32.43/drivers/staging/hv/Hv.c 2011-04-17 15:56:46.000000000 -0400
38219@@ -161,7 +161,7 @@ static u64 HvDoHypercall(u64 Control, vo
38220 u64 outputAddress = (Output) ? virt_to_phys(Output) : 0;
38221 u32 outputAddressHi = outputAddress >> 32;
38222 u32 outputAddressLo = outputAddress & 0xFFFFFFFF;
38223- volatile void *hypercallPage = gHvContext.HypercallPage;
38224+ volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage);
38225
38226 DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>",
38227 Control, Input, Output);
38228diff -urNp linux-2.6.32.43/drivers/staging/hv/vmbus_drv.c linux-2.6.32.43/drivers/staging/hv/vmbus_drv.c
38229--- linux-2.6.32.43/drivers/staging/hv/vmbus_drv.c 2011-03-27 14:31:47.000000000 -0400
38230+++ linux-2.6.32.43/drivers/staging/hv/vmbus_drv.c 2011-05-04 17:56:28.000000000 -0400
38231@@ -532,7 +532,7 @@ static int vmbus_child_device_register(s
38232 to_device_context(root_device_obj);
38233 struct device_context *child_device_ctx =
38234 to_device_context(child_device_obj);
38235- static atomic_t device_num = ATOMIC_INIT(0);
38236+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
38237
38238 DPRINT_ENTER(VMBUS_DRV);
38239
38240@@ -541,7 +541,7 @@ static int vmbus_child_device_register(s
38241
38242 /* Set the device name. Otherwise, device_register() will fail. */
38243 dev_set_name(&child_device_ctx->device, "vmbus_0_%d",
38244- atomic_inc_return(&device_num));
38245+ atomic_inc_return_unchecked(&device_num));
38246
38247 /* The new device belongs to this bus */
38248 child_device_ctx->device.bus = &g_vmbus_drv.bus; /* device->dev.bus; */
38249diff -urNp linux-2.6.32.43/drivers/staging/hv/VmbusPrivate.h linux-2.6.32.43/drivers/staging/hv/VmbusPrivate.h
38250--- linux-2.6.32.43/drivers/staging/hv/VmbusPrivate.h 2011-04-17 17:00:52.000000000 -0400
38251+++ linux-2.6.32.43/drivers/staging/hv/VmbusPrivate.h 2011-05-04 17:56:28.000000000 -0400
38252@@ -59,7 +59,7 @@ enum VMBUS_CONNECT_STATE {
38253 struct VMBUS_CONNECTION {
38254 enum VMBUS_CONNECT_STATE ConnectState;
38255
38256- atomic_t NextGpadlHandle;
38257+ atomic_unchecked_t NextGpadlHandle;
38258
38259 /*
38260 * Represents channel interrupts. Each bit position represents a
38261diff -urNp linux-2.6.32.43/drivers/staging/octeon/ethernet.c linux-2.6.32.43/drivers/staging/octeon/ethernet.c
38262--- linux-2.6.32.43/drivers/staging/octeon/ethernet.c 2011-03-27 14:31:47.000000000 -0400
38263+++ linux-2.6.32.43/drivers/staging/octeon/ethernet.c 2011-05-04 17:56:28.000000000 -0400
38264@@ -294,11 +294,11 @@ static struct net_device_stats *cvm_oct_
38265 * since the RX tasklet also increments it.
38266 */
38267 #ifdef CONFIG_64BIT
38268- atomic64_add(rx_status.dropped_packets,
38269- (atomic64_t *)&priv->stats.rx_dropped);
38270+ atomic64_add_unchecked(rx_status.dropped_packets,
38271+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
38272 #else
38273- atomic_add(rx_status.dropped_packets,
38274- (atomic_t *)&priv->stats.rx_dropped);
38275+ atomic_add_unchecked(rx_status.dropped_packets,
38276+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
38277 #endif
38278 }
38279
38280diff -urNp linux-2.6.32.43/drivers/staging/octeon/ethernet-rx.c linux-2.6.32.43/drivers/staging/octeon/ethernet-rx.c
38281--- linux-2.6.32.43/drivers/staging/octeon/ethernet-rx.c 2011-03-27 14:31:47.000000000 -0400
38282+++ linux-2.6.32.43/drivers/staging/octeon/ethernet-rx.c 2011-05-04 17:56:28.000000000 -0400
38283@@ -406,11 +406,11 @@ void cvm_oct_tasklet_rx(unsigned long un
38284 /* Increment RX stats for virtual ports */
38285 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
38286 #ifdef CONFIG_64BIT
38287- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
38288- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
38289+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
38290+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
38291 #else
38292- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
38293- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
38294+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
38295+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
38296 #endif
38297 }
38298 netif_receive_skb(skb);
38299@@ -424,9 +424,9 @@ void cvm_oct_tasklet_rx(unsigned long un
38300 dev->name);
38301 */
38302 #ifdef CONFIG_64BIT
38303- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
38304+ atomic64_add_unchecked(1, (atomic64_t *)&priv->stats.rx_dropped);
38305 #else
38306- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
38307+ atomic_add_unchecked(1, (atomic_t *)&priv->stats.rx_dropped);
38308 #endif
38309 dev_kfree_skb_irq(skb);
38310 }
38311diff -urNp linux-2.6.32.43/drivers/staging/panel/panel.c linux-2.6.32.43/drivers/staging/panel/panel.c
38312--- linux-2.6.32.43/drivers/staging/panel/panel.c 2011-03-27 14:31:47.000000000 -0400
38313+++ linux-2.6.32.43/drivers/staging/panel/panel.c 2011-04-17 15:56:46.000000000 -0400
38314@@ -1305,7 +1305,7 @@ static int lcd_release(struct inode *ino
38315 return 0;
38316 }
38317
38318-static struct file_operations lcd_fops = {
38319+static const struct file_operations lcd_fops = {
38320 .write = lcd_write,
38321 .open = lcd_open,
38322 .release = lcd_release,
38323@@ -1565,7 +1565,7 @@ static int keypad_release(struct inode *
38324 return 0;
38325 }
38326
38327-static struct file_operations keypad_fops = {
38328+static const struct file_operations keypad_fops = {
38329 .read = keypad_read, /* read */
38330 .open = keypad_open, /* open */
38331 .release = keypad_release, /* close */
38332diff -urNp linux-2.6.32.43/drivers/staging/phison/phison.c linux-2.6.32.43/drivers/staging/phison/phison.c
38333--- linux-2.6.32.43/drivers/staging/phison/phison.c 2011-03-27 14:31:47.000000000 -0400
38334+++ linux-2.6.32.43/drivers/staging/phison/phison.c 2011-04-17 15:56:46.000000000 -0400
38335@@ -43,7 +43,7 @@ static struct scsi_host_template phison_
38336 ATA_BMDMA_SHT(DRV_NAME),
38337 };
38338
38339-static struct ata_port_operations phison_ops = {
38340+static const struct ata_port_operations phison_ops = {
38341 .inherits = &ata_bmdma_port_ops,
38342 .prereset = phison_pre_reset,
38343 };
38344diff -urNp linux-2.6.32.43/drivers/staging/poch/poch.c linux-2.6.32.43/drivers/staging/poch/poch.c
38345--- linux-2.6.32.43/drivers/staging/poch/poch.c 2011-03-27 14:31:47.000000000 -0400
38346+++ linux-2.6.32.43/drivers/staging/poch/poch.c 2011-04-17 15:56:46.000000000 -0400
38347@@ -1057,7 +1057,7 @@ static int poch_ioctl(struct inode *inod
38348 return 0;
38349 }
38350
38351-static struct file_operations poch_fops = {
38352+static const struct file_operations poch_fops = {
38353 .owner = THIS_MODULE,
38354 .open = poch_open,
38355 .release = poch_release,
38356diff -urNp linux-2.6.32.43/drivers/staging/pohmelfs/inode.c linux-2.6.32.43/drivers/staging/pohmelfs/inode.c
38357--- linux-2.6.32.43/drivers/staging/pohmelfs/inode.c 2011-03-27 14:31:47.000000000 -0400
38358+++ linux-2.6.32.43/drivers/staging/pohmelfs/inode.c 2011-05-04 17:56:20.000000000 -0400
38359@@ -1850,7 +1850,7 @@ static int pohmelfs_fill_super(struct su
38360 mutex_init(&psb->mcache_lock);
38361 psb->mcache_root = RB_ROOT;
38362 psb->mcache_timeout = msecs_to_jiffies(5000);
38363- atomic_long_set(&psb->mcache_gen, 0);
38364+ atomic_long_set_unchecked(&psb->mcache_gen, 0);
38365
38366 psb->trans_max_pages = 100;
38367
38368@@ -1865,7 +1865,7 @@ static int pohmelfs_fill_super(struct su
38369 INIT_LIST_HEAD(&psb->crypto_ready_list);
38370 INIT_LIST_HEAD(&psb->crypto_active_list);
38371
38372- atomic_set(&psb->trans_gen, 1);
38373+ atomic_set_unchecked(&psb->trans_gen, 1);
38374 atomic_long_set(&psb->total_inodes, 0);
38375
38376 mutex_init(&psb->state_lock);
38377diff -urNp linux-2.6.32.43/drivers/staging/pohmelfs/mcache.c linux-2.6.32.43/drivers/staging/pohmelfs/mcache.c
38378--- linux-2.6.32.43/drivers/staging/pohmelfs/mcache.c 2011-03-27 14:31:47.000000000 -0400
38379+++ linux-2.6.32.43/drivers/staging/pohmelfs/mcache.c 2011-04-17 15:56:46.000000000 -0400
38380@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_
38381 m->data = data;
38382 m->start = start;
38383 m->size = size;
38384- m->gen = atomic_long_inc_return(&psb->mcache_gen);
38385+ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen);
38386
38387 mutex_lock(&psb->mcache_lock);
38388 err = pohmelfs_mcache_insert(psb, m);
38389diff -urNp linux-2.6.32.43/drivers/staging/pohmelfs/netfs.h linux-2.6.32.43/drivers/staging/pohmelfs/netfs.h
38390--- linux-2.6.32.43/drivers/staging/pohmelfs/netfs.h 2011-03-27 14:31:47.000000000 -0400
38391+++ linux-2.6.32.43/drivers/staging/pohmelfs/netfs.h 2011-05-04 17:56:20.000000000 -0400
38392@@ -570,14 +570,14 @@ struct pohmelfs_config;
38393 struct pohmelfs_sb {
38394 struct rb_root mcache_root;
38395 struct mutex mcache_lock;
38396- atomic_long_t mcache_gen;
38397+ atomic_long_unchecked_t mcache_gen;
38398 unsigned long mcache_timeout;
38399
38400 unsigned int idx;
38401
38402 unsigned int trans_retries;
38403
38404- atomic_t trans_gen;
38405+ atomic_unchecked_t trans_gen;
38406
38407 unsigned int crypto_attached_size;
38408 unsigned int crypto_align_size;
38409diff -urNp linux-2.6.32.43/drivers/staging/pohmelfs/trans.c linux-2.6.32.43/drivers/staging/pohmelfs/trans.c
38410--- linux-2.6.32.43/drivers/staging/pohmelfs/trans.c 2011-03-27 14:31:47.000000000 -0400
38411+++ linux-2.6.32.43/drivers/staging/pohmelfs/trans.c 2011-05-04 17:56:28.000000000 -0400
38412@@ -492,7 +492,7 @@ int netfs_trans_finish(struct netfs_tran
38413 int err;
38414 struct netfs_cmd *cmd = t->iovec.iov_base;
38415
38416- t->gen = atomic_inc_return(&psb->trans_gen);
38417+ t->gen = atomic_inc_return_unchecked(&psb->trans_gen);
38418
38419 cmd->size = t->iovec.iov_len - sizeof(struct netfs_cmd) +
38420 t->attached_size + t->attached_pages * sizeof(struct netfs_cmd);
38421diff -urNp linux-2.6.32.43/drivers/staging/sep/sep_driver.c linux-2.6.32.43/drivers/staging/sep/sep_driver.c
38422--- linux-2.6.32.43/drivers/staging/sep/sep_driver.c 2011-03-27 14:31:47.000000000 -0400
38423+++ linux-2.6.32.43/drivers/staging/sep/sep_driver.c 2011-04-17 15:56:46.000000000 -0400
38424@@ -2603,7 +2603,7 @@ static struct pci_driver sep_pci_driver
38425 static dev_t sep_devno;
38426
38427 /* the files operations structure of the driver */
38428-static struct file_operations sep_file_operations = {
38429+static const struct file_operations sep_file_operations = {
38430 .owner = THIS_MODULE,
38431 .ioctl = sep_ioctl,
38432 .poll = sep_poll,
38433diff -urNp linux-2.6.32.43/drivers/staging/usbip/vhci.h linux-2.6.32.43/drivers/staging/usbip/vhci.h
38434--- linux-2.6.32.43/drivers/staging/usbip/vhci.h 2011-03-27 14:31:47.000000000 -0400
38435+++ linux-2.6.32.43/drivers/staging/usbip/vhci.h 2011-05-04 17:56:28.000000000 -0400
38436@@ -92,7 +92,7 @@ struct vhci_hcd {
38437 unsigned resuming:1;
38438 unsigned long re_timeout;
38439
38440- atomic_t seqnum;
38441+ atomic_unchecked_t seqnum;
38442
38443 /*
38444 * NOTE:
38445diff -urNp linux-2.6.32.43/drivers/staging/usbip/vhci_hcd.c linux-2.6.32.43/drivers/staging/usbip/vhci_hcd.c
38446--- linux-2.6.32.43/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:01.000000000 -0400
38447+++ linux-2.6.32.43/drivers/staging/usbip/vhci_hcd.c 2011-05-10 22:12:33.000000000 -0400
38448@@ -534,7 +534,7 @@ static void vhci_tx_urb(struct urb *urb)
38449 return;
38450 }
38451
38452- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
38453+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
38454 if (priv->seqnum == 0xffff)
38455 usbip_uinfo("seqnum max\n");
38456
38457@@ -793,7 +793,7 @@ static int vhci_urb_dequeue(struct usb_h
38458 return -ENOMEM;
38459 }
38460
38461- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
38462+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
38463 if (unlink->seqnum == 0xffff)
38464 usbip_uinfo("seqnum max\n");
38465
38466@@ -988,7 +988,7 @@ static int vhci_start(struct usb_hcd *hc
38467 vdev->rhport = rhport;
38468 }
38469
38470- atomic_set(&vhci->seqnum, 0);
38471+ atomic_set_unchecked(&vhci->seqnum, 0);
38472 spin_lock_init(&vhci->lock);
38473
38474
38475diff -urNp linux-2.6.32.43/drivers/staging/usbip/vhci_rx.c linux-2.6.32.43/drivers/staging/usbip/vhci_rx.c
38476--- linux-2.6.32.43/drivers/staging/usbip/vhci_rx.c 2011-04-17 17:00:52.000000000 -0400
38477+++ linux-2.6.32.43/drivers/staging/usbip/vhci_rx.c 2011-05-04 17:56:28.000000000 -0400
38478@@ -78,7 +78,7 @@ static void vhci_recv_ret_submit(struct
38479 usbip_uerr("cannot find a urb of seqnum %u\n",
38480 pdu->base.seqnum);
38481 usbip_uinfo("max seqnum %d\n",
38482- atomic_read(&the_controller->seqnum));
38483+ atomic_read_unchecked(&the_controller->seqnum));
38484 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
38485 return;
38486 }
38487diff -urNp linux-2.6.32.43/drivers/staging/vme/devices/vme_user.c linux-2.6.32.43/drivers/staging/vme/devices/vme_user.c
38488--- linux-2.6.32.43/drivers/staging/vme/devices/vme_user.c 2011-03-27 14:31:47.000000000 -0400
38489+++ linux-2.6.32.43/drivers/staging/vme/devices/vme_user.c 2011-04-17 15:56:46.000000000 -0400
38490@@ -136,7 +136,7 @@ static int vme_user_ioctl(struct inode *
38491 static int __init vme_user_probe(struct device *, int, int);
38492 static int __exit vme_user_remove(struct device *, int, int);
38493
38494-static struct file_operations vme_user_fops = {
38495+static const struct file_operations vme_user_fops = {
38496 .open = vme_user_open,
38497 .release = vme_user_release,
38498 .read = vme_user_read,
38499diff -urNp linux-2.6.32.43/drivers/telephony/ixj.c linux-2.6.32.43/drivers/telephony/ixj.c
38500--- linux-2.6.32.43/drivers/telephony/ixj.c 2011-03-27 14:31:47.000000000 -0400
38501+++ linux-2.6.32.43/drivers/telephony/ixj.c 2011-05-16 21:46:57.000000000 -0400
38502@@ -4976,6 +4976,8 @@ static int ixj_daa_cid_read(IXJ *j)
38503 bool mContinue;
38504 char *pIn, *pOut;
38505
38506+ pax_track_stack();
38507+
38508 if (!SCI_Prepare(j))
38509 return 0;
38510
38511diff -urNp linux-2.6.32.43/drivers/uio/uio.c linux-2.6.32.43/drivers/uio/uio.c
38512--- linux-2.6.32.43/drivers/uio/uio.c 2011-03-27 14:31:47.000000000 -0400
38513+++ linux-2.6.32.43/drivers/uio/uio.c 2011-05-04 17:56:20.000000000 -0400
38514@@ -23,6 +23,7 @@
38515 #include <linux/string.h>
38516 #include <linux/kobject.h>
38517 #include <linux/uio_driver.h>
38518+#include <asm/local.h>
38519
38520 #define UIO_MAX_DEVICES 255
38521
38522@@ -30,10 +31,10 @@ struct uio_device {
38523 struct module *owner;
38524 struct device *dev;
38525 int minor;
38526- atomic_t event;
38527+ atomic_unchecked_t event;
38528 struct fasync_struct *async_queue;
38529 wait_queue_head_t wait;
38530- int vma_count;
38531+ local_t vma_count;
38532 struct uio_info *info;
38533 struct kobject *map_dir;
38534 struct kobject *portio_dir;
38535@@ -129,7 +130,7 @@ static ssize_t map_type_show(struct kobj
38536 return entry->show(mem, buf);
38537 }
38538
38539-static struct sysfs_ops map_sysfs_ops = {
38540+static const struct sysfs_ops map_sysfs_ops = {
38541 .show = map_type_show,
38542 };
38543
38544@@ -217,7 +218,7 @@ static ssize_t portio_type_show(struct k
38545 return entry->show(port, buf);
38546 }
38547
38548-static struct sysfs_ops portio_sysfs_ops = {
38549+static const struct sysfs_ops portio_sysfs_ops = {
38550 .show = portio_type_show,
38551 };
38552
38553@@ -255,7 +256,7 @@ static ssize_t show_event(struct device
38554 struct uio_device *idev = dev_get_drvdata(dev);
38555 if (idev)
38556 return sprintf(buf, "%u\n",
38557- (unsigned int)atomic_read(&idev->event));
38558+ (unsigned int)atomic_read_unchecked(&idev->event));
38559 else
38560 return -ENODEV;
38561 }
38562@@ -424,7 +425,7 @@ void uio_event_notify(struct uio_info *i
38563 {
38564 struct uio_device *idev = info->uio_dev;
38565
38566- atomic_inc(&idev->event);
38567+ atomic_inc_unchecked(&idev->event);
38568 wake_up_interruptible(&idev->wait);
38569 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
38570 }
38571@@ -477,7 +478,7 @@ static int uio_open(struct inode *inode,
38572 }
38573
38574 listener->dev = idev;
38575- listener->event_count = atomic_read(&idev->event);
38576+ listener->event_count = atomic_read_unchecked(&idev->event);
38577 filep->private_data = listener;
38578
38579 if (idev->info->open) {
38580@@ -528,7 +529,7 @@ static unsigned int uio_poll(struct file
38581 return -EIO;
38582
38583 poll_wait(filep, &idev->wait, wait);
38584- if (listener->event_count != atomic_read(&idev->event))
38585+ if (listener->event_count != atomic_read_unchecked(&idev->event))
38586 return POLLIN | POLLRDNORM;
38587 return 0;
38588 }
38589@@ -553,7 +554,7 @@ static ssize_t uio_read(struct file *fil
38590 do {
38591 set_current_state(TASK_INTERRUPTIBLE);
38592
38593- event_count = atomic_read(&idev->event);
38594+ event_count = atomic_read_unchecked(&idev->event);
38595 if (event_count != listener->event_count) {
38596 if (copy_to_user(buf, &event_count, count))
38597 retval = -EFAULT;
38598@@ -624,13 +625,13 @@ static int uio_find_mem_index(struct vm_
38599 static void uio_vma_open(struct vm_area_struct *vma)
38600 {
38601 struct uio_device *idev = vma->vm_private_data;
38602- idev->vma_count++;
38603+ local_inc(&idev->vma_count);
38604 }
38605
38606 static void uio_vma_close(struct vm_area_struct *vma)
38607 {
38608 struct uio_device *idev = vma->vm_private_data;
38609- idev->vma_count--;
38610+ local_dec(&idev->vma_count);
38611 }
38612
38613 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
38614@@ -840,7 +841,7 @@ int __uio_register_device(struct module
38615 idev->owner = owner;
38616 idev->info = info;
38617 init_waitqueue_head(&idev->wait);
38618- atomic_set(&idev->event, 0);
38619+ atomic_set_unchecked(&idev->event, 0);
38620
38621 ret = uio_get_minor(idev);
38622 if (ret)
38623diff -urNp linux-2.6.32.43/drivers/usb/atm/usbatm.c linux-2.6.32.43/drivers/usb/atm/usbatm.c
38624--- linux-2.6.32.43/drivers/usb/atm/usbatm.c 2011-03-27 14:31:47.000000000 -0400
38625+++ linux-2.6.32.43/drivers/usb/atm/usbatm.c 2011-04-17 15:56:46.000000000 -0400
38626@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru
38627 if (printk_ratelimit())
38628 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
38629 __func__, vpi, vci);
38630- atomic_inc(&vcc->stats->rx_err);
38631+ atomic_inc_unchecked(&vcc->stats->rx_err);
38632 return;
38633 }
38634
38635@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru
38636 if (length > ATM_MAX_AAL5_PDU) {
38637 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
38638 __func__, length, vcc);
38639- atomic_inc(&vcc->stats->rx_err);
38640+ atomic_inc_unchecked(&vcc->stats->rx_err);
38641 goto out;
38642 }
38643
38644@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru
38645 if (sarb->len < pdu_length) {
38646 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
38647 __func__, pdu_length, sarb->len, vcc);
38648- atomic_inc(&vcc->stats->rx_err);
38649+ atomic_inc_unchecked(&vcc->stats->rx_err);
38650 goto out;
38651 }
38652
38653 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
38654 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
38655 __func__, vcc);
38656- atomic_inc(&vcc->stats->rx_err);
38657+ atomic_inc_unchecked(&vcc->stats->rx_err);
38658 goto out;
38659 }
38660
38661@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru
38662 if (printk_ratelimit())
38663 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
38664 __func__, length);
38665- atomic_inc(&vcc->stats->rx_drop);
38666+ atomic_inc_unchecked(&vcc->stats->rx_drop);
38667 goto out;
38668 }
38669
38670@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru
38671
38672 vcc->push(vcc, skb);
38673
38674- atomic_inc(&vcc->stats->rx);
38675+ atomic_inc_unchecked(&vcc->stats->rx);
38676 out:
38677 skb_trim(sarb, 0);
38678 }
38679@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l
38680 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
38681
38682 usbatm_pop(vcc, skb);
38683- atomic_inc(&vcc->stats->tx);
38684+ atomic_inc_unchecked(&vcc->stats->tx);
38685
38686 skb = skb_dequeue(&instance->sndqueue);
38687 }
38688@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a
38689 if (!left--)
38690 return sprintf(page,
38691 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
38692- atomic_read(&atm_dev->stats.aal5.tx),
38693- atomic_read(&atm_dev->stats.aal5.tx_err),
38694- atomic_read(&atm_dev->stats.aal5.rx),
38695- atomic_read(&atm_dev->stats.aal5.rx_err),
38696- atomic_read(&atm_dev->stats.aal5.rx_drop));
38697+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
38698+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
38699+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
38700+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
38701+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
38702
38703 if (!left--) {
38704 if (instance->disconnected)
38705diff -urNp linux-2.6.32.43/drivers/usb/class/cdc-wdm.c linux-2.6.32.43/drivers/usb/class/cdc-wdm.c
38706--- linux-2.6.32.43/drivers/usb/class/cdc-wdm.c 2011-03-27 14:31:47.000000000 -0400
38707+++ linux-2.6.32.43/drivers/usb/class/cdc-wdm.c 2011-04-17 15:56:46.000000000 -0400
38708@@ -314,7 +314,7 @@ static ssize_t wdm_write
38709 if (r < 0)
38710 goto outnp;
38711
38712- if (!file->f_flags && O_NONBLOCK)
38713+ if (!(file->f_flags & O_NONBLOCK))
38714 r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE,
38715 &desc->flags));
38716 else
38717diff -urNp linux-2.6.32.43/drivers/usb/core/hcd.c linux-2.6.32.43/drivers/usb/core/hcd.c
38718--- linux-2.6.32.43/drivers/usb/core/hcd.c 2011-03-27 14:31:47.000000000 -0400
38719+++ linux-2.6.32.43/drivers/usb/core/hcd.c 2011-04-17 15:56:46.000000000 -0400
38720@@ -2216,7 +2216,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd
38721
38722 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
38723
38724-struct usb_mon_operations *mon_ops;
38725+const struct usb_mon_operations *mon_ops;
38726
38727 /*
38728 * The registration is unlocked.
38729@@ -2226,7 +2226,7 @@ struct usb_mon_operations *mon_ops;
38730 * symbols from usbcore, usbcore gets referenced and cannot be unloaded first.
38731 */
38732
38733-int usb_mon_register (struct usb_mon_operations *ops)
38734+int usb_mon_register (const struct usb_mon_operations *ops)
38735 {
38736
38737 if (mon_ops)
38738diff -urNp linux-2.6.32.43/drivers/usb/core/hcd.h linux-2.6.32.43/drivers/usb/core/hcd.h
38739--- linux-2.6.32.43/drivers/usb/core/hcd.h 2011-03-27 14:31:47.000000000 -0400
38740+++ linux-2.6.32.43/drivers/usb/core/hcd.h 2011-04-17 15:56:46.000000000 -0400
38741@@ -486,13 +486,13 @@ static inline void usbfs_cleanup(void) {
38742 #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE)
38743
38744 struct usb_mon_operations {
38745- void (*urb_submit)(struct usb_bus *bus, struct urb *urb);
38746- void (*urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
38747- void (*urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
38748+ void (* const urb_submit)(struct usb_bus *bus, struct urb *urb);
38749+ void (* const urb_submit_error)(struct usb_bus *bus, struct urb *urb, int err);
38750+ void (* const urb_complete)(struct usb_bus *bus, struct urb *urb, int status);
38751 /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */
38752 };
38753
38754-extern struct usb_mon_operations *mon_ops;
38755+extern const struct usb_mon_operations *mon_ops;
38756
38757 static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb)
38758 {
38759@@ -514,7 +514,7 @@ static inline void usbmon_urb_complete(s
38760 (*mon_ops->urb_complete)(bus, urb, status);
38761 }
38762
38763-int usb_mon_register(struct usb_mon_operations *ops);
38764+int usb_mon_register(const struct usb_mon_operations *ops);
38765 void usb_mon_deregister(void);
38766
38767 #else
38768diff -urNp linux-2.6.32.43/drivers/usb/core/message.c linux-2.6.32.43/drivers/usb/core/message.c
38769--- linux-2.6.32.43/drivers/usb/core/message.c 2011-03-27 14:31:47.000000000 -0400
38770+++ linux-2.6.32.43/drivers/usb/core/message.c 2011-04-17 15:56:46.000000000 -0400
38771@@ -914,8 +914,8 @@ char *usb_cache_string(struct usb_device
38772 buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO);
38773 if (buf) {
38774 len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE);
38775- if (len > 0) {
38776- smallbuf = kmalloc(++len, GFP_NOIO);
38777+ if (len++ > 0) {
38778+ smallbuf = kmalloc(len, GFP_NOIO);
38779 if (!smallbuf)
38780 return buf;
38781 memcpy(smallbuf, buf, len);
38782diff -urNp linux-2.6.32.43/drivers/usb/misc/appledisplay.c linux-2.6.32.43/drivers/usb/misc/appledisplay.c
38783--- linux-2.6.32.43/drivers/usb/misc/appledisplay.c 2011-03-27 14:31:47.000000000 -0400
38784+++ linux-2.6.32.43/drivers/usb/misc/appledisplay.c 2011-04-17 15:56:46.000000000 -0400
38785@@ -178,7 +178,7 @@ static int appledisplay_bl_get_brightnes
38786 return pdata->msgdata[1];
38787 }
38788
38789-static struct backlight_ops appledisplay_bl_data = {
38790+static const struct backlight_ops appledisplay_bl_data = {
38791 .get_brightness = appledisplay_bl_get_brightness,
38792 .update_status = appledisplay_bl_update_status,
38793 };
38794diff -urNp linux-2.6.32.43/drivers/usb/mon/mon_main.c linux-2.6.32.43/drivers/usb/mon/mon_main.c
38795--- linux-2.6.32.43/drivers/usb/mon/mon_main.c 2011-03-27 14:31:47.000000000 -0400
38796+++ linux-2.6.32.43/drivers/usb/mon/mon_main.c 2011-04-17 15:56:46.000000000 -0400
38797@@ -238,7 +238,7 @@ static struct notifier_block mon_nb = {
38798 /*
38799 * Ops
38800 */
38801-static struct usb_mon_operations mon_ops_0 = {
38802+static const struct usb_mon_operations mon_ops_0 = {
38803 .urb_submit = mon_submit,
38804 .urb_submit_error = mon_submit_error,
38805 .urb_complete = mon_complete,
38806diff -urNp linux-2.6.32.43/drivers/usb/wusbcore/wa-hc.h linux-2.6.32.43/drivers/usb/wusbcore/wa-hc.h
38807--- linux-2.6.32.43/drivers/usb/wusbcore/wa-hc.h 2011-03-27 14:31:47.000000000 -0400
38808+++ linux-2.6.32.43/drivers/usb/wusbcore/wa-hc.h 2011-05-04 17:56:28.000000000 -0400
38809@@ -192,7 +192,7 @@ struct wahc {
38810 struct list_head xfer_delayed_list;
38811 spinlock_t xfer_list_lock;
38812 struct work_struct xfer_work;
38813- atomic_t xfer_id_count;
38814+ atomic_unchecked_t xfer_id_count;
38815 };
38816
38817
38818@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *
38819 INIT_LIST_HEAD(&wa->xfer_delayed_list);
38820 spin_lock_init(&wa->xfer_list_lock);
38821 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
38822- atomic_set(&wa->xfer_id_count, 1);
38823+ atomic_set_unchecked(&wa->xfer_id_count, 1);
38824 }
38825
38826 /**
38827diff -urNp linux-2.6.32.43/drivers/usb/wusbcore/wa-xfer.c linux-2.6.32.43/drivers/usb/wusbcore/wa-xfer.c
38828--- linux-2.6.32.43/drivers/usb/wusbcore/wa-xfer.c 2011-03-27 14:31:47.000000000 -0400
38829+++ linux-2.6.32.43/drivers/usb/wusbcore/wa-xfer.c 2011-05-04 17:56:28.000000000 -0400
38830@@ -293,7 +293,7 @@ out:
38831 */
38832 static void wa_xfer_id_init(struct wa_xfer *xfer)
38833 {
38834- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
38835+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
38836 }
38837
38838 /*
38839diff -urNp linux-2.6.32.43/drivers/uwb/wlp/messages.c linux-2.6.32.43/drivers/uwb/wlp/messages.c
38840--- linux-2.6.32.43/drivers/uwb/wlp/messages.c 2011-03-27 14:31:47.000000000 -0400
38841+++ linux-2.6.32.43/drivers/uwb/wlp/messages.c 2011-04-17 15:56:46.000000000 -0400
38842@@ -903,7 +903,7 @@ int wlp_parse_f0(struct wlp *wlp, struct
38843 size_t len = skb->len;
38844 size_t used;
38845 ssize_t result;
38846- struct wlp_nonce enonce, rnonce;
38847+ struct wlp_nonce enonce = {{0}}, rnonce = {{0}};
38848 enum wlp_assc_error assc_err;
38849 char enonce_buf[WLP_WSS_NONCE_STRSIZE];
38850 char rnonce_buf[WLP_WSS_NONCE_STRSIZE];
38851diff -urNp linux-2.6.32.43/drivers/uwb/wlp/sysfs.c linux-2.6.32.43/drivers/uwb/wlp/sysfs.c
38852--- linux-2.6.32.43/drivers/uwb/wlp/sysfs.c 2011-03-27 14:31:47.000000000 -0400
38853+++ linux-2.6.32.43/drivers/uwb/wlp/sysfs.c 2011-04-17 15:56:46.000000000 -0400
38854@@ -615,8 +615,7 @@ ssize_t wlp_wss_attr_store(struct kobjec
38855 return ret;
38856 }
38857
38858-static
38859-struct sysfs_ops wss_sysfs_ops = {
38860+static const struct sysfs_ops wss_sysfs_ops = {
38861 .show = wlp_wss_attr_show,
38862 .store = wlp_wss_attr_store,
38863 };
38864diff -urNp linux-2.6.32.43/drivers/video/atmel_lcdfb.c linux-2.6.32.43/drivers/video/atmel_lcdfb.c
38865--- linux-2.6.32.43/drivers/video/atmel_lcdfb.c 2011-03-27 14:31:47.000000000 -0400
38866+++ linux-2.6.32.43/drivers/video/atmel_lcdfb.c 2011-04-17 15:56:46.000000000 -0400
38867@@ -110,7 +110,7 @@ static int atmel_bl_get_brightness(struc
38868 return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL);
38869 }
38870
38871-static struct backlight_ops atmel_lcdc_bl_ops = {
38872+static const struct backlight_ops atmel_lcdc_bl_ops = {
38873 .update_status = atmel_bl_update_status,
38874 .get_brightness = atmel_bl_get_brightness,
38875 };
38876diff -urNp linux-2.6.32.43/drivers/video/aty/aty128fb.c linux-2.6.32.43/drivers/video/aty/aty128fb.c
38877--- linux-2.6.32.43/drivers/video/aty/aty128fb.c 2011-03-27 14:31:47.000000000 -0400
38878+++ linux-2.6.32.43/drivers/video/aty/aty128fb.c 2011-04-17 15:56:46.000000000 -0400
38879@@ -1787,7 +1787,7 @@ static int aty128_bl_get_brightness(stru
38880 return bd->props.brightness;
38881 }
38882
38883-static struct backlight_ops aty128_bl_data = {
38884+static const struct backlight_ops aty128_bl_data = {
38885 .get_brightness = aty128_bl_get_brightness,
38886 .update_status = aty128_bl_update_status,
38887 };
38888diff -urNp linux-2.6.32.43/drivers/video/aty/atyfb_base.c linux-2.6.32.43/drivers/video/aty/atyfb_base.c
38889--- linux-2.6.32.43/drivers/video/aty/atyfb_base.c 2011-03-27 14:31:47.000000000 -0400
38890+++ linux-2.6.32.43/drivers/video/aty/atyfb_base.c 2011-04-17 15:56:46.000000000 -0400
38891@@ -2225,7 +2225,7 @@ static int aty_bl_get_brightness(struct
38892 return bd->props.brightness;
38893 }
38894
38895-static struct backlight_ops aty_bl_data = {
38896+static const struct backlight_ops aty_bl_data = {
38897 .get_brightness = aty_bl_get_brightness,
38898 .update_status = aty_bl_update_status,
38899 };
38900diff -urNp linux-2.6.32.43/drivers/video/aty/radeon_backlight.c linux-2.6.32.43/drivers/video/aty/radeon_backlight.c
38901--- linux-2.6.32.43/drivers/video/aty/radeon_backlight.c 2011-03-27 14:31:47.000000000 -0400
38902+++ linux-2.6.32.43/drivers/video/aty/radeon_backlight.c 2011-04-17 15:56:46.000000000 -0400
38903@@ -127,7 +127,7 @@ static int radeon_bl_get_brightness(stru
38904 return bd->props.brightness;
38905 }
38906
38907-static struct backlight_ops radeon_bl_data = {
38908+static const struct backlight_ops radeon_bl_data = {
38909 .get_brightness = radeon_bl_get_brightness,
38910 .update_status = radeon_bl_update_status,
38911 };
38912diff -urNp linux-2.6.32.43/drivers/video/backlight/adp5520_bl.c linux-2.6.32.43/drivers/video/backlight/adp5520_bl.c
38913--- linux-2.6.32.43/drivers/video/backlight/adp5520_bl.c 2011-03-27 14:31:47.000000000 -0400
38914+++ linux-2.6.32.43/drivers/video/backlight/adp5520_bl.c 2011-04-17 15:56:46.000000000 -0400
38915@@ -84,7 +84,7 @@ static int adp5520_bl_get_brightness(str
38916 return error ? data->current_brightness : reg_val;
38917 }
38918
38919-static struct backlight_ops adp5520_bl_ops = {
38920+static const struct backlight_ops adp5520_bl_ops = {
38921 .update_status = adp5520_bl_update_status,
38922 .get_brightness = adp5520_bl_get_brightness,
38923 };
38924diff -urNp linux-2.6.32.43/drivers/video/backlight/adx_bl.c linux-2.6.32.43/drivers/video/backlight/adx_bl.c
38925--- linux-2.6.32.43/drivers/video/backlight/adx_bl.c 2011-03-27 14:31:47.000000000 -0400
38926+++ linux-2.6.32.43/drivers/video/backlight/adx_bl.c 2011-04-17 15:56:46.000000000 -0400
38927@@ -61,7 +61,7 @@ static int adx_backlight_check_fb(struct
38928 return 1;
38929 }
38930
38931-static struct backlight_ops adx_backlight_ops = {
38932+static const struct backlight_ops adx_backlight_ops = {
38933 .options = 0,
38934 .update_status = adx_backlight_update_status,
38935 .get_brightness = adx_backlight_get_brightness,
38936diff -urNp linux-2.6.32.43/drivers/video/backlight/atmel-pwm-bl.c linux-2.6.32.43/drivers/video/backlight/atmel-pwm-bl.c
38937--- linux-2.6.32.43/drivers/video/backlight/atmel-pwm-bl.c 2011-03-27 14:31:47.000000000 -0400
38938+++ linux-2.6.32.43/drivers/video/backlight/atmel-pwm-bl.c 2011-04-17 15:56:46.000000000 -0400
38939@@ -113,7 +113,7 @@ static int atmel_pwm_bl_init_pwm(struct
38940 return pwm_channel_enable(&pwmbl->pwmc);
38941 }
38942
38943-static struct backlight_ops atmel_pwm_bl_ops = {
38944+static const struct backlight_ops atmel_pwm_bl_ops = {
38945 .get_brightness = atmel_pwm_bl_get_intensity,
38946 .update_status = atmel_pwm_bl_set_intensity,
38947 };
38948diff -urNp linux-2.6.32.43/drivers/video/backlight/backlight.c linux-2.6.32.43/drivers/video/backlight/backlight.c
38949--- linux-2.6.32.43/drivers/video/backlight/backlight.c 2011-03-27 14:31:47.000000000 -0400
38950+++ linux-2.6.32.43/drivers/video/backlight/backlight.c 2011-04-17 15:56:46.000000000 -0400
38951@@ -269,7 +269,7 @@ EXPORT_SYMBOL(backlight_force_update);
38952 * ERR_PTR() or a pointer to the newly allocated device.
38953 */
38954 struct backlight_device *backlight_device_register(const char *name,
38955- struct device *parent, void *devdata, struct backlight_ops *ops)
38956+ struct device *parent, void *devdata, const struct backlight_ops *ops)
38957 {
38958 struct backlight_device *new_bd;
38959 int rc;
38960diff -urNp linux-2.6.32.43/drivers/video/backlight/corgi_lcd.c linux-2.6.32.43/drivers/video/backlight/corgi_lcd.c
38961--- linux-2.6.32.43/drivers/video/backlight/corgi_lcd.c 2011-03-27 14:31:47.000000000 -0400
38962+++ linux-2.6.32.43/drivers/video/backlight/corgi_lcd.c 2011-04-17 15:56:46.000000000 -0400
38963@@ -451,7 +451,7 @@ void corgi_lcd_limit_intensity(int limit
38964 }
38965 EXPORT_SYMBOL(corgi_lcd_limit_intensity);
38966
38967-static struct backlight_ops corgi_bl_ops = {
38968+static const struct backlight_ops corgi_bl_ops = {
38969 .get_brightness = corgi_bl_get_intensity,
38970 .update_status = corgi_bl_update_status,
38971 };
38972diff -urNp linux-2.6.32.43/drivers/video/backlight/cr_bllcd.c linux-2.6.32.43/drivers/video/backlight/cr_bllcd.c
38973--- linux-2.6.32.43/drivers/video/backlight/cr_bllcd.c 2011-03-27 14:31:47.000000000 -0400
38974+++ linux-2.6.32.43/drivers/video/backlight/cr_bllcd.c 2011-04-17 15:56:46.000000000 -0400
38975@@ -108,7 +108,7 @@ static int cr_backlight_get_intensity(st
38976 return intensity;
38977 }
38978
38979-static struct backlight_ops cr_backlight_ops = {
38980+static const struct backlight_ops cr_backlight_ops = {
38981 .get_brightness = cr_backlight_get_intensity,
38982 .update_status = cr_backlight_set_intensity,
38983 };
38984diff -urNp linux-2.6.32.43/drivers/video/backlight/da903x_bl.c linux-2.6.32.43/drivers/video/backlight/da903x_bl.c
38985--- linux-2.6.32.43/drivers/video/backlight/da903x_bl.c 2011-03-27 14:31:47.000000000 -0400
38986+++ linux-2.6.32.43/drivers/video/backlight/da903x_bl.c 2011-04-17 15:56:46.000000000 -0400
38987@@ -94,7 +94,7 @@ static int da903x_backlight_get_brightne
38988 return data->current_brightness;
38989 }
38990
38991-static struct backlight_ops da903x_backlight_ops = {
38992+static const struct backlight_ops da903x_backlight_ops = {
38993 .update_status = da903x_backlight_update_status,
38994 .get_brightness = da903x_backlight_get_brightness,
38995 };
38996diff -urNp linux-2.6.32.43/drivers/video/backlight/generic_bl.c linux-2.6.32.43/drivers/video/backlight/generic_bl.c
38997--- linux-2.6.32.43/drivers/video/backlight/generic_bl.c 2011-03-27 14:31:47.000000000 -0400
38998+++ linux-2.6.32.43/drivers/video/backlight/generic_bl.c 2011-04-17 15:56:46.000000000 -0400
38999@@ -70,7 +70,7 @@ void corgibl_limit_intensity(int limit)
39000 }
39001 EXPORT_SYMBOL(corgibl_limit_intensity);
39002
39003-static struct backlight_ops genericbl_ops = {
39004+static const struct backlight_ops genericbl_ops = {
39005 .options = BL_CORE_SUSPENDRESUME,
39006 .get_brightness = genericbl_get_intensity,
39007 .update_status = genericbl_send_intensity,
39008diff -urNp linux-2.6.32.43/drivers/video/backlight/hp680_bl.c linux-2.6.32.43/drivers/video/backlight/hp680_bl.c
39009--- linux-2.6.32.43/drivers/video/backlight/hp680_bl.c 2011-03-27 14:31:47.000000000 -0400
39010+++ linux-2.6.32.43/drivers/video/backlight/hp680_bl.c 2011-04-17 15:56:46.000000000 -0400
39011@@ -98,7 +98,7 @@ static int hp680bl_get_intensity(struct
39012 return current_intensity;
39013 }
39014
39015-static struct backlight_ops hp680bl_ops = {
39016+static const struct backlight_ops hp680bl_ops = {
39017 .get_brightness = hp680bl_get_intensity,
39018 .update_status = hp680bl_set_intensity,
39019 };
39020diff -urNp linux-2.6.32.43/drivers/video/backlight/jornada720_bl.c linux-2.6.32.43/drivers/video/backlight/jornada720_bl.c
39021--- linux-2.6.32.43/drivers/video/backlight/jornada720_bl.c 2011-03-27 14:31:47.000000000 -0400
39022+++ linux-2.6.32.43/drivers/video/backlight/jornada720_bl.c 2011-04-17 15:56:46.000000000 -0400
39023@@ -93,7 +93,7 @@ out:
39024 return ret;
39025 }
39026
39027-static struct backlight_ops jornada_bl_ops = {
39028+static const struct backlight_ops jornada_bl_ops = {
39029 .get_brightness = jornada_bl_get_brightness,
39030 .update_status = jornada_bl_update_status,
39031 .options = BL_CORE_SUSPENDRESUME,
39032diff -urNp linux-2.6.32.43/drivers/video/backlight/kb3886_bl.c linux-2.6.32.43/drivers/video/backlight/kb3886_bl.c
39033--- linux-2.6.32.43/drivers/video/backlight/kb3886_bl.c 2011-03-27 14:31:47.000000000 -0400
39034+++ linux-2.6.32.43/drivers/video/backlight/kb3886_bl.c 2011-04-17 15:56:46.000000000 -0400
39035@@ -134,7 +134,7 @@ static int kb3886bl_get_intensity(struct
39036 return kb3886bl_intensity;
39037 }
39038
39039-static struct backlight_ops kb3886bl_ops = {
39040+static const struct backlight_ops kb3886bl_ops = {
39041 .get_brightness = kb3886bl_get_intensity,
39042 .update_status = kb3886bl_send_intensity,
39043 };
39044diff -urNp linux-2.6.32.43/drivers/video/backlight/locomolcd.c linux-2.6.32.43/drivers/video/backlight/locomolcd.c
39045--- linux-2.6.32.43/drivers/video/backlight/locomolcd.c 2011-03-27 14:31:47.000000000 -0400
39046+++ linux-2.6.32.43/drivers/video/backlight/locomolcd.c 2011-04-17 15:56:46.000000000 -0400
39047@@ -141,7 +141,7 @@ static int locomolcd_get_intensity(struc
39048 return current_intensity;
39049 }
39050
39051-static struct backlight_ops locomobl_data = {
39052+static const struct backlight_ops locomobl_data = {
39053 .get_brightness = locomolcd_get_intensity,
39054 .update_status = locomolcd_set_intensity,
39055 };
39056diff -urNp linux-2.6.32.43/drivers/video/backlight/mbp_nvidia_bl.c linux-2.6.32.43/drivers/video/backlight/mbp_nvidia_bl.c
39057--- linux-2.6.32.43/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:01.000000000 -0400
39058+++ linux-2.6.32.43/drivers/video/backlight/mbp_nvidia_bl.c 2011-05-10 22:12:33.000000000 -0400
39059@@ -33,7 +33,7 @@ struct dmi_match_data {
39060 unsigned long iostart;
39061 unsigned long iolen;
39062 /* Backlight operations structure. */
39063- struct backlight_ops backlight_ops;
39064+ const struct backlight_ops backlight_ops;
39065 };
39066
39067 /* Module parameters. */
39068diff -urNp linux-2.6.32.43/drivers/video/backlight/omap1_bl.c linux-2.6.32.43/drivers/video/backlight/omap1_bl.c
39069--- linux-2.6.32.43/drivers/video/backlight/omap1_bl.c 2011-03-27 14:31:47.000000000 -0400
39070+++ linux-2.6.32.43/drivers/video/backlight/omap1_bl.c 2011-04-17 15:56:46.000000000 -0400
39071@@ -125,7 +125,7 @@ static int omapbl_get_intensity(struct b
39072 return bl->current_intensity;
39073 }
39074
39075-static struct backlight_ops omapbl_ops = {
39076+static const struct backlight_ops omapbl_ops = {
39077 .get_brightness = omapbl_get_intensity,
39078 .update_status = omapbl_update_status,
39079 };
39080diff -urNp linux-2.6.32.43/drivers/video/backlight/progear_bl.c linux-2.6.32.43/drivers/video/backlight/progear_bl.c
39081--- linux-2.6.32.43/drivers/video/backlight/progear_bl.c 2011-03-27 14:31:47.000000000 -0400
39082+++ linux-2.6.32.43/drivers/video/backlight/progear_bl.c 2011-04-17 15:56:46.000000000 -0400
39083@@ -54,7 +54,7 @@ static int progearbl_get_intensity(struc
39084 return intensity - HW_LEVEL_MIN;
39085 }
39086
39087-static struct backlight_ops progearbl_ops = {
39088+static const struct backlight_ops progearbl_ops = {
39089 .get_brightness = progearbl_get_intensity,
39090 .update_status = progearbl_set_intensity,
39091 };
39092diff -urNp linux-2.6.32.43/drivers/video/backlight/pwm_bl.c linux-2.6.32.43/drivers/video/backlight/pwm_bl.c
39093--- linux-2.6.32.43/drivers/video/backlight/pwm_bl.c 2011-03-27 14:31:47.000000000 -0400
39094+++ linux-2.6.32.43/drivers/video/backlight/pwm_bl.c 2011-04-17 15:56:46.000000000 -0400
39095@@ -56,7 +56,7 @@ static int pwm_backlight_get_brightness(
39096 return bl->props.brightness;
39097 }
39098
39099-static struct backlight_ops pwm_backlight_ops = {
39100+static const struct backlight_ops pwm_backlight_ops = {
39101 .update_status = pwm_backlight_update_status,
39102 .get_brightness = pwm_backlight_get_brightness,
39103 };
39104diff -urNp linux-2.6.32.43/drivers/video/backlight/tosa_bl.c linux-2.6.32.43/drivers/video/backlight/tosa_bl.c
39105--- linux-2.6.32.43/drivers/video/backlight/tosa_bl.c 2011-03-27 14:31:47.000000000 -0400
39106+++ linux-2.6.32.43/drivers/video/backlight/tosa_bl.c 2011-04-17 15:56:46.000000000 -0400
39107@@ -72,7 +72,7 @@ static int tosa_bl_get_brightness(struct
39108 return props->brightness;
39109 }
39110
39111-static struct backlight_ops bl_ops = {
39112+static const struct backlight_ops bl_ops = {
39113 .get_brightness = tosa_bl_get_brightness,
39114 .update_status = tosa_bl_update_status,
39115 };
39116diff -urNp linux-2.6.32.43/drivers/video/backlight/wm831x_bl.c linux-2.6.32.43/drivers/video/backlight/wm831x_bl.c
39117--- linux-2.6.32.43/drivers/video/backlight/wm831x_bl.c 2011-03-27 14:31:47.000000000 -0400
39118+++ linux-2.6.32.43/drivers/video/backlight/wm831x_bl.c 2011-04-17 15:56:46.000000000 -0400
39119@@ -112,7 +112,7 @@ static int wm831x_backlight_get_brightne
39120 return data->current_brightness;
39121 }
39122
39123-static struct backlight_ops wm831x_backlight_ops = {
39124+static const struct backlight_ops wm831x_backlight_ops = {
39125 .options = BL_CORE_SUSPENDRESUME,
39126 .update_status = wm831x_backlight_update_status,
39127 .get_brightness = wm831x_backlight_get_brightness,
39128diff -urNp linux-2.6.32.43/drivers/video/bf54x-lq043fb.c linux-2.6.32.43/drivers/video/bf54x-lq043fb.c
39129--- linux-2.6.32.43/drivers/video/bf54x-lq043fb.c 2011-03-27 14:31:47.000000000 -0400
39130+++ linux-2.6.32.43/drivers/video/bf54x-lq043fb.c 2011-04-17 15:56:46.000000000 -0400
39131@@ -463,7 +463,7 @@ static int bl_get_brightness(struct back
39132 return 0;
39133 }
39134
39135-static struct backlight_ops bfin_lq043fb_bl_ops = {
39136+static const struct backlight_ops bfin_lq043fb_bl_ops = {
39137 .get_brightness = bl_get_brightness,
39138 };
39139
39140diff -urNp linux-2.6.32.43/drivers/video/bfin-t350mcqb-fb.c linux-2.6.32.43/drivers/video/bfin-t350mcqb-fb.c
39141--- linux-2.6.32.43/drivers/video/bfin-t350mcqb-fb.c 2011-03-27 14:31:47.000000000 -0400
39142+++ linux-2.6.32.43/drivers/video/bfin-t350mcqb-fb.c 2011-04-17 15:56:46.000000000 -0400
39143@@ -381,7 +381,7 @@ static int bl_get_brightness(struct back
39144 return 0;
39145 }
39146
39147-static struct backlight_ops bfin_lq043fb_bl_ops = {
39148+static const struct backlight_ops bfin_lq043fb_bl_ops = {
39149 .get_brightness = bl_get_brightness,
39150 };
39151
39152diff -urNp linux-2.6.32.43/drivers/video/fbcmap.c linux-2.6.32.43/drivers/video/fbcmap.c
39153--- linux-2.6.32.43/drivers/video/fbcmap.c 2011-03-27 14:31:47.000000000 -0400
39154+++ linux-2.6.32.43/drivers/video/fbcmap.c 2011-04-17 15:56:46.000000000 -0400
39155@@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user
39156 rc = -ENODEV;
39157 goto out;
39158 }
39159- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
39160- !info->fbops->fb_setcmap)) {
39161+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
39162 rc = -EINVAL;
39163 goto out1;
39164 }
39165diff -urNp linux-2.6.32.43/drivers/video/fbmem.c linux-2.6.32.43/drivers/video/fbmem.c
39166--- linux-2.6.32.43/drivers/video/fbmem.c 2011-03-27 14:31:47.000000000 -0400
39167+++ linux-2.6.32.43/drivers/video/fbmem.c 2011-05-16 21:46:57.000000000 -0400
39168@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in
39169 image->dx += image->width + 8;
39170 }
39171 } else if (rotate == FB_ROTATE_UD) {
39172- for (x = 0; x < num && image->dx >= 0; x++) {
39173+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
39174 info->fbops->fb_imageblit(info, image);
39175 image->dx -= image->width + 8;
39176 }
39177@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in
39178 image->dy += image->height + 8;
39179 }
39180 } else if (rotate == FB_ROTATE_CCW) {
39181- for (x = 0; x < num && image->dy >= 0; x++) {
39182+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
39183 info->fbops->fb_imageblit(info, image);
39184 image->dy -= image->height + 8;
39185 }
39186@@ -915,6 +915,8 @@ fb_set_var(struct fb_info *info, struct
39187 int flags = info->flags;
39188 int ret = 0;
39189
39190+ pax_track_stack();
39191+
39192 if (var->activate & FB_ACTIVATE_INV_MODE) {
39193 struct fb_videomode mode1, mode2;
39194
39195@@ -1040,6 +1042,8 @@ static long do_fb_ioctl(struct fb_info *
39196 void __user *argp = (void __user *)arg;
39197 long ret = 0;
39198
39199+ pax_track_stack();
39200+
39201 switch (cmd) {
39202 case FBIOGET_VSCREENINFO:
39203 if (!lock_fb_info(info))
39204@@ -1119,7 +1123,7 @@ static long do_fb_ioctl(struct fb_info *
39205 return -EFAULT;
39206 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
39207 return -EINVAL;
39208- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
39209+ if (con2fb.framebuffer >= FB_MAX)
39210 return -EINVAL;
39211 if (!registered_fb[con2fb.framebuffer])
39212 request_module("fb%d", con2fb.framebuffer);
39213diff -urNp linux-2.6.32.43/drivers/video/i810/i810_accel.c linux-2.6.32.43/drivers/video/i810/i810_accel.c
39214--- linux-2.6.32.43/drivers/video/i810/i810_accel.c 2011-03-27 14:31:47.000000000 -0400
39215+++ linux-2.6.32.43/drivers/video/i810/i810_accel.c 2011-04-17 15:56:46.000000000 -0400
39216@@ -73,6 +73,7 @@ static inline int wait_for_space(struct
39217 }
39218 }
39219 printk("ringbuffer lockup!!!\n");
39220+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
39221 i810_report_error(mmio);
39222 par->dev_flags |= LOCKUP;
39223 info->pixmap.scan_align = 1;
39224diff -urNp linux-2.6.32.43/drivers/video/nvidia/nv_backlight.c linux-2.6.32.43/drivers/video/nvidia/nv_backlight.c
39225--- linux-2.6.32.43/drivers/video/nvidia/nv_backlight.c 2011-03-27 14:31:47.000000000 -0400
39226+++ linux-2.6.32.43/drivers/video/nvidia/nv_backlight.c 2011-04-17 15:56:46.000000000 -0400
39227@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru
39228 return bd->props.brightness;
39229 }
39230
39231-static struct backlight_ops nvidia_bl_ops = {
39232+static const struct backlight_ops nvidia_bl_ops = {
39233 .get_brightness = nvidia_bl_get_brightness,
39234 .update_status = nvidia_bl_update_status,
39235 };
39236diff -urNp linux-2.6.32.43/drivers/video/riva/fbdev.c linux-2.6.32.43/drivers/video/riva/fbdev.c
39237--- linux-2.6.32.43/drivers/video/riva/fbdev.c 2011-03-27 14:31:47.000000000 -0400
39238+++ linux-2.6.32.43/drivers/video/riva/fbdev.c 2011-04-17 15:56:46.000000000 -0400
39239@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct
39240 return bd->props.brightness;
39241 }
39242
39243-static struct backlight_ops riva_bl_ops = {
39244+static const struct backlight_ops riva_bl_ops = {
39245 .get_brightness = riva_bl_get_brightness,
39246 .update_status = riva_bl_update_status,
39247 };
39248diff -urNp linux-2.6.32.43/drivers/video/uvesafb.c linux-2.6.32.43/drivers/video/uvesafb.c
39249--- linux-2.6.32.43/drivers/video/uvesafb.c 2011-03-27 14:31:47.000000000 -0400
39250+++ linux-2.6.32.43/drivers/video/uvesafb.c 2011-04-17 15:56:46.000000000 -0400
39251@@ -18,6 +18,7 @@
39252 #include <linux/fb.h>
39253 #include <linux/io.h>
39254 #include <linux/mutex.h>
39255+#include <linux/moduleloader.h>
39256 #include <video/edid.h>
39257 #include <video/uvesafb.h>
39258 #ifdef CONFIG_X86
39259@@ -120,7 +121,7 @@ static int uvesafb_helper_start(void)
39260 NULL,
39261 };
39262
39263- return call_usermodehelper(v86d_path, argv, envp, 1);
39264+ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC);
39265 }
39266
39267 /*
39268@@ -568,10 +569,32 @@ static int __devinit uvesafb_vbe_getpmi(
39269 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
39270 par->pmi_setpal = par->ypan = 0;
39271 } else {
39272+
39273+#ifdef CONFIG_PAX_KERNEXEC
39274+#ifdef CONFIG_MODULES
39275+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
39276+#endif
39277+ if (!par->pmi_code) {
39278+ par->pmi_setpal = par->ypan = 0;
39279+ return 0;
39280+ }
39281+#endif
39282+
39283 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
39284 + task->t.regs.edi);
39285+
39286+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39287+ pax_open_kernel();
39288+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
39289+ pax_close_kernel();
39290+
39291+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
39292+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
39293+#else
39294 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
39295 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
39296+#endif
39297+
39298 printk(KERN_INFO "uvesafb: protected mode interface info at "
39299 "%04x:%04x\n",
39300 (u16)task->t.regs.es, (u16)task->t.regs.edi);
39301@@ -1799,6 +1822,11 @@ out:
39302 if (par->vbe_modes)
39303 kfree(par->vbe_modes);
39304
39305+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39306+ if (par->pmi_code)
39307+ module_free_exec(NULL, par->pmi_code);
39308+#endif
39309+
39310 framebuffer_release(info);
39311 return err;
39312 }
39313@@ -1825,6 +1853,12 @@ static int uvesafb_remove(struct platfor
39314 kfree(par->vbe_state_orig);
39315 if (par->vbe_state_saved)
39316 kfree(par->vbe_state_saved);
39317+
39318+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39319+ if (par->pmi_code)
39320+ module_free_exec(NULL, par->pmi_code);
39321+#endif
39322+
39323 }
39324
39325 framebuffer_release(info);
39326diff -urNp linux-2.6.32.43/drivers/video/vesafb.c linux-2.6.32.43/drivers/video/vesafb.c
39327--- linux-2.6.32.43/drivers/video/vesafb.c 2011-03-27 14:31:47.000000000 -0400
39328+++ linux-2.6.32.43/drivers/video/vesafb.c 2011-08-05 20:33:55.000000000 -0400
39329@@ -9,6 +9,7 @@
39330 */
39331
39332 #include <linux/module.h>
39333+#include <linux/moduleloader.h>
39334 #include <linux/kernel.h>
39335 #include <linux/errno.h>
39336 #include <linux/string.h>
39337@@ -53,8 +54,8 @@ static int vram_remap __initdata; /*
39338 static int vram_total __initdata; /* Set total amount of memory */
39339 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
39340 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
39341-static void (*pmi_start)(void) __read_mostly;
39342-static void (*pmi_pal) (void) __read_mostly;
39343+static void (*pmi_start)(void) __read_only;
39344+static void (*pmi_pal) (void) __read_only;
39345 static int depth __read_mostly;
39346 static int vga_compat __read_mostly;
39347 /* --------------------------------------------------------------------- */
39348@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct pl
39349 unsigned int size_vmode;
39350 unsigned int size_remap;
39351 unsigned int size_total;
39352+ void *pmi_code = NULL;
39353
39354 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
39355 return -ENODEV;
39356@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct pl
39357 size_remap = size_total;
39358 vesafb_fix.smem_len = size_remap;
39359
39360-#ifndef __i386__
39361- screen_info.vesapm_seg = 0;
39362-#endif
39363-
39364 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
39365 printk(KERN_WARNING
39366 "vesafb: cannot reserve video memory at 0x%lx\n",
39367@@ -315,9 +313,21 @@ static int __init vesafb_probe(struct pl
39368 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
39369 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
39370
39371+#ifdef __i386__
39372+
39373+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39374+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
39375+ if (!pmi_code)
39376+#elif !defined(CONFIG_PAX_KERNEXEC)
39377+ if (0)
39378+#endif
39379+
39380+#endif
39381+ screen_info.vesapm_seg = 0;
39382+
39383 if (screen_info.vesapm_seg) {
39384- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
39385- screen_info.vesapm_seg,screen_info.vesapm_off);
39386+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
39387+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
39388 }
39389
39390 if (screen_info.vesapm_seg < 0xc000)
39391@@ -325,9 +335,25 @@ static int __init vesafb_probe(struct pl
39392
39393 if (ypan || pmi_setpal) {
39394 unsigned short *pmi_base;
39395+
39396 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
39397- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
39398- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
39399+
39400+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39401+ pax_open_kernel();
39402+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
39403+#else
39404+ pmi_code = pmi_base;
39405+#endif
39406+
39407+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
39408+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
39409+
39410+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39411+ pmi_start = ktva_ktla(pmi_start);
39412+ pmi_pal = ktva_ktla(pmi_pal);
39413+ pax_close_kernel();
39414+#endif
39415+
39416 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
39417 if (pmi_base[3]) {
39418 printk(KERN_INFO "vesafb: pmi: ports = ");
39419@@ -469,6 +495,11 @@ static int __init vesafb_probe(struct pl
39420 info->node, info->fix.id);
39421 return 0;
39422 err:
39423+
39424+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
39425+ module_free_exec(NULL, pmi_code);
39426+#endif
39427+
39428 if (info->screen_base)
39429 iounmap(info->screen_base);
39430 framebuffer_release(info);
39431diff -urNp linux-2.6.32.43/drivers/xen/sys-hypervisor.c linux-2.6.32.43/drivers/xen/sys-hypervisor.c
39432--- linux-2.6.32.43/drivers/xen/sys-hypervisor.c 2011-03-27 14:31:47.000000000 -0400
39433+++ linux-2.6.32.43/drivers/xen/sys-hypervisor.c 2011-04-17 15:56:46.000000000 -0400
39434@@ -425,7 +425,7 @@ static ssize_t hyp_sysfs_store(struct ko
39435 return 0;
39436 }
39437
39438-static struct sysfs_ops hyp_sysfs_ops = {
39439+static const struct sysfs_ops hyp_sysfs_ops = {
39440 .show = hyp_sysfs_show,
39441 .store = hyp_sysfs_store,
39442 };
39443diff -urNp linux-2.6.32.43/fs/9p/vfs_inode.c linux-2.6.32.43/fs/9p/vfs_inode.c
39444--- linux-2.6.32.43/fs/9p/vfs_inode.c 2011-03-27 14:31:47.000000000 -0400
39445+++ linux-2.6.32.43/fs/9p/vfs_inode.c 2011-04-17 15:56:46.000000000 -0400
39446@@ -1079,7 +1079,7 @@ static void *v9fs_vfs_follow_link(struct
39447 static void
39448 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
39449 {
39450- char *s = nd_get_link(nd);
39451+ const char *s = nd_get_link(nd);
39452
39453 P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name,
39454 IS_ERR(s) ? "<error>" : s);
39455diff -urNp linux-2.6.32.43/fs/aio.c linux-2.6.32.43/fs/aio.c
39456--- linux-2.6.32.43/fs/aio.c 2011-03-27 14:31:47.000000000 -0400
39457+++ linux-2.6.32.43/fs/aio.c 2011-06-04 20:40:21.000000000 -0400
39458@@ -115,7 +115,7 @@ static int aio_setup_ring(struct kioctx
39459 size += sizeof(struct io_event) * nr_events;
39460 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
39461
39462- if (nr_pages < 0)
39463+ if (nr_pages <= 0)
39464 return -EINVAL;
39465
39466 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
39467@@ -1089,6 +1089,8 @@ static int read_events(struct kioctx *ct
39468 struct aio_timeout to;
39469 int retry = 0;
39470
39471+ pax_track_stack();
39472+
39473 /* needed to zero any padding within an entry (there shouldn't be
39474 * any, but C is fun!
39475 */
39476@@ -1382,13 +1384,18 @@ static ssize_t aio_fsync(struct kiocb *i
39477 static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb)
39478 {
39479 ssize_t ret;
39480+ struct iovec iovstack;
39481
39482 ret = rw_copy_check_uvector(type, (struct iovec __user *)kiocb->ki_buf,
39483 kiocb->ki_nbytes, 1,
39484- &kiocb->ki_inline_vec, &kiocb->ki_iovec);
39485+ &iovstack, &kiocb->ki_iovec);
39486 if (ret < 0)
39487 goto out;
39488
39489+ if (kiocb->ki_iovec == &iovstack) {
39490+ kiocb->ki_inline_vec = iovstack;
39491+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
39492+ }
39493 kiocb->ki_nr_segs = kiocb->ki_nbytes;
39494 kiocb->ki_cur_seg = 0;
39495 /* ki_nbytes/left now reflect bytes instead of segs */
39496diff -urNp linux-2.6.32.43/fs/attr.c linux-2.6.32.43/fs/attr.c
39497--- linux-2.6.32.43/fs/attr.c 2011-03-27 14:31:47.000000000 -0400
39498+++ linux-2.6.32.43/fs/attr.c 2011-04-17 15:56:46.000000000 -0400
39499@@ -83,6 +83,7 @@ int inode_newsize_ok(const struct inode
39500 unsigned long limit;
39501
39502 limit = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
39503+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
39504 if (limit != RLIM_INFINITY && offset > limit)
39505 goto out_sig;
39506 if (offset > inode->i_sb->s_maxbytes)
39507diff -urNp linux-2.6.32.43/fs/autofs/root.c linux-2.6.32.43/fs/autofs/root.c
39508--- linux-2.6.32.43/fs/autofs/root.c 2011-03-27 14:31:47.000000000 -0400
39509+++ linux-2.6.32.43/fs/autofs/root.c 2011-04-17 15:56:46.000000000 -0400
39510@@ -299,7 +299,8 @@ static int autofs_root_symlink(struct in
39511 set_bit(n,sbi->symlink_bitmap);
39512 sl = &sbi->symlink[n];
39513 sl->len = strlen(symname);
39514- sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL);
39515+ slsize = sl->len+1;
39516+ sl->data = kmalloc(slsize, GFP_KERNEL);
39517 if (!sl->data) {
39518 clear_bit(n,sbi->symlink_bitmap);
39519 unlock_kernel();
39520diff -urNp linux-2.6.32.43/fs/autofs4/symlink.c linux-2.6.32.43/fs/autofs4/symlink.c
39521--- linux-2.6.32.43/fs/autofs4/symlink.c 2011-03-27 14:31:47.000000000 -0400
39522+++ linux-2.6.32.43/fs/autofs4/symlink.c 2011-04-17 15:56:46.000000000 -0400
39523@@ -15,7 +15,7 @@
39524 static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
39525 {
39526 struct autofs_info *ino = autofs4_dentry_ino(dentry);
39527- nd_set_link(nd, (char *)ino->u.symlink);
39528+ nd_set_link(nd, ino->u.symlink);
39529 return NULL;
39530 }
39531
39532diff -urNp linux-2.6.32.43/fs/befs/linuxvfs.c linux-2.6.32.43/fs/befs/linuxvfs.c
39533--- linux-2.6.32.43/fs/befs/linuxvfs.c 2011-03-27 14:31:47.000000000 -0400
39534+++ linux-2.6.32.43/fs/befs/linuxvfs.c 2011-04-17 15:56:46.000000000 -0400
39535@@ -493,7 +493,7 @@ static void befs_put_link(struct dentry
39536 {
39537 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
39538 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
39539- char *link = nd_get_link(nd);
39540+ const char *link = nd_get_link(nd);
39541 if (!IS_ERR(link))
39542 kfree(link);
39543 }
39544diff -urNp linux-2.6.32.43/fs/binfmt_aout.c linux-2.6.32.43/fs/binfmt_aout.c
39545--- linux-2.6.32.43/fs/binfmt_aout.c 2011-03-27 14:31:47.000000000 -0400
39546+++ linux-2.6.32.43/fs/binfmt_aout.c 2011-04-17 15:56:46.000000000 -0400
39547@@ -16,6 +16,7 @@
39548 #include <linux/string.h>
39549 #include <linux/fs.h>
39550 #include <linux/file.h>
39551+#include <linux/security.h>
39552 #include <linux/stat.h>
39553 #include <linux/fcntl.h>
39554 #include <linux/ptrace.h>
39555@@ -102,6 +103,8 @@ static int aout_core_dump(long signr, st
39556 #endif
39557 # define START_STACK(u) (u.start_stack)
39558
39559+ memset(&dump, 0, sizeof(dump));
39560+
39561 fs = get_fs();
39562 set_fs(KERNEL_DS);
39563 has_dumped = 1;
39564@@ -113,10 +116,12 @@ static int aout_core_dump(long signr, st
39565
39566 /* If the size of the dump file exceeds the rlimit, then see what would happen
39567 if we wrote the stack, but not the data area. */
39568+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
39569 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > limit)
39570 dump.u_dsize = 0;
39571
39572 /* Make sure we have enough room to write the stack and data areas. */
39573+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
39574 if ((dump.u_ssize + 1) * PAGE_SIZE > limit)
39575 dump.u_ssize = 0;
39576
39577@@ -146,9 +151,7 @@ static int aout_core_dump(long signr, st
39578 dump_size = dump.u_ssize << PAGE_SHIFT;
39579 DUMP_WRITE(dump_start,dump_size);
39580 }
39581-/* Finally dump the task struct. Not be used by gdb, but could be useful */
39582- set_fs(KERNEL_DS);
39583- DUMP_WRITE(current,sizeof(*current));
39584+/* Finally, let's not dump the task struct. Not be used by gdb, but could be useful to an attacker */
39585 end_coredump:
39586 set_fs(fs);
39587 return has_dumped;
39588@@ -249,6 +252,8 @@ static int load_aout_binary(struct linux
39589 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
39590 if (rlim >= RLIM_INFINITY)
39591 rlim = ~0;
39592+
39593+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
39594 if (ex.a_data + ex.a_bss > rlim)
39595 return -ENOMEM;
39596
39597@@ -277,6 +282,27 @@ static int load_aout_binary(struct linux
39598 install_exec_creds(bprm);
39599 current->flags &= ~PF_FORKNOEXEC;
39600
39601+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39602+ current->mm->pax_flags = 0UL;
39603+#endif
39604+
39605+#ifdef CONFIG_PAX_PAGEEXEC
39606+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
39607+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
39608+
39609+#ifdef CONFIG_PAX_EMUTRAMP
39610+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
39611+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
39612+#endif
39613+
39614+#ifdef CONFIG_PAX_MPROTECT
39615+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
39616+ current->mm->pax_flags |= MF_PAX_MPROTECT;
39617+#endif
39618+
39619+ }
39620+#endif
39621+
39622 if (N_MAGIC(ex) == OMAGIC) {
39623 unsigned long text_addr, map_size;
39624 loff_t pos;
39625@@ -349,7 +375,7 @@ static int load_aout_binary(struct linux
39626
39627 down_write(&current->mm->mmap_sem);
39628 error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
39629- PROT_READ | PROT_WRITE | PROT_EXEC,
39630+ PROT_READ | PROT_WRITE,
39631 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
39632 fd_offset + ex.a_text);
39633 up_write(&current->mm->mmap_sem);
39634diff -urNp linux-2.6.32.43/fs/binfmt_elf.c linux-2.6.32.43/fs/binfmt_elf.c
39635--- linux-2.6.32.43/fs/binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
39636+++ linux-2.6.32.43/fs/binfmt_elf.c 2011-05-16 21:46:57.000000000 -0400
39637@@ -50,6 +50,10 @@ static int elf_core_dump(long signr, str
39638 #define elf_core_dump NULL
39639 #endif
39640
39641+#ifdef CONFIG_PAX_MPROTECT
39642+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
39643+#endif
39644+
39645 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
39646 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
39647 #else
39648@@ -69,6 +73,11 @@ static struct linux_binfmt elf_format =
39649 .load_binary = load_elf_binary,
39650 .load_shlib = load_elf_library,
39651 .core_dump = elf_core_dump,
39652+
39653+#ifdef CONFIG_PAX_MPROTECT
39654+ .handle_mprotect= elf_handle_mprotect,
39655+#endif
39656+
39657 .min_coredump = ELF_EXEC_PAGESIZE,
39658 .hasvdso = 1
39659 };
39660@@ -77,6 +86,8 @@ static struct linux_binfmt elf_format =
39661
39662 static int set_brk(unsigned long start, unsigned long end)
39663 {
39664+ unsigned long e = end;
39665+
39666 start = ELF_PAGEALIGN(start);
39667 end = ELF_PAGEALIGN(end);
39668 if (end > start) {
39669@@ -87,7 +98,7 @@ static int set_brk(unsigned long start,
39670 if (BAD_ADDR(addr))
39671 return addr;
39672 }
39673- current->mm->start_brk = current->mm->brk = end;
39674+ current->mm->start_brk = current->mm->brk = e;
39675 return 0;
39676 }
39677
39678@@ -148,12 +159,15 @@ create_elf_tables(struct linux_binprm *b
39679 elf_addr_t __user *u_rand_bytes;
39680 const char *k_platform = ELF_PLATFORM;
39681 const char *k_base_platform = ELF_BASE_PLATFORM;
39682- unsigned char k_rand_bytes[16];
39683+ u32 k_rand_bytes[4];
39684 int items;
39685 elf_addr_t *elf_info;
39686 int ei_index = 0;
39687 const struct cred *cred = current_cred();
39688 struct vm_area_struct *vma;
39689+ unsigned long saved_auxv[AT_VECTOR_SIZE];
39690+
39691+ pax_track_stack();
39692
39693 /*
39694 * In some cases (e.g. Hyper-Threading), we want to avoid L1
39695@@ -195,8 +209,12 @@ create_elf_tables(struct linux_binprm *b
39696 * Generate 16 random bytes for userspace PRNG seeding.
39697 */
39698 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
39699- u_rand_bytes = (elf_addr_t __user *)
39700- STACK_ALLOC(p, sizeof(k_rand_bytes));
39701+ srandom32(k_rand_bytes[0] ^ random32());
39702+ srandom32(k_rand_bytes[1] ^ random32());
39703+ srandom32(k_rand_bytes[2] ^ random32());
39704+ srandom32(k_rand_bytes[3] ^ random32());
39705+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
39706+ u_rand_bytes = (elf_addr_t __user *) p;
39707 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
39708 return -EFAULT;
39709
39710@@ -308,9 +326,11 @@ create_elf_tables(struct linux_binprm *b
39711 return -EFAULT;
39712 current->mm->env_end = p;
39713
39714+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
39715+
39716 /* Put the elf_info on the stack in the right place. */
39717 sp = (elf_addr_t __user *)envp + 1;
39718- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
39719+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
39720 return -EFAULT;
39721 return 0;
39722 }
39723@@ -385,10 +405,10 @@ static unsigned long load_elf_interp(str
39724 {
39725 struct elf_phdr *elf_phdata;
39726 struct elf_phdr *eppnt;
39727- unsigned long load_addr = 0;
39728+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
39729 int load_addr_set = 0;
39730 unsigned long last_bss = 0, elf_bss = 0;
39731- unsigned long error = ~0UL;
39732+ unsigned long error = -EINVAL;
39733 unsigned long total_size;
39734 int retval, i, size;
39735
39736@@ -434,6 +454,11 @@ static unsigned long load_elf_interp(str
39737 goto out_close;
39738 }
39739
39740+#ifdef CONFIG_PAX_SEGMEXEC
39741+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
39742+ pax_task_size = SEGMEXEC_TASK_SIZE;
39743+#endif
39744+
39745 eppnt = elf_phdata;
39746 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
39747 if (eppnt->p_type == PT_LOAD) {
39748@@ -477,8 +502,8 @@ static unsigned long load_elf_interp(str
39749 k = load_addr + eppnt->p_vaddr;
39750 if (BAD_ADDR(k) ||
39751 eppnt->p_filesz > eppnt->p_memsz ||
39752- eppnt->p_memsz > TASK_SIZE ||
39753- TASK_SIZE - eppnt->p_memsz < k) {
39754+ eppnt->p_memsz > pax_task_size ||
39755+ pax_task_size - eppnt->p_memsz < k) {
39756 error = -ENOMEM;
39757 goto out_close;
39758 }
39759@@ -532,6 +557,194 @@ out:
39760 return error;
39761 }
39762
39763+#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
39764+static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
39765+{
39766+ unsigned long pax_flags = 0UL;
39767+
39768+#ifdef CONFIG_PAX_PAGEEXEC
39769+ if (elf_phdata->p_flags & PF_PAGEEXEC)
39770+ pax_flags |= MF_PAX_PAGEEXEC;
39771+#endif
39772+
39773+#ifdef CONFIG_PAX_SEGMEXEC
39774+ if (elf_phdata->p_flags & PF_SEGMEXEC)
39775+ pax_flags |= MF_PAX_SEGMEXEC;
39776+#endif
39777+
39778+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39779+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39780+ if (nx_enabled)
39781+ pax_flags &= ~MF_PAX_SEGMEXEC;
39782+ else
39783+ pax_flags &= ~MF_PAX_PAGEEXEC;
39784+ }
39785+#endif
39786+
39787+#ifdef CONFIG_PAX_EMUTRAMP
39788+ if (elf_phdata->p_flags & PF_EMUTRAMP)
39789+ pax_flags |= MF_PAX_EMUTRAMP;
39790+#endif
39791+
39792+#ifdef CONFIG_PAX_MPROTECT
39793+ if (elf_phdata->p_flags & PF_MPROTECT)
39794+ pax_flags |= MF_PAX_MPROTECT;
39795+#endif
39796+
39797+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39798+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
39799+ pax_flags |= MF_PAX_RANDMMAP;
39800+#endif
39801+
39802+ return pax_flags;
39803+}
39804+#endif
39805+
39806+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39807+static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
39808+{
39809+ unsigned long pax_flags = 0UL;
39810+
39811+#ifdef CONFIG_PAX_PAGEEXEC
39812+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
39813+ pax_flags |= MF_PAX_PAGEEXEC;
39814+#endif
39815+
39816+#ifdef CONFIG_PAX_SEGMEXEC
39817+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
39818+ pax_flags |= MF_PAX_SEGMEXEC;
39819+#endif
39820+
39821+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39822+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39823+ if (nx_enabled)
39824+ pax_flags &= ~MF_PAX_SEGMEXEC;
39825+ else
39826+ pax_flags &= ~MF_PAX_PAGEEXEC;
39827+ }
39828+#endif
39829+
39830+#ifdef CONFIG_PAX_EMUTRAMP
39831+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
39832+ pax_flags |= MF_PAX_EMUTRAMP;
39833+#endif
39834+
39835+#ifdef CONFIG_PAX_MPROTECT
39836+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
39837+ pax_flags |= MF_PAX_MPROTECT;
39838+#endif
39839+
39840+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
39841+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
39842+ pax_flags |= MF_PAX_RANDMMAP;
39843+#endif
39844+
39845+ return pax_flags;
39846+}
39847+#endif
39848+
39849+#ifdef CONFIG_PAX_EI_PAX
39850+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
39851+{
39852+ unsigned long pax_flags = 0UL;
39853+
39854+#ifdef CONFIG_PAX_PAGEEXEC
39855+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
39856+ pax_flags |= MF_PAX_PAGEEXEC;
39857+#endif
39858+
39859+#ifdef CONFIG_PAX_SEGMEXEC
39860+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
39861+ pax_flags |= MF_PAX_SEGMEXEC;
39862+#endif
39863+
39864+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
39865+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
39866+ if (nx_enabled)
39867+ pax_flags &= ~MF_PAX_SEGMEXEC;
39868+ else
39869+ pax_flags &= ~MF_PAX_PAGEEXEC;
39870+ }
39871+#endif
39872+
39873+#ifdef CONFIG_PAX_EMUTRAMP
39874+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
39875+ pax_flags |= MF_PAX_EMUTRAMP;
39876+#endif
39877+
39878+#ifdef CONFIG_PAX_MPROTECT
39879+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
39880+ pax_flags |= MF_PAX_MPROTECT;
39881+#endif
39882+
39883+#ifdef CONFIG_PAX_ASLR
39884+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
39885+ pax_flags |= MF_PAX_RANDMMAP;
39886+#endif
39887+
39888+ return pax_flags;
39889+}
39890+#endif
39891+
39892+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
39893+static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
39894+{
39895+ unsigned long pax_flags = 0UL;
39896+
39897+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39898+ unsigned long i;
39899+ int found_flags = 0;
39900+#endif
39901+
39902+#ifdef CONFIG_PAX_EI_PAX
39903+ pax_flags = pax_parse_ei_pax(elf_ex);
39904+#endif
39905+
39906+#ifdef CONFIG_PAX_PT_PAX_FLAGS
39907+ for (i = 0UL; i < elf_ex->e_phnum; i++)
39908+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
39909+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
39910+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
39911+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
39912+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
39913+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
39914+ return -EINVAL;
39915+
39916+#ifdef CONFIG_PAX_SOFTMODE
39917+ if (pax_softmode)
39918+ pax_flags = pax_parse_softmode(&elf_phdata[i]);
39919+ else
39920+#endif
39921+
39922+ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
39923+ found_flags = 1;
39924+ break;
39925+ }
39926+#endif
39927+
39928+#if !defined(CONFIG_PAX_EI_PAX) && defined(CONFIG_PAX_PT_PAX_FLAGS)
39929+ if (found_flags == 0) {
39930+ struct elf_phdr phdr;
39931+ memset(&phdr, 0, sizeof(phdr));
39932+ phdr.p_flags = PF_NOEMUTRAMP;
39933+#ifdef CONFIG_PAX_SOFTMODE
39934+ if (pax_softmode)
39935+ pax_flags = pax_parse_softmode(&phdr);
39936+ else
39937+#endif
39938+ pax_flags = pax_parse_hardmode(&phdr);
39939+ }
39940+#endif
39941+
39942+
39943+ if (0 > pax_check_flags(&pax_flags))
39944+ return -EINVAL;
39945+
39946+ current->mm->pax_flags = pax_flags;
39947+ return 0;
39948+}
39949+#endif
39950+
39951 /*
39952 * These are the functions used to load ELF style executables and shared
39953 * libraries. There is no binary dependent code anywhere else.
39954@@ -548,6 +761,11 @@ static unsigned long randomize_stack_top
39955 {
39956 unsigned int random_variable = 0;
39957
39958+#ifdef CONFIG_PAX_RANDUSTACK
39959+ if (randomize_va_space)
39960+ return stack_top - current->mm->delta_stack;
39961+#endif
39962+
39963 if ((current->flags & PF_RANDOMIZE) &&
39964 !(current->personality & ADDR_NO_RANDOMIZE)) {
39965 random_variable = get_random_int() & STACK_RND_MASK;
39966@@ -566,7 +784,7 @@ static int load_elf_binary(struct linux_
39967 unsigned long load_addr = 0, load_bias = 0;
39968 int load_addr_set = 0;
39969 char * elf_interpreter = NULL;
39970- unsigned long error;
39971+ unsigned long error = 0;
39972 struct elf_phdr *elf_ppnt, *elf_phdata;
39973 unsigned long elf_bss, elf_brk;
39974 int retval, i;
39975@@ -576,11 +794,11 @@ static int load_elf_binary(struct linux_
39976 unsigned long start_code, end_code, start_data, end_data;
39977 unsigned long reloc_func_desc = 0;
39978 int executable_stack = EXSTACK_DEFAULT;
39979- unsigned long def_flags = 0;
39980 struct {
39981 struct elfhdr elf_ex;
39982 struct elfhdr interp_elf_ex;
39983 } *loc;
39984+ unsigned long pax_task_size = TASK_SIZE;
39985
39986 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
39987 if (!loc) {
39988@@ -718,11 +936,80 @@ static int load_elf_binary(struct linux_
39989
39990 /* OK, This is the point of no return */
39991 current->flags &= ~PF_FORKNOEXEC;
39992- current->mm->def_flags = def_flags;
39993+
39994+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
39995+ current->mm->pax_flags = 0UL;
39996+#endif
39997+
39998+#ifdef CONFIG_PAX_DLRESOLVE
39999+ current->mm->call_dl_resolve = 0UL;
40000+#endif
40001+
40002+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
40003+ current->mm->call_syscall = 0UL;
40004+#endif
40005+
40006+#ifdef CONFIG_PAX_ASLR
40007+ current->mm->delta_mmap = 0UL;
40008+ current->mm->delta_stack = 0UL;
40009+#endif
40010+
40011+ current->mm->def_flags = 0;
40012+
40013+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
40014+ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) {
40015+ send_sig(SIGKILL, current, 0);
40016+ goto out_free_dentry;
40017+ }
40018+#endif
40019+
40020+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
40021+ pax_set_initial_flags(bprm);
40022+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
40023+ if (pax_set_initial_flags_func)
40024+ (pax_set_initial_flags_func)(bprm);
40025+#endif
40026+
40027+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
40028+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !nx_enabled) {
40029+ current->mm->context.user_cs_limit = PAGE_SIZE;
40030+ current->mm->def_flags |= VM_PAGEEXEC;
40031+ }
40032+#endif
40033+
40034+#ifdef CONFIG_PAX_SEGMEXEC
40035+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
40036+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
40037+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
40038+ pax_task_size = SEGMEXEC_TASK_SIZE;
40039+ }
40040+#endif
40041+
40042+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
40043+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40044+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
40045+ put_cpu();
40046+ }
40047+#endif
40048
40049 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
40050 may depend on the personality. */
40051 SET_PERSONALITY(loc->elf_ex);
40052+
40053+#ifdef CONFIG_PAX_ASLR
40054+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
40055+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
40056+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
40057+ }
40058+#endif
40059+
40060+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
40061+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
40062+ executable_stack = EXSTACK_DISABLE_X;
40063+ current->personality &= ~READ_IMPLIES_EXEC;
40064+ } else
40065+#endif
40066+
40067 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
40068 current->personality |= READ_IMPLIES_EXEC;
40069
40070@@ -804,6 +1091,20 @@ static int load_elf_binary(struct linux_
40071 #else
40072 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
40073 #endif
40074+
40075+#ifdef CONFIG_PAX_RANDMMAP
40076+ /* PaX: randomize base address at the default exe base if requested */
40077+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
40078+#ifdef CONFIG_SPARC64
40079+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
40080+#else
40081+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
40082+#endif
40083+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
40084+ elf_flags |= MAP_FIXED;
40085+ }
40086+#endif
40087+
40088 }
40089
40090 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
40091@@ -836,9 +1137,9 @@ static int load_elf_binary(struct linux_
40092 * allowed task size. Note that p_filesz must always be
40093 * <= p_memsz so it is only necessary to check p_memsz.
40094 */
40095- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40096- elf_ppnt->p_memsz > TASK_SIZE ||
40097- TASK_SIZE - elf_ppnt->p_memsz < k) {
40098+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
40099+ elf_ppnt->p_memsz > pax_task_size ||
40100+ pax_task_size - elf_ppnt->p_memsz < k) {
40101 /* set_brk can never work. Avoid overflows. */
40102 send_sig(SIGKILL, current, 0);
40103 retval = -EINVAL;
40104@@ -866,6 +1167,11 @@ static int load_elf_binary(struct linux_
40105 start_data += load_bias;
40106 end_data += load_bias;
40107
40108+#ifdef CONFIG_PAX_RANDMMAP
40109+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
40110+ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4);
40111+#endif
40112+
40113 /* Calling set_brk effectively mmaps the pages that we need
40114 * for the bss and break sections. We must do this before
40115 * mapping in the interpreter, to make sure it doesn't wind
40116@@ -877,9 +1183,11 @@ static int load_elf_binary(struct linux_
40117 goto out_free_dentry;
40118 }
40119 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
40120- send_sig(SIGSEGV, current, 0);
40121- retval = -EFAULT; /* Nobody gets to see this, but.. */
40122- goto out_free_dentry;
40123+ /*
40124+ * This bss-zeroing can fail if the ELF
40125+ * file specifies odd protections. So
40126+ * we don't check the return value
40127+ */
40128 }
40129
40130 if (elf_interpreter) {
40131@@ -1112,8 +1420,10 @@ static int dump_seek(struct file *file,
40132 unsigned long n = off;
40133 if (n > PAGE_SIZE)
40134 n = PAGE_SIZE;
40135- if (!dump_write(file, buf, n))
40136+ if (!dump_write(file, buf, n)) {
40137+ free_page((unsigned long)buf);
40138 return 0;
40139+ }
40140 off -= n;
40141 }
40142 free_page((unsigned long)buf);
40143@@ -1125,7 +1435,7 @@ static int dump_seek(struct file *file,
40144 * Decide what to dump of a segment, part, all or none.
40145 */
40146 static unsigned long vma_dump_size(struct vm_area_struct *vma,
40147- unsigned long mm_flags)
40148+ unsigned long mm_flags, long signr)
40149 {
40150 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
40151
40152@@ -1159,7 +1469,7 @@ static unsigned long vma_dump_size(struc
40153 if (vma->vm_file == NULL)
40154 return 0;
40155
40156- if (FILTER(MAPPED_PRIVATE))
40157+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
40158 goto whole;
40159
40160 /*
40161@@ -1255,8 +1565,11 @@ static int writenote(struct memelfnote *
40162 #undef DUMP_WRITE
40163
40164 #define DUMP_WRITE(addr, nr) \
40165+ do { \
40166+ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
40167 if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
40168- goto end_coredump;
40169+ goto end_coredump; \
40170+ } while (0);
40171
40172 static void fill_elf_header(struct elfhdr *elf, int segs,
40173 u16 machine, u32 flags, u8 osabi)
40174@@ -1385,9 +1698,9 @@ static void fill_auxv_note(struct memelf
40175 {
40176 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
40177 int i = 0;
40178- do
40179+ do {
40180 i += 2;
40181- while (auxv[i - 2] != AT_NULL);
40182+ } while (auxv[i - 2] != AT_NULL);
40183 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
40184 }
40185
40186@@ -1973,7 +2286,7 @@ static int elf_core_dump(long signr, str
40187 phdr.p_offset = offset;
40188 phdr.p_vaddr = vma->vm_start;
40189 phdr.p_paddr = 0;
40190- phdr.p_filesz = vma_dump_size(vma, mm_flags);
40191+ phdr.p_filesz = vma_dump_size(vma, mm_flags, signr);
40192 phdr.p_memsz = vma->vm_end - vma->vm_start;
40193 offset += phdr.p_filesz;
40194 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
40195@@ -2006,7 +2319,7 @@ static int elf_core_dump(long signr, str
40196 unsigned long addr;
40197 unsigned long end;
40198
40199- end = vma->vm_start + vma_dump_size(vma, mm_flags);
40200+ end = vma->vm_start + vma_dump_size(vma, mm_flags, signr);
40201
40202 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
40203 struct page *page;
40204@@ -2015,6 +2328,7 @@ static int elf_core_dump(long signr, str
40205 page = get_dump_page(addr);
40206 if (page) {
40207 void *kaddr = kmap(page);
40208+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
40209 stop = ((size += PAGE_SIZE) > limit) ||
40210 !dump_write(file, kaddr, PAGE_SIZE);
40211 kunmap(page);
40212@@ -2042,6 +2356,97 @@ out:
40213
40214 #endif /* USE_ELF_CORE_DUMP */
40215
40216+#ifdef CONFIG_PAX_MPROTECT
40217+/* PaX: non-PIC ELF libraries need relocations on their executable segments
40218+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
40219+ * we'll remove VM_MAYWRITE for good on RELRO segments.
40220+ *
40221+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
40222+ * basis because we want to allow the common case and not the special ones.
40223+ */
40224+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
40225+{
40226+ struct elfhdr elf_h;
40227+ struct elf_phdr elf_p;
40228+ unsigned long i;
40229+ unsigned long oldflags;
40230+ bool is_textrel_rw, is_textrel_rx, is_relro;
40231+
40232+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
40233+ return;
40234+
40235+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
40236+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
40237+
40238+#ifdef CONFIG_PAX_ELFRELOCS
40239+ /* possible TEXTREL */
40240+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
40241+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
40242+#else
40243+ is_textrel_rw = false;
40244+ is_textrel_rx = false;
40245+#endif
40246+
40247+ /* possible RELRO */
40248+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
40249+
40250+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
40251+ return;
40252+
40253+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
40254+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
40255+
40256+#ifdef CONFIG_PAX_ETEXECRELOCS
40257+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40258+#else
40259+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
40260+#endif
40261+
40262+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
40263+ !elf_check_arch(&elf_h) ||
40264+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
40265+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
40266+ return;
40267+
40268+ for (i = 0UL; i < elf_h.e_phnum; i++) {
40269+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
40270+ return;
40271+ switch (elf_p.p_type) {
40272+ case PT_DYNAMIC:
40273+ if (!is_textrel_rw && !is_textrel_rx)
40274+ continue;
40275+ i = 0UL;
40276+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
40277+ elf_dyn dyn;
40278+
40279+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
40280+ return;
40281+ if (dyn.d_tag == DT_NULL)
40282+ return;
40283+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
40284+ gr_log_textrel(vma);
40285+ if (is_textrel_rw)
40286+ vma->vm_flags |= VM_MAYWRITE;
40287+ else
40288+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
40289+ vma->vm_flags &= ~VM_MAYWRITE;
40290+ return;
40291+ }
40292+ i++;
40293+ }
40294+ return;
40295+
40296+ case PT_GNU_RELRO:
40297+ if (!is_relro)
40298+ continue;
40299+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
40300+ vma->vm_flags &= ~VM_MAYWRITE;
40301+ return;
40302+ }
40303+ }
40304+}
40305+#endif
40306+
40307 static int __init init_elf_binfmt(void)
40308 {
40309 return register_binfmt(&elf_format);
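
Editor's note: the ASLR hunks above derive delta_mmap and delta_stack by masking a random value to PAX_DELTA_MMAP_LEN/PAX_DELTA_STACK_LEN bits and then shifting by PAGE_SHIFT, so the deltas stay page-aligned and bounded. Below is a minimal user-space sketch of that arithmetic; getrandom(), PAGE_SHIFT_BITS=12 and DELTA_MMAP_LEN=28 are stand-ins chosen for the example, not the kernel's per-architecture values.

/* Illustrative sketch only: mimics the delta_mmap computation from the
 * hunk above in user space. Constants are example values. */
#include <stdio.h>
#include <sys/types.h>
#include <sys/random.h>

#define PAGE_SHIFT_BITS 12          /* 4 KiB pages assumed */
#define DELTA_MMAP_LEN  28          /* number of random bits; per-arch in PaX */

static unsigned long random_long(void)
{
    unsigned long r = 0;
    ssize_t got = getrandom(&r, sizeof(r), 0);
    if (got != (ssize_t)sizeof(r))
        r = 0;                      /* short read is unlikely; demo fallback */
    return r;
}

int main(void)
{
    /* Mask to DELTA_MMAP_LEN bits, then shift so the delta is page aligned:
     * the result is a multiple of the page size in [0, 2^LEN * PAGE_SIZE). */
    unsigned long delta_mmap =
        (random_long() & ((1UL << DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT_BITS;

    printf("delta_mmap = %#lx (%lu pages)\n",
           delta_mmap, delta_mmap >> PAGE_SHIFT_BITS);
    return 0;
}
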
40310diff -urNp linux-2.6.32.43/fs/binfmt_flat.c linux-2.6.32.43/fs/binfmt_flat.c
40311--- linux-2.6.32.43/fs/binfmt_flat.c 2011-03-27 14:31:47.000000000 -0400
40312+++ linux-2.6.32.43/fs/binfmt_flat.c 2011-04-17 15:56:46.000000000 -0400
40313@@ -564,7 +564,9 @@ static int load_flat_file(struct linux_b
40314 realdatastart = (unsigned long) -ENOMEM;
40315 printk("Unable to allocate RAM for process data, errno %d\n",
40316 (int)-realdatastart);
40317+ down_write(&current->mm->mmap_sem);
40318 do_munmap(current->mm, textpos, text_len);
40319+ up_write(&current->mm->mmap_sem);
40320 ret = realdatastart;
40321 goto err;
40322 }
40323@@ -588,8 +590,10 @@ static int load_flat_file(struct linux_b
40324 }
40325 if (IS_ERR_VALUE(result)) {
40326 printk("Unable to read data+bss, errno %d\n", (int)-result);
40327+ down_write(&current->mm->mmap_sem);
40328 do_munmap(current->mm, textpos, text_len);
40329 do_munmap(current->mm, realdatastart, data_len + extra);
40330+ up_write(&current->mm->mmap_sem);
40331 ret = result;
40332 goto err;
40333 }
40334@@ -658,8 +662,10 @@ static int load_flat_file(struct linux_b
40335 }
40336 if (IS_ERR_VALUE(result)) {
40337 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
40338+ down_write(&current->mm->mmap_sem);
40339 do_munmap(current->mm, textpos, text_len + data_len + extra +
40340 MAX_SHARED_LIBS * sizeof(unsigned long));
40341+ up_write(&current->mm->mmap_sem);
40342 ret = result;
40343 goto err;
40344 }
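
Editor's note: the three binfmt_flat hunks above restore the rule that do_munmap() must be called with mmap_sem held for writing, including on the loader's error paths. A rough user-space analogue of that discipline follows, with pthread_rwlock_t standing in for mmap_sem and an invented load_file()/unmap_region() pair.

/* Sketch of the locking discipline only; not kernel code. Build with -pthread. */
#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t map_lock = PTHREAD_RWLOCK_INITIALIZER;
static int mapping_present;

static void unmap_region(void)
{
    /* like do_munmap(): callers must already hold map_lock for writing */
    mapping_present = 0;
}

static int load_file(int simulate_failure)
{
    pthread_rwlock_wrlock(&map_lock);
    mapping_present = 1;            /* stand-in for mapping the text segment */
    pthread_rwlock_unlock(&map_lock);

    if (simulate_failure) {
        /* error path: retake the write lock before tearing the mapping down,
         * the same pattern the hunks above add around do_munmap() */
        pthread_rwlock_wrlock(&map_lock);
        unmap_region();
        pthread_rwlock_unlock(&map_lock);
        return -1;
    }
    return 0;
}

int main(void)
{
    int ret = load_file(1);
    printf("load_file() = %d, mapping_present = %d\n", ret, mapping_present);
    return 0;
}
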
40345diff -urNp linux-2.6.32.43/fs/bio.c linux-2.6.32.43/fs/bio.c
40346--- linux-2.6.32.43/fs/bio.c 2011-03-27 14:31:47.000000000 -0400
40347+++ linux-2.6.32.43/fs/bio.c 2011-04-17 15:56:46.000000000 -0400
40348@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_cr
40349
40350 i = 0;
40351 while (i < bio_slab_nr) {
40352- struct bio_slab *bslab = &bio_slabs[i];
40353+ bslab = &bio_slabs[i];
40354
40355 if (!bslab->slab && entry == -1)
40356 entry = i;
40357@@ -1236,7 +1236,7 @@ static void bio_copy_kern_endio(struct b
40358 const int read = bio_data_dir(bio) == READ;
40359 struct bio_map_data *bmd = bio->bi_private;
40360 int i;
40361- char *p = bmd->sgvecs[0].iov_base;
40362+ char *p = (__force char *)bmd->sgvecs[0].iov_base;
40363
40364 __bio_for_each_segment(bvec, bio, i, 0) {
40365 char *addr = page_address(bvec->bv_page);
40366diff -urNp linux-2.6.32.43/fs/block_dev.c linux-2.6.32.43/fs/block_dev.c
40367--- linux-2.6.32.43/fs/block_dev.c 2011-06-25 12:55:34.000000000 -0400
40368+++ linux-2.6.32.43/fs/block_dev.c 2011-06-25 12:56:37.000000000 -0400
40369@@ -664,7 +664,7 @@ int bd_claim(struct block_device *bdev,
40370 else if (bdev->bd_contains == bdev)
40371 res = 0; /* is a whole device which isn't held */
40372
40373- else if (bdev->bd_contains->bd_holder == bd_claim)
40374+ else if (bdev->bd_contains->bd_holder == (void *)bd_claim)
40375 res = 0; /* is a partition of a device that is being partitioned */
40376 else if (bdev->bd_contains->bd_holder != NULL)
40377 res = -EBUSY; /* is a partition of a held device */
40378diff -urNp linux-2.6.32.43/fs/btrfs/ctree.c linux-2.6.32.43/fs/btrfs/ctree.c
40379--- linux-2.6.32.43/fs/btrfs/ctree.c 2011-03-27 14:31:47.000000000 -0400
40380+++ linux-2.6.32.43/fs/btrfs/ctree.c 2011-04-17 15:56:46.000000000 -0400
40381@@ -461,9 +461,12 @@ static noinline int __btrfs_cow_block(st
40382 free_extent_buffer(buf);
40383 add_root_to_dirty_list(root);
40384 } else {
40385- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
40386- parent_start = parent->start;
40387- else
40388+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
40389+ if (parent)
40390+ parent_start = parent->start;
40391+ else
40392+ parent_start = 0;
40393+ } else
40394 parent_start = 0;
40395
40396 WARN_ON(trans->transid != btrfs_header_generation(parent));
40397@@ -3645,7 +3648,6 @@ setup_items_for_insert(struct btrfs_tran
40398
40399 ret = 0;
40400 if (slot == 0) {
40401- struct btrfs_disk_key disk_key;
40402 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
40403 ret = fixup_low_keys(trans, root, path, &disk_key, 1);
40404 }
40405diff -urNp linux-2.6.32.43/fs/btrfs/disk-io.c linux-2.6.32.43/fs/btrfs/disk-io.c
40406--- linux-2.6.32.43/fs/btrfs/disk-io.c 2011-04-17 17:00:52.000000000 -0400
40407+++ linux-2.6.32.43/fs/btrfs/disk-io.c 2011-04-17 17:03:11.000000000 -0400
40408@@ -39,7 +39,7 @@
40409 #include "tree-log.h"
40410 #include "free-space-cache.h"
40411
40412-static struct extent_io_ops btree_extent_io_ops;
40413+static const struct extent_io_ops btree_extent_io_ops;
40414 static void end_workqueue_fn(struct btrfs_work *work);
40415 static void free_fs_root(struct btrfs_root *root);
40416
40417@@ -2607,7 +2607,7 @@ out:
40418 return 0;
40419 }
40420
40421-static struct extent_io_ops btree_extent_io_ops = {
40422+static const struct extent_io_ops btree_extent_io_ops = {
40423 .write_cache_pages_lock_hook = btree_lock_page_hook,
40424 .readpage_end_io_hook = btree_readpage_end_io_hook,
40425 .submit_bio_hook = btree_submit_bio_hook,
40426diff -urNp linux-2.6.32.43/fs/btrfs/extent_io.h linux-2.6.32.43/fs/btrfs/extent_io.h
40427--- linux-2.6.32.43/fs/btrfs/extent_io.h 2011-03-27 14:31:47.000000000 -0400
40428+++ linux-2.6.32.43/fs/btrfs/extent_io.h 2011-04-17 15:56:46.000000000 -0400
40429@@ -49,36 +49,36 @@ typedef int (extent_submit_bio_hook_t)(s
40430 struct bio *bio, int mirror_num,
40431 unsigned long bio_flags);
40432 struct extent_io_ops {
40433- int (*fill_delalloc)(struct inode *inode, struct page *locked_page,
40434+ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page,
40435 u64 start, u64 end, int *page_started,
40436 unsigned long *nr_written);
40437- int (*writepage_start_hook)(struct page *page, u64 start, u64 end);
40438- int (*writepage_io_hook)(struct page *page, u64 start, u64 end);
40439+ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end);
40440+ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end);
40441 extent_submit_bio_hook_t *submit_bio_hook;
40442- int (*merge_bio_hook)(struct page *page, unsigned long offset,
40443+ int (* const merge_bio_hook)(struct page *page, unsigned long offset,
40444 size_t size, struct bio *bio,
40445 unsigned long bio_flags);
40446- int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
40447- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page,
40448+ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end);
40449+ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page,
40450 u64 start, u64 end,
40451 struct extent_state *state);
40452- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
40453+ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page,
40454 u64 start, u64 end,
40455 struct extent_state *state);
40456- int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
40457+ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end,
40458 struct extent_state *state);
40459- int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
40460+ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end,
40461 struct extent_state *state, int uptodate);
40462- int (*set_bit_hook)(struct inode *inode, u64 start, u64 end,
40463+ int (* const set_bit_hook)(struct inode *inode, u64 start, u64 end,
40464 unsigned long old, unsigned long bits);
40465- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state,
40466+ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state,
40467 unsigned long bits);
40468- int (*merge_extent_hook)(struct inode *inode,
40469+ int (* const merge_extent_hook)(struct inode *inode,
40470 struct extent_state *new,
40471 struct extent_state *other);
40472- int (*split_extent_hook)(struct inode *inode,
40473+ int (* const split_extent_hook)(struct inode *inode,
40474 struct extent_state *orig, u64 split);
40475- int (*write_cache_pages_lock_hook)(struct page *page);
40476+ int (* const write_cache_pages_lock_hook)(struct page *page);
40477 };
40478
40479 struct extent_io_tree {
40480@@ -88,7 +88,7 @@ struct extent_io_tree {
40481 u64 dirty_bytes;
40482 spinlock_t lock;
40483 spinlock_t buffer_lock;
40484- struct extent_io_ops *ops;
40485+ const struct extent_io_ops *ops;
40486 };
40487
40488 struct extent_state {
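
Editor's note: making the ops table and its function-pointer members const (here and in the matching disk-io.c, inode.c and sysfs.c hunks) lets the compiler reject reassignment and typically allows the table to be placed in a read-only section, removing a convenient target for function-pointer overwrites. A tiny standalone sketch with an invented demo_ops struct:

/* Sketch: why the patch marks ops tables const. Names are made up. */
#include <stdio.h>

struct demo_ops {
    void (* const greet)(void);      /* const member: cannot be reassigned */
};

static void hello(void) { puts("hello"); }

/* The whole object is const too, so it can live in a read-only section. */
static const struct demo_ops demo_ops = {
    .greet = hello,
};

int main(void)
{
    demo_ops.greet();
    /* demo_ops.greet = hello;  would not compile: assignment of read-only
     * member of a read-only object */
    return 0;
}
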
40489diff -urNp linux-2.6.32.43/fs/btrfs/extent-tree.c linux-2.6.32.43/fs/btrfs/extent-tree.c
40490--- linux-2.6.32.43/fs/btrfs/extent-tree.c 2011-03-27 14:31:47.000000000 -0400
40491+++ linux-2.6.32.43/fs/btrfs/extent-tree.c 2011-06-12 06:39:08.000000000 -0400
40492@@ -7141,6 +7141,10 @@ static noinline int relocate_one_extent(
40493 u64 group_start = group->key.objectid;
40494 new_extents = kmalloc(sizeof(*new_extents),
40495 GFP_NOFS);
40496+ if (!new_extents) {
40497+ ret = -ENOMEM;
40498+ goto out;
40499+ }
40500 nr_extents = 1;
40501 ret = get_new_locations(reloc_inode,
40502 extent_key,
40503diff -urNp linux-2.6.32.43/fs/btrfs/free-space-cache.c linux-2.6.32.43/fs/btrfs/free-space-cache.c
40504--- linux-2.6.32.43/fs/btrfs/free-space-cache.c 2011-03-27 14:31:47.000000000 -0400
40505+++ linux-2.6.32.43/fs/btrfs/free-space-cache.c 2011-04-17 15:56:46.000000000 -0400
40506@@ -1074,8 +1074,6 @@ u64 btrfs_alloc_from_cluster(struct btrf
40507
40508 while(1) {
40509 if (entry->bytes < bytes || entry->offset < min_start) {
40510- struct rb_node *node;
40511-
40512 node = rb_next(&entry->offset_index);
40513 if (!node)
40514 break;
40515@@ -1226,7 +1224,7 @@ again:
40516 */
40517 while (entry->bitmap || found_bitmap ||
40518 (!entry->bitmap && entry->bytes < min_bytes)) {
40519- struct rb_node *node = rb_next(&entry->offset_index);
40520+ node = rb_next(&entry->offset_index);
40521
40522 if (entry->bitmap && entry->bytes > bytes + empty_size) {
40523 ret = btrfs_bitmap_cluster(block_group, entry, cluster,
40524diff -urNp linux-2.6.32.43/fs/btrfs/inode.c linux-2.6.32.43/fs/btrfs/inode.c
40525--- linux-2.6.32.43/fs/btrfs/inode.c 2011-03-27 14:31:47.000000000 -0400
40526+++ linux-2.6.32.43/fs/btrfs/inode.c 2011-06-12 06:39:58.000000000 -0400
40527@@ -63,7 +63,7 @@ static const struct inode_operations btr
40528 static const struct address_space_operations btrfs_aops;
40529 static const struct address_space_operations btrfs_symlink_aops;
40530 static const struct file_operations btrfs_dir_file_operations;
40531-static struct extent_io_ops btrfs_extent_io_ops;
40532+static const struct extent_io_ops btrfs_extent_io_ops;
40533
40534 static struct kmem_cache *btrfs_inode_cachep;
40535 struct kmem_cache *btrfs_trans_handle_cachep;
40536@@ -925,6 +925,7 @@ static int cow_file_range_async(struct i
40537 1, 0, NULL, GFP_NOFS);
40538 while (start < end) {
40539 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
40540+ BUG_ON(!async_cow);
40541 async_cow->inode = inode;
40542 async_cow->root = root;
40543 async_cow->locked_page = locked_page;
40544@@ -4591,6 +4592,8 @@ static noinline int uncompress_inline(st
40545 inline_size = btrfs_file_extent_inline_item_len(leaf,
40546 btrfs_item_nr(leaf, path->slots[0]));
40547 tmp = kmalloc(inline_size, GFP_NOFS);
40548+ if (!tmp)
40549+ return -ENOMEM;
40550 ptr = btrfs_file_extent_inline_start(item);
40551
40552 read_extent_buffer(leaf, tmp, ptr, inline_size);
40553@@ -5410,7 +5413,7 @@ fail:
40554 return -ENOMEM;
40555 }
40556
40557-static int btrfs_getattr(struct vfsmount *mnt,
40558+int btrfs_getattr(struct vfsmount *mnt,
40559 struct dentry *dentry, struct kstat *stat)
40560 {
40561 struct inode *inode = dentry->d_inode;
40562@@ -5422,6 +5425,14 @@ static int btrfs_getattr(struct vfsmount
40563 return 0;
40564 }
40565
40566+EXPORT_SYMBOL(btrfs_getattr);
40567+
40568+dev_t get_btrfs_dev_from_inode(struct inode *inode)
40569+{
40570+ return BTRFS_I(inode)->root->anon_super.s_dev;
40571+}
40572+EXPORT_SYMBOL(get_btrfs_dev_from_inode);
40573+
40574 static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
40575 struct inode *new_dir, struct dentry *new_dentry)
40576 {
40577@@ -5972,7 +5983,7 @@ static const struct file_operations btrf
40578 .fsync = btrfs_sync_file,
40579 };
40580
40581-static struct extent_io_ops btrfs_extent_io_ops = {
40582+static const struct extent_io_ops btrfs_extent_io_ops = {
40583 .fill_delalloc = run_delalloc_range,
40584 .submit_bio_hook = btrfs_submit_bio_hook,
40585 .merge_bio_hook = btrfs_merge_bio_hook,
40586diff -urNp linux-2.6.32.43/fs/btrfs/relocation.c linux-2.6.32.43/fs/btrfs/relocation.c
40587--- linux-2.6.32.43/fs/btrfs/relocation.c 2011-03-27 14:31:47.000000000 -0400
40588+++ linux-2.6.32.43/fs/btrfs/relocation.c 2011-04-17 15:56:46.000000000 -0400
40589@@ -884,7 +884,7 @@ static int __update_reloc_root(struct bt
40590 }
40591 spin_unlock(&rc->reloc_root_tree.lock);
40592
40593- BUG_ON((struct btrfs_root *)node->data != root);
40594+ BUG_ON(!node || (struct btrfs_root *)node->data != root);
40595
40596 if (!del) {
40597 spin_lock(&rc->reloc_root_tree.lock);
40598diff -urNp linux-2.6.32.43/fs/btrfs/sysfs.c linux-2.6.32.43/fs/btrfs/sysfs.c
40599--- linux-2.6.32.43/fs/btrfs/sysfs.c 2011-03-27 14:31:47.000000000 -0400
40600+++ linux-2.6.32.43/fs/btrfs/sysfs.c 2011-04-17 15:56:46.000000000 -0400
40601@@ -164,12 +164,12 @@ static void btrfs_root_release(struct ko
40602 complete(&root->kobj_unregister);
40603 }
40604
40605-static struct sysfs_ops btrfs_super_attr_ops = {
40606+static const struct sysfs_ops btrfs_super_attr_ops = {
40607 .show = btrfs_super_attr_show,
40608 .store = btrfs_super_attr_store,
40609 };
40610
40611-static struct sysfs_ops btrfs_root_attr_ops = {
40612+static const struct sysfs_ops btrfs_root_attr_ops = {
40613 .show = btrfs_root_attr_show,
40614 .store = btrfs_root_attr_store,
40615 };
40616diff -urNp linux-2.6.32.43/fs/buffer.c linux-2.6.32.43/fs/buffer.c
40617--- linux-2.6.32.43/fs/buffer.c 2011-03-27 14:31:47.000000000 -0400
40618+++ linux-2.6.32.43/fs/buffer.c 2011-04-17 15:56:46.000000000 -0400
40619@@ -25,6 +25,7 @@
40620 #include <linux/percpu.h>
40621 #include <linux/slab.h>
40622 #include <linux/capability.h>
40623+#include <linux/security.h>
40624 #include <linux/blkdev.h>
40625 #include <linux/file.h>
40626 #include <linux/quotaops.h>
40627diff -urNp linux-2.6.32.43/fs/cachefiles/bind.c linux-2.6.32.43/fs/cachefiles/bind.c
40628--- linux-2.6.32.43/fs/cachefiles/bind.c 2011-03-27 14:31:47.000000000 -0400
40629+++ linux-2.6.32.43/fs/cachefiles/bind.c 2011-04-17 15:56:46.000000000 -0400
40630@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef
40631 args);
40632
40633 /* start by checking things over */
40634- ASSERT(cache->fstop_percent >= 0 &&
40635- cache->fstop_percent < cache->fcull_percent &&
40636+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
40637 cache->fcull_percent < cache->frun_percent &&
40638 cache->frun_percent < 100);
40639
40640- ASSERT(cache->bstop_percent >= 0 &&
40641- cache->bstop_percent < cache->bcull_percent &&
40642+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
40643 cache->bcull_percent < cache->brun_percent &&
40644 cache->brun_percent < 100);
40645
40646diff -urNp linux-2.6.32.43/fs/cachefiles/daemon.c linux-2.6.32.43/fs/cachefiles/daemon.c
40647--- linux-2.6.32.43/fs/cachefiles/daemon.c 2011-03-27 14:31:47.000000000 -0400
40648+++ linux-2.6.32.43/fs/cachefiles/daemon.c 2011-04-17 15:56:46.000000000 -0400
40649@@ -220,7 +220,7 @@ static ssize_t cachefiles_daemon_write(s
40650 if (test_bit(CACHEFILES_DEAD, &cache->flags))
40651 return -EIO;
40652
40653- if (datalen < 0 || datalen > PAGE_SIZE - 1)
40654+ if (datalen > PAGE_SIZE - 1)
40655 return -EOPNOTSUPP;
40656
40657 /* drag the command string into the kernel so we can parse it */
40658@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc
40659 if (args[0] != '%' || args[1] != '\0')
40660 return -EINVAL;
40661
40662- if (fstop < 0 || fstop >= cache->fcull_percent)
40663+ if (fstop >= cache->fcull_percent)
40664 return cachefiles_daemon_range_error(cache, args);
40665
40666 cache->fstop_percent = fstop;
40667@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc
40668 if (args[0] != '%' || args[1] != '\0')
40669 return -EINVAL;
40670
40671- if (bstop < 0 || bstop >= cache->bcull_percent)
40672+ if (bstop >= cache->bcull_percent)
40673 return cachefiles_daemon_range_error(cache, args);
40674
40675 cache->bstop_percent = bstop;
40676diff -urNp linux-2.6.32.43/fs/cachefiles/internal.h linux-2.6.32.43/fs/cachefiles/internal.h
40677--- linux-2.6.32.43/fs/cachefiles/internal.h 2011-03-27 14:31:47.000000000 -0400
40678+++ linux-2.6.32.43/fs/cachefiles/internal.h 2011-05-04 17:56:28.000000000 -0400
40679@@ -56,7 +56,7 @@ struct cachefiles_cache {
40680 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
40681 struct rb_root active_nodes; /* active nodes (can't be culled) */
40682 rwlock_t active_lock; /* lock for active_nodes */
40683- atomic_t gravecounter; /* graveyard uniquifier */
40684+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
40685 unsigned frun_percent; /* when to stop culling (% files) */
40686 unsigned fcull_percent; /* when to start culling (% files) */
40687 unsigned fstop_percent; /* when to stop allocating (% files) */
40688@@ -168,19 +168,19 @@ extern int cachefiles_check_in_use(struc
40689 * proc.c
40690 */
40691 #ifdef CONFIG_CACHEFILES_HISTOGRAM
40692-extern atomic_t cachefiles_lookup_histogram[HZ];
40693-extern atomic_t cachefiles_mkdir_histogram[HZ];
40694-extern atomic_t cachefiles_create_histogram[HZ];
40695+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40696+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40697+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
40698
40699 extern int __init cachefiles_proc_init(void);
40700 extern void cachefiles_proc_cleanup(void);
40701 static inline
40702-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
40703+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
40704 {
40705 unsigned long jif = jiffies - start_jif;
40706 if (jif >= HZ)
40707 jif = HZ - 1;
40708- atomic_inc(&histogram[jif]);
40709+ atomic_inc_unchecked(&histogram[jif]);
40710 }
40711
40712 #else
40713diff -urNp linux-2.6.32.43/fs/cachefiles/namei.c linux-2.6.32.43/fs/cachefiles/namei.c
40714--- linux-2.6.32.43/fs/cachefiles/namei.c 2011-03-27 14:31:47.000000000 -0400
40715+++ linux-2.6.32.43/fs/cachefiles/namei.c 2011-05-04 17:56:28.000000000 -0400
40716@@ -250,7 +250,7 @@ try_again:
40717 /* first step is to make up a grave dentry in the graveyard */
40718 sprintf(nbuffer, "%08x%08x",
40719 (uint32_t) get_seconds(),
40720- (uint32_t) atomic_inc_return(&cache->gravecounter));
40721+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
40722
40723 /* do the multiway lock magic */
40724 trap = lock_rename(cache->graveyard, dir);
40725diff -urNp linux-2.6.32.43/fs/cachefiles/proc.c linux-2.6.32.43/fs/cachefiles/proc.c
40726--- linux-2.6.32.43/fs/cachefiles/proc.c 2011-03-27 14:31:47.000000000 -0400
40727+++ linux-2.6.32.43/fs/cachefiles/proc.c 2011-05-04 17:56:28.000000000 -0400
40728@@ -14,9 +14,9 @@
40729 #include <linux/seq_file.h>
40730 #include "internal.h"
40731
40732-atomic_t cachefiles_lookup_histogram[HZ];
40733-atomic_t cachefiles_mkdir_histogram[HZ];
40734-atomic_t cachefiles_create_histogram[HZ];
40735+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
40736+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
40737+atomic_unchecked_t cachefiles_create_histogram[HZ];
40738
40739 /*
40740 * display the latency histogram
40741@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(str
40742 return 0;
40743 default:
40744 index = (unsigned long) v - 3;
40745- x = atomic_read(&cachefiles_lookup_histogram[index]);
40746- y = atomic_read(&cachefiles_mkdir_histogram[index]);
40747- z = atomic_read(&cachefiles_create_histogram[index]);
40748+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
40749+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
40750+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
40751 if (x == 0 && y == 0 && z == 0)
40752 return 0;
40753
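
Editor's note: the atomic_t to atomic_unchecked_t conversions above opt these histogram and statistics counters out of the overflow checking that PaX's REFCOUNT feature adds to the regular atomic_t operations: reference counts must never wrap, while statistics may wrap harmlessly. A user-space sketch of the checked-versus-unchecked distinction follows; it relies on the GCC/Clang __builtin_add_overflow builtin, which is an assumption of the example, and is not the kernel implementation.

#include <stdio.h>
#include <limits.h>
#include <stdbool.h>

static unsigned int refcount_like = UINT_MAX;   /* must not wrap */
static unsigned int stat_counter  = UINT_MAX;   /* wrapping is harmless */

/* "Checked" increment: refuses to wrap and reports the overflow. */
static bool inc_checked(unsigned int *v)
{
    unsigned int next;
    if (__builtin_add_overflow(*v, 1u, &next))
        return false;            /* overflow detected, value left unchanged */
    *v = next;
    return true;
}

/* "Unchecked" increment: plain counter, allowed to wrap. */
static void inc_unchecked(unsigned int *v)
{
    (*v)++;                      /* wraps to 0, fine for statistics */
}

int main(void)
{
    if (!inc_checked(&refcount_like))
        puts("checked counter: wraparound refused");

    inc_unchecked(&stat_counter);
    printf("unchecked counter wrapped to %u\n", stat_counter);
    return 0;
}
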
40754diff -urNp linux-2.6.32.43/fs/cachefiles/rdwr.c linux-2.6.32.43/fs/cachefiles/rdwr.c
40755--- linux-2.6.32.43/fs/cachefiles/rdwr.c 2011-03-27 14:31:47.000000000 -0400
40756+++ linux-2.6.32.43/fs/cachefiles/rdwr.c 2011-04-17 15:56:46.000000000 -0400
40757@@ -946,7 +946,7 @@ int cachefiles_write_page(struct fscache
40758 old_fs = get_fs();
40759 set_fs(KERNEL_DS);
40760 ret = file->f_op->write(
40761- file, (const void __user *) data, len, &pos);
40762+ file, (__force const void __user *) data, len, &pos);
40763 set_fs(old_fs);
40764 kunmap(page);
40765 if (ret != len)
40766diff -urNp linux-2.6.32.43/fs/cifs/cifs_debug.c linux-2.6.32.43/fs/cifs/cifs_debug.c
40767--- linux-2.6.32.43/fs/cifs/cifs_debug.c 2011-03-27 14:31:47.000000000 -0400
40768+++ linux-2.6.32.43/fs/cifs/cifs_debug.c 2011-05-04 17:56:28.000000000 -0400
40769@@ -256,25 +256,25 @@ static ssize_t cifs_stats_proc_write(str
40770 tcon = list_entry(tmp3,
40771 struct cifsTconInfo,
40772 tcon_list);
40773- atomic_set(&tcon->num_smbs_sent, 0);
40774- atomic_set(&tcon->num_writes, 0);
40775- atomic_set(&tcon->num_reads, 0);
40776- atomic_set(&tcon->num_oplock_brks, 0);
40777- atomic_set(&tcon->num_opens, 0);
40778- atomic_set(&tcon->num_posixopens, 0);
40779- atomic_set(&tcon->num_posixmkdirs, 0);
40780- atomic_set(&tcon->num_closes, 0);
40781- atomic_set(&tcon->num_deletes, 0);
40782- atomic_set(&tcon->num_mkdirs, 0);
40783- atomic_set(&tcon->num_rmdirs, 0);
40784- atomic_set(&tcon->num_renames, 0);
40785- atomic_set(&tcon->num_t2renames, 0);
40786- atomic_set(&tcon->num_ffirst, 0);
40787- atomic_set(&tcon->num_fnext, 0);
40788- atomic_set(&tcon->num_fclose, 0);
40789- atomic_set(&tcon->num_hardlinks, 0);
40790- atomic_set(&tcon->num_symlinks, 0);
40791- atomic_set(&tcon->num_locks, 0);
40792+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
40793+ atomic_set_unchecked(&tcon->num_writes, 0);
40794+ atomic_set_unchecked(&tcon->num_reads, 0);
40795+ atomic_set_unchecked(&tcon->num_oplock_brks, 0);
40796+ atomic_set_unchecked(&tcon->num_opens, 0);
40797+ atomic_set_unchecked(&tcon->num_posixopens, 0);
40798+ atomic_set_unchecked(&tcon->num_posixmkdirs, 0);
40799+ atomic_set_unchecked(&tcon->num_closes, 0);
40800+ atomic_set_unchecked(&tcon->num_deletes, 0);
40801+ atomic_set_unchecked(&tcon->num_mkdirs, 0);
40802+ atomic_set_unchecked(&tcon->num_rmdirs, 0);
40803+ atomic_set_unchecked(&tcon->num_renames, 0);
40804+ atomic_set_unchecked(&tcon->num_t2renames, 0);
40805+ atomic_set_unchecked(&tcon->num_ffirst, 0);
40806+ atomic_set_unchecked(&tcon->num_fnext, 0);
40807+ atomic_set_unchecked(&tcon->num_fclose, 0);
40808+ atomic_set_unchecked(&tcon->num_hardlinks, 0);
40809+ atomic_set_unchecked(&tcon->num_symlinks, 0);
40810+ atomic_set_unchecked(&tcon->num_locks, 0);
40811 }
40812 }
40813 }
40814@@ -334,41 +334,41 @@ static int cifs_stats_proc_show(struct s
40815 if (tcon->need_reconnect)
40816 seq_puts(m, "\tDISCONNECTED ");
40817 seq_printf(m, "\nSMBs: %d Oplock Breaks: %d",
40818- atomic_read(&tcon->num_smbs_sent),
40819- atomic_read(&tcon->num_oplock_brks));
40820+ atomic_read_unchecked(&tcon->num_smbs_sent),
40821+ atomic_read_unchecked(&tcon->num_oplock_brks));
40822 seq_printf(m, "\nReads: %d Bytes: %lld",
40823- atomic_read(&tcon->num_reads),
40824+ atomic_read_unchecked(&tcon->num_reads),
40825 (long long)(tcon->bytes_read));
40826 seq_printf(m, "\nWrites: %d Bytes: %lld",
40827- atomic_read(&tcon->num_writes),
40828+ atomic_read_unchecked(&tcon->num_writes),
40829 (long long)(tcon->bytes_written));
40830 seq_printf(m, "\nFlushes: %d",
40831- atomic_read(&tcon->num_flushes));
40832+ atomic_read_unchecked(&tcon->num_flushes));
40833 seq_printf(m, "\nLocks: %d HardLinks: %d "
40834 "Symlinks: %d",
40835- atomic_read(&tcon->num_locks),
40836- atomic_read(&tcon->num_hardlinks),
40837- atomic_read(&tcon->num_symlinks));
40838+ atomic_read_unchecked(&tcon->num_locks),
40839+ atomic_read_unchecked(&tcon->num_hardlinks),
40840+ atomic_read_unchecked(&tcon->num_symlinks));
40841 seq_printf(m, "\nOpens: %d Closes: %d "
40842 "Deletes: %d",
40843- atomic_read(&tcon->num_opens),
40844- atomic_read(&tcon->num_closes),
40845- atomic_read(&tcon->num_deletes));
40846+ atomic_read_unchecked(&tcon->num_opens),
40847+ atomic_read_unchecked(&tcon->num_closes),
40848+ atomic_read_unchecked(&tcon->num_deletes));
40849 seq_printf(m, "\nPosix Opens: %d "
40850 "Posix Mkdirs: %d",
40851- atomic_read(&tcon->num_posixopens),
40852- atomic_read(&tcon->num_posixmkdirs));
40853+ atomic_read_unchecked(&tcon->num_posixopens),
40854+ atomic_read_unchecked(&tcon->num_posixmkdirs));
40855 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
40856- atomic_read(&tcon->num_mkdirs),
40857- atomic_read(&tcon->num_rmdirs));
40858+ atomic_read_unchecked(&tcon->num_mkdirs),
40859+ atomic_read_unchecked(&tcon->num_rmdirs));
40860 seq_printf(m, "\nRenames: %d T2 Renames %d",
40861- atomic_read(&tcon->num_renames),
40862- atomic_read(&tcon->num_t2renames));
40863+ atomic_read_unchecked(&tcon->num_renames),
40864+ atomic_read_unchecked(&tcon->num_t2renames));
40865 seq_printf(m, "\nFindFirst: %d FNext %d "
40866 "FClose %d",
40867- atomic_read(&tcon->num_ffirst),
40868- atomic_read(&tcon->num_fnext),
40869- atomic_read(&tcon->num_fclose));
40870+ atomic_read_unchecked(&tcon->num_ffirst),
40871+ atomic_read_unchecked(&tcon->num_fnext),
40872+ atomic_read_unchecked(&tcon->num_fclose));
40873 }
40874 }
40875 }
40876diff -urNp linux-2.6.32.43/fs/cifs/cifsglob.h linux-2.6.32.43/fs/cifs/cifsglob.h
40877--- linux-2.6.32.43/fs/cifs/cifsglob.h 2011-03-27 14:31:47.000000000 -0400
40878+++ linux-2.6.32.43/fs/cifs/cifsglob.h 2011-05-04 17:56:28.000000000 -0400
40879@@ -252,28 +252,28 @@ struct cifsTconInfo {
40880 __u16 Flags; /* optional support bits */
40881 enum statusEnum tidStatus;
40882 #ifdef CONFIG_CIFS_STATS
40883- atomic_t num_smbs_sent;
40884- atomic_t num_writes;
40885- atomic_t num_reads;
40886- atomic_t num_flushes;
40887- atomic_t num_oplock_brks;
40888- atomic_t num_opens;
40889- atomic_t num_closes;
40890- atomic_t num_deletes;
40891- atomic_t num_mkdirs;
40892- atomic_t num_posixopens;
40893- atomic_t num_posixmkdirs;
40894- atomic_t num_rmdirs;
40895- atomic_t num_renames;
40896- atomic_t num_t2renames;
40897- atomic_t num_ffirst;
40898- atomic_t num_fnext;
40899- atomic_t num_fclose;
40900- atomic_t num_hardlinks;
40901- atomic_t num_symlinks;
40902- atomic_t num_locks;
40903- atomic_t num_acl_get;
40904- atomic_t num_acl_set;
40905+ atomic_unchecked_t num_smbs_sent;
40906+ atomic_unchecked_t num_writes;
40907+ atomic_unchecked_t num_reads;
40908+ atomic_unchecked_t num_flushes;
40909+ atomic_unchecked_t num_oplock_brks;
40910+ atomic_unchecked_t num_opens;
40911+ atomic_unchecked_t num_closes;
40912+ atomic_unchecked_t num_deletes;
40913+ atomic_unchecked_t num_mkdirs;
40914+ atomic_unchecked_t num_posixopens;
40915+ atomic_unchecked_t num_posixmkdirs;
40916+ atomic_unchecked_t num_rmdirs;
40917+ atomic_unchecked_t num_renames;
40918+ atomic_unchecked_t num_t2renames;
40919+ atomic_unchecked_t num_ffirst;
40920+ atomic_unchecked_t num_fnext;
40921+ atomic_unchecked_t num_fclose;
40922+ atomic_unchecked_t num_hardlinks;
40923+ atomic_unchecked_t num_symlinks;
40924+ atomic_unchecked_t num_locks;
40925+ atomic_unchecked_t num_acl_get;
40926+ atomic_unchecked_t num_acl_set;
40927 #ifdef CONFIG_CIFS_STATS2
40928 unsigned long long time_writes;
40929 unsigned long long time_reads;
40930@@ -414,7 +414,7 @@ static inline char CIFS_DIR_SEP(const st
40931 }
40932
40933 #ifdef CONFIG_CIFS_STATS
40934-#define cifs_stats_inc atomic_inc
40935+#define cifs_stats_inc atomic_inc_unchecked
40936
40937 static inline void cifs_stats_bytes_written(struct cifsTconInfo *tcon,
40938 unsigned int bytes)
40939diff -urNp linux-2.6.32.43/fs/cifs/link.c linux-2.6.32.43/fs/cifs/link.c
40940--- linux-2.6.32.43/fs/cifs/link.c 2011-03-27 14:31:47.000000000 -0400
40941+++ linux-2.6.32.43/fs/cifs/link.c 2011-04-17 15:56:46.000000000 -0400
40942@@ -215,7 +215,7 @@ cifs_symlink(struct inode *inode, struct
40943
40944 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
40945 {
40946- char *p = nd_get_link(nd);
40947+ const char *p = nd_get_link(nd);
40948 if (!IS_ERR(p))
40949 kfree(p);
40950 }
40951diff -urNp linux-2.6.32.43/fs/coda/cache.c linux-2.6.32.43/fs/coda/cache.c
40952--- linux-2.6.32.43/fs/coda/cache.c 2011-03-27 14:31:47.000000000 -0400
40953+++ linux-2.6.32.43/fs/coda/cache.c 2011-05-04 17:56:28.000000000 -0400
40954@@ -24,14 +24,14 @@
40955 #include <linux/coda_fs_i.h>
40956 #include <linux/coda_cache.h>
40957
40958-static atomic_t permission_epoch = ATOMIC_INIT(0);
40959+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
40960
40961 /* replace or extend an acl cache hit */
40962 void coda_cache_enter(struct inode *inode, int mask)
40963 {
40964 struct coda_inode_info *cii = ITOC(inode);
40965
40966- cii->c_cached_epoch = atomic_read(&permission_epoch);
40967+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
40968 if (cii->c_uid != current_fsuid()) {
40969 cii->c_uid = current_fsuid();
40970 cii->c_cached_perm = mask;
40971@@ -43,13 +43,13 @@ void coda_cache_enter(struct inode *inod
40972 void coda_cache_clear_inode(struct inode *inode)
40973 {
40974 struct coda_inode_info *cii = ITOC(inode);
40975- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
40976+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
40977 }
40978
40979 /* remove all acl caches */
40980 void coda_cache_clear_all(struct super_block *sb)
40981 {
40982- atomic_inc(&permission_epoch);
40983+ atomic_inc_unchecked(&permission_epoch);
40984 }
40985
40986
40987@@ -61,7 +61,7 @@ int coda_cache_check(struct inode *inode
40988
40989 hit = (mask & cii->c_cached_perm) == mask &&
40990 cii->c_uid == current_fsuid() &&
40991- cii->c_cached_epoch == atomic_read(&permission_epoch);
40992+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
40993
40994 return hit;
40995 }
40996diff -urNp linux-2.6.32.43/fs/compat_binfmt_elf.c linux-2.6.32.43/fs/compat_binfmt_elf.c
40997--- linux-2.6.32.43/fs/compat_binfmt_elf.c 2011-03-27 14:31:47.000000000 -0400
40998+++ linux-2.6.32.43/fs/compat_binfmt_elf.c 2011-04-17 15:56:46.000000000 -0400
40999@@ -29,10 +29,12 @@
41000 #undef elfhdr
41001 #undef elf_phdr
41002 #undef elf_note
41003+#undef elf_dyn
41004 #undef elf_addr_t
41005 #define elfhdr elf32_hdr
41006 #define elf_phdr elf32_phdr
41007 #define elf_note elf32_note
41008+#define elf_dyn Elf32_Dyn
41009 #define elf_addr_t Elf32_Addr
41010
41011 /*
41012diff -urNp linux-2.6.32.43/fs/compat.c linux-2.6.32.43/fs/compat.c
41013--- linux-2.6.32.43/fs/compat.c 2011-04-17 17:00:52.000000000 -0400
41014+++ linux-2.6.32.43/fs/compat.c 2011-05-16 21:46:57.000000000 -0400
41015@@ -830,6 +830,7 @@ struct compat_old_linux_dirent {
41016
41017 struct compat_readdir_callback {
41018 struct compat_old_linux_dirent __user *dirent;
41019+ struct file * file;
41020 int result;
41021 };
41022
41023@@ -847,6 +848,10 @@ static int compat_fillonedir(void *__buf
41024 buf->result = -EOVERFLOW;
41025 return -EOVERFLOW;
41026 }
41027+
41028+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41029+ return 0;
41030+
41031 buf->result++;
41032 dirent = buf->dirent;
41033 if (!access_ok(VERIFY_WRITE, dirent,
41034@@ -879,6 +884,7 @@ asmlinkage long compat_sys_old_readdir(u
41035
41036 buf.result = 0;
41037 buf.dirent = dirent;
41038+ buf.file = file;
41039
41040 error = vfs_readdir(file, compat_fillonedir, &buf);
41041 if (buf.result)
41042@@ -899,6 +905,7 @@ struct compat_linux_dirent {
41043 struct compat_getdents_callback {
41044 struct compat_linux_dirent __user *current_dir;
41045 struct compat_linux_dirent __user *previous;
41046+ struct file * file;
41047 int count;
41048 int error;
41049 };
41050@@ -919,6 +926,10 @@ static int compat_filldir(void *__buf, c
41051 buf->error = -EOVERFLOW;
41052 return -EOVERFLOW;
41053 }
41054+
41055+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41056+ return 0;
41057+
41058 dirent = buf->previous;
41059 if (dirent) {
41060 if (__put_user(offset, &dirent->d_off))
41061@@ -966,6 +977,7 @@ asmlinkage long compat_sys_getdents(unsi
41062 buf.previous = NULL;
41063 buf.count = count;
41064 buf.error = 0;
41065+ buf.file = file;
41066
41067 error = vfs_readdir(file, compat_filldir, &buf);
41068 if (error >= 0)
41069@@ -987,6 +999,7 @@ out:
41070 struct compat_getdents_callback64 {
41071 struct linux_dirent64 __user *current_dir;
41072 struct linux_dirent64 __user *previous;
41073+ struct file * file;
41074 int count;
41075 int error;
41076 };
41077@@ -1003,6 +1016,10 @@ static int compat_filldir64(void * __buf
41078 buf->error = -EINVAL; /* only used if we fail.. */
41079 if (reclen > buf->count)
41080 return -EINVAL;
41081+
41082+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
41083+ return 0;
41084+
41085 dirent = buf->previous;
41086
41087 if (dirent) {
41088@@ -1054,6 +1071,7 @@ asmlinkage long compat_sys_getdents64(un
41089 buf.previous = NULL;
41090 buf.count = count;
41091 buf.error = 0;
41092+ buf.file = file;
41093
41094 error = vfs_readdir(file, compat_filldir64, &buf);
41095 if (error >= 0)
41096@@ -1098,7 +1116,7 @@ static ssize_t compat_do_readv_writev(in
41097 * verify all the pointers
41098 */
41099 ret = -EINVAL;
41100- if ((nr_segs > UIO_MAXIOV) || (nr_segs <= 0))
41101+ if (nr_segs > UIO_MAXIOV)
41102 goto out;
41103 if (!file->f_op)
41104 goto out;
41105@@ -1463,6 +1481,11 @@ int compat_do_execve(char * filename,
41106 compat_uptr_t __user *envp,
41107 struct pt_regs * regs)
41108 {
41109+#ifdef CONFIG_GRKERNSEC
41110+ struct file *old_exec_file;
41111+ struct acl_subject_label *old_acl;
41112+ struct rlimit old_rlim[RLIM_NLIMITS];
41113+#endif
41114 struct linux_binprm *bprm;
41115 struct file *file;
41116 struct files_struct *displaced;
41117@@ -1499,6 +1522,19 @@ int compat_do_execve(char * filename,
41118 bprm->filename = filename;
41119 bprm->interp = filename;
41120
41121+ if (gr_process_user_ban()) {
41122+ retval = -EPERM;
41123+ goto out_file;
41124+ }
41125+
41126+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
41127+ retval = -EAGAIN;
41128+ if (gr_handle_nproc())
41129+ goto out_file;
41130+ retval = -EACCES;
41131+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt))
41132+ goto out_file;
41133+
41134 retval = bprm_mm_init(bprm);
41135 if (retval)
41136 goto out_file;
41137@@ -1528,9 +1564,40 @@ int compat_do_execve(char * filename,
41138 if (retval < 0)
41139 goto out;
41140
41141+ if (!gr_tpe_allow(file)) {
41142+ retval = -EACCES;
41143+ goto out;
41144+ }
41145+
41146+ if (gr_check_crash_exec(file)) {
41147+ retval = -EACCES;
41148+ goto out;
41149+ }
41150+
41151+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
41152+
41153+ gr_handle_exec_args_compat(bprm, argv);
41154+
41155+#ifdef CONFIG_GRKERNSEC
41156+ old_acl = current->acl;
41157+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
41158+ old_exec_file = current->exec_file;
41159+ get_file(file);
41160+ current->exec_file = file;
41161+#endif
41162+
41163+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
41164+ bprm->unsafe & LSM_UNSAFE_SHARE);
41165+ if (retval < 0)
41166+ goto out_fail;
41167+
41168 retval = search_binary_handler(bprm, regs);
41169 if (retval < 0)
41170- goto out;
41171+ goto out_fail;
41172+#ifdef CONFIG_GRKERNSEC
41173+ if (old_exec_file)
41174+ fput(old_exec_file);
41175+#endif
41176
41177 /* execve succeeded */
41178 current->fs->in_exec = 0;
41179@@ -1541,6 +1608,14 @@ int compat_do_execve(char * filename,
41180 put_files_struct(displaced);
41181 return retval;
41182
41183+out_fail:
41184+#ifdef CONFIG_GRKERNSEC
41185+ current->acl = old_acl;
41186+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
41187+ fput(current->exec_file);
41188+ current->exec_file = old_exec_file;
41189+#endif
41190+
41191 out:
41192 if (bprm->mm) {
41193 acct_arg_size(bprm, 0);
41194@@ -1711,6 +1786,8 @@ int compat_core_sys_select(int n, compat
41195 struct fdtable *fdt;
41196 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
41197
41198+ pax_track_stack();
41199+
41200 if (n < 0)
41201 goto out_nofds;
41202
41203diff -urNp linux-2.6.32.43/fs/compat_ioctl.c linux-2.6.32.43/fs/compat_ioctl.c
41204--- linux-2.6.32.43/fs/compat_ioctl.c 2011-03-27 14:31:47.000000000 -0400
41205+++ linux-2.6.32.43/fs/compat_ioctl.c 2011-04-23 12:56:11.000000000 -0400
41206@@ -234,6 +234,8 @@ static int do_video_set_spu_palette(unsi
41207 up = (struct compat_video_spu_palette __user *) arg;
41208 err = get_user(palp, &up->palette);
41209 err |= get_user(length, &up->length);
41210+ if (err)
41211+ return -EFAULT;
41212
41213 up_native = compat_alloc_user_space(sizeof(struct video_spu_palette));
41214 err = put_user(compat_ptr(palp), &up_native->palette);
41215diff -urNp linux-2.6.32.43/fs/configfs/dir.c linux-2.6.32.43/fs/configfs/dir.c
41216--- linux-2.6.32.43/fs/configfs/dir.c 2011-03-27 14:31:47.000000000 -0400
41217+++ linux-2.6.32.43/fs/configfs/dir.c 2011-05-11 18:25:15.000000000 -0400
41218@@ -1572,7 +1572,8 @@ static int configfs_readdir(struct file
41219 }
41220 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
41221 struct configfs_dirent *next;
41222- const char * name;
41223+ const unsigned char * name;
41224+ char d_name[sizeof(next->s_dentry->d_iname)];
41225 int len;
41226
41227 next = list_entry(p, struct configfs_dirent,
41228@@ -1581,7 +1582,12 @@ static int configfs_readdir(struct file
41229 continue;
41230
41231 name = configfs_get_name(next);
41232- len = strlen(name);
41233+ if (next->s_dentry && name == next->s_dentry->d_iname) {
41234+ len = next->s_dentry->d_name.len;
41235+ memcpy(d_name, name, len);
41236+ name = d_name;
41237+ } else
41238+ len = strlen(name);
41239 if (next->s_dentry)
41240 ino = next->s_dentry->d_inode->i_ino;
41241 else
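
Editor's note: the configfs_readdir() fix above copies a dentry's inline name into a stack buffer using the recorded d_name.len before handing it on, instead of calling strlen() on a buffer that a concurrent rename may rewrite. A simplified sketch of that snapshot-then-use pattern, with struct layout and sizes invented for the example:

#include <stdio.h>
#include <string.h>

#define INLINE_NAME_LEN 32

struct entry {
    unsigned int name_len;              /* authoritative length */
    char inline_name[INLINE_NAME_LEN];  /* may be rewritten concurrently */
};

static void emit_name(const struct entry *e)
{
    char snapshot[INLINE_NAME_LEN];
    unsigned int len = e->name_len;

    if (len >= sizeof(snapshot))
        len = sizeof(snapshot) - 1;
    memcpy(snapshot, e->inline_name, len);   /* copy once, then use the copy */
    snapshot[len] = '\0';

    printf("%.*s\n", (int)len, snapshot);
}

int main(void)
{
    struct entry e = { .name_len = 5 };
    memcpy(e.inline_name, "hello", 5);
    emit_name(&e);
    return 0;
}
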
41242diff -urNp linux-2.6.32.43/fs/dcache.c linux-2.6.32.43/fs/dcache.c
41243--- linux-2.6.32.43/fs/dcache.c 2011-03-27 14:31:47.000000000 -0400
41244+++ linux-2.6.32.43/fs/dcache.c 2011-04-23 13:32:21.000000000 -0400
41245@@ -45,8 +45,6 @@ EXPORT_SYMBOL(dcache_lock);
41246
41247 static struct kmem_cache *dentry_cache __read_mostly;
41248
41249-#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
41250-
41251 /*
41252 * This is the single most critical data structure when it comes
41253 * to the dcache: the hashtable for lookups. Somebody should try
41254@@ -2319,7 +2317,7 @@ void __init vfs_caches_init(unsigned lon
41255 mempages -= reserve;
41256
41257 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
41258- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
41259+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
41260
41261 dcache_init();
41262 inode_init();
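
Editor's note: SLAB_USERCOPY marks names_cache as a cache whose objects may legitimately be copied to and from user space under PAX_USERCOPY's checks; copies involving unmarked caches, or copies that would spill past an object, are refused. A rough sketch of that whitelist-plus-bounds check follows; the demo_cache descriptor and checked_copy() helper are invented for the example.

#include <stdio.h>
#include <stdbool.h>
#include <stddef.h>

struct demo_cache {
    const char *name;
    size_t object_size;
    bool usercopy_allowed;          /* the SLAB_USERCOPY whitelist bit */
};

static int checked_copy(const struct demo_cache *cache, size_t offset,
                        size_t len)
{
    if (!cache->usercopy_allowed)
        return -1;                  /* cache not whitelisted for user copies */
    if (offset > cache->object_size || len > cache->object_size - offset)
        return -1;                  /* copy would spill past the object */
    return 0;                       /* the real code would perform the copy here */
}

int main(void)
{
    struct demo_cache names = { "names_cache", 4096, true  };
    struct demo_cache creds = { "cred_jar",     192, false };

    printf("names_cache copy: %d\n", checked_copy(&names, 0, 4096)); /* allowed */
    printf("cred_jar copy:    %d\n", checked_copy(&creds, 0, 64));   /* refused */
    return 0;
}
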
41263diff -urNp linux-2.6.32.43/fs/dlm/lockspace.c linux-2.6.32.43/fs/dlm/lockspace.c
41264--- linux-2.6.32.43/fs/dlm/lockspace.c 2011-03-27 14:31:47.000000000 -0400
41265+++ linux-2.6.32.43/fs/dlm/lockspace.c 2011-04-17 15:56:46.000000000 -0400
41266@@ -148,7 +148,7 @@ static void lockspace_kobj_release(struc
41267 kfree(ls);
41268 }
41269
41270-static struct sysfs_ops dlm_attr_ops = {
41271+static const struct sysfs_ops dlm_attr_ops = {
41272 .show = dlm_attr_show,
41273 .store = dlm_attr_store,
41274 };
41275diff -urNp linux-2.6.32.43/fs/ecryptfs/inode.c linux-2.6.32.43/fs/ecryptfs/inode.c
41276--- linux-2.6.32.43/fs/ecryptfs/inode.c 2011-03-27 14:31:47.000000000 -0400
41277+++ linux-2.6.32.43/fs/ecryptfs/inode.c 2011-04-17 15:56:46.000000000 -0400
41278@@ -660,7 +660,7 @@ static int ecryptfs_readlink_lower(struc
41279 old_fs = get_fs();
41280 set_fs(get_ds());
41281 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
41282- (char __user *)lower_buf,
41283+ (__force char __user *)lower_buf,
41284 lower_bufsiz);
41285 set_fs(old_fs);
41286 if (rc < 0)
41287@@ -706,7 +706,7 @@ static void *ecryptfs_follow_link(struct
41288 }
41289 old_fs = get_fs();
41290 set_fs(get_ds());
41291- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
41292+ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len);
41293 set_fs(old_fs);
41294 if (rc < 0)
41295 goto out_free;
41296diff -urNp linux-2.6.32.43/fs/exec.c linux-2.6.32.43/fs/exec.c
41297--- linux-2.6.32.43/fs/exec.c 2011-06-25 12:55:34.000000000 -0400
41298+++ linux-2.6.32.43/fs/exec.c 2011-07-06 19:53:33.000000000 -0400
41299@@ -56,12 +56,24 @@
41300 #include <linux/fsnotify.h>
41301 #include <linux/fs_struct.h>
41302 #include <linux/pipe_fs_i.h>
41303+#include <linux/random.h>
41304+#include <linux/seq_file.h>
41305+
41306+#ifdef CONFIG_PAX_REFCOUNT
41307+#include <linux/kallsyms.h>
41308+#include <linux/kdebug.h>
41309+#endif
41310
41311 #include <asm/uaccess.h>
41312 #include <asm/mmu_context.h>
41313 #include <asm/tlb.h>
41314 #include "internal.h"
41315
41316+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
41317+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
41318+EXPORT_SYMBOL(pax_set_initial_flags_func);
41319+#endif
41320+
41321 int core_uses_pid;
41322 char core_pattern[CORENAME_MAX_SIZE] = "core";
41323 unsigned int core_pipe_limit;
41324@@ -115,7 +127,7 @@ SYSCALL_DEFINE1(uselib, const char __use
41325 goto out;
41326
41327 file = do_filp_open(AT_FDCWD, tmp,
41328- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
41329+ O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
41330 MAY_READ | MAY_EXEC | MAY_OPEN);
41331 putname(tmp);
41332 error = PTR_ERR(file);
41333@@ -178,18 +190,10 @@ struct page *get_arg_page(struct linux_b
41334 int write)
41335 {
41336 struct page *page;
41337- int ret;
41338
41339-#ifdef CONFIG_STACK_GROWSUP
41340- if (write) {
41341- ret = expand_stack_downwards(bprm->vma, pos);
41342- if (ret < 0)
41343- return NULL;
41344- }
41345-#endif
41346- ret = get_user_pages(current, bprm->mm, pos,
41347- 1, write, 1, &page, NULL);
41348- if (ret <= 0)
41349+ if (0 > expand_stack_downwards(bprm->vma, pos))
41350+ return NULL;
41351+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
41352 return NULL;
41353
41354 if (write) {
41355@@ -263,6 +267,11 @@ static int __bprm_mm_init(struct linux_b
41356 vma->vm_end = STACK_TOP_MAX;
41357 vma->vm_start = vma->vm_end - PAGE_SIZE;
41358 vma->vm_flags = VM_STACK_FLAGS;
41359+
41360+#ifdef CONFIG_PAX_SEGMEXEC
41361+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
41362+#endif
41363+
41364 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
41365
41366 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
41367@@ -276,6 +285,12 @@ static int __bprm_mm_init(struct linux_b
41368 mm->stack_vm = mm->total_vm = 1;
41369 up_write(&mm->mmap_sem);
41370 bprm->p = vma->vm_end - sizeof(void *);
41371+
41372+#ifdef CONFIG_PAX_RANDUSTACK
41373+ if (randomize_va_space)
41374+ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK;
41375+#endif
41376+
41377 return 0;
41378 err:
41379 up_write(&mm->mmap_sem);
41380@@ -510,7 +525,7 @@ int copy_strings_kernel(int argc,char **
41381 int r;
41382 mm_segment_t oldfs = get_fs();
41383 set_fs(KERNEL_DS);
41384- r = copy_strings(argc, (char __user * __user *)argv, bprm);
41385+ r = copy_strings(argc, (__force char __user * __user *)argv, bprm);
41386 set_fs(oldfs);
41387 return r;
41388 }
41389@@ -540,7 +555,8 @@ static int shift_arg_pages(struct vm_are
41390 unsigned long new_end = old_end - shift;
41391 struct mmu_gather *tlb;
41392
41393- BUG_ON(new_start > new_end);
41394+ if (new_start >= new_end || new_start < mmap_min_addr)
41395+ return -ENOMEM;
41396
41397 /*
41398 * ensure there are no vmas between where we want to go
41399@@ -549,6 +565,10 @@ static int shift_arg_pages(struct vm_are
41400 if (vma != find_vma(mm, new_start))
41401 return -EFAULT;
41402
41403+#ifdef CONFIG_PAX_SEGMEXEC
41404+ BUG_ON(pax_find_mirror_vma(vma));
41405+#endif
41406+
41407 /*
41408 * cover the whole range: [new_start, old_end)
41409 */
41410@@ -630,10 +650,6 @@ int setup_arg_pages(struct linux_binprm
41411 stack_top = arch_align_stack(stack_top);
41412 stack_top = PAGE_ALIGN(stack_top);
41413
41414- if (unlikely(stack_top < mmap_min_addr) ||
41415- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
41416- return -ENOMEM;
41417-
41418 stack_shift = vma->vm_end - stack_top;
41419
41420 bprm->p -= stack_shift;
41421@@ -645,6 +661,14 @@ int setup_arg_pages(struct linux_binprm
41422 bprm->exec -= stack_shift;
41423
41424 down_write(&mm->mmap_sem);
41425+
41426+ /* Move stack pages down in memory. */
41427+ if (stack_shift) {
41428+ ret = shift_arg_pages(vma, stack_shift);
41429+ if (ret)
41430+ goto out_unlock;
41431+ }
41432+
41433 vm_flags = VM_STACK_FLAGS;
41434
41435 /*
41436@@ -658,19 +682,24 @@ int setup_arg_pages(struct linux_binprm
41437 vm_flags &= ~VM_EXEC;
41438 vm_flags |= mm->def_flags;
41439
41440+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41441+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
41442+ vm_flags &= ~VM_EXEC;
41443+
41444+#ifdef CONFIG_PAX_MPROTECT
41445+ if (mm->pax_flags & MF_PAX_MPROTECT)
41446+ vm_flags &= ~VM_MAYEXEC;
41447+#endif
41448+
41449+ }
41450+#endif
41451+
41452 ret = mprotect_fixup(vma, &prev, vma->vm_start, vma->vm_end,
41453 vm_flags);
41454 if (ret)
41455 goto out_unlock;
41456 BUG_ON(prev != vma);
41457
41458- /* Move stack pages down in memory. */
41459- if (stack_shift) {
41460- ret = shift_arg_pages(vma, stack_shift);
41461- if (ret)
41462- goto out_unlock;
41463- }
41464-
41465 stack_expand = EXTRA_STACK_VM_PAGES * PAGE_SIZE;
41466 stack_size = vma->vm_end - vma->vm_start;
41467 /*
41468@@ -707,7 +736,7 @@ struct file *open_exec(const char *name)
41469 int err;
41470
41471 file = do_filp_open(AT_FDCWD, name,
41472- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0,
41473+ O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0,
41474 MAY_EXEC | MAY_OPEN);
41475 if (IS_ERR(file))
41476 goto out;
41477@@ -744,7 +773,7 @@ int kernel_read(struct file *file, loff_
41478 old_fs = get_fs();
41479 set_fs(get_ds());
41480 /* The cast to a user pointer is valid due to the set_fs() */
41481- result = vfs_read(file, (void __user *)addr, count, &pos);
41482+ result = vfs_read(file, (__force void __user *)addr, count, &pos);
41483 set_fs(old_fs);
41484 return result;
41485 }
41486@@ -1152,7 +1181,7 @@ int check_unsafe_exec(struct linux_binpr
41487 }
41488 rcu_read_unlock();
41489
41490- if (p->fs->users > n_fs) {
41491+ if (atomic_read(&p->fs->users) > n_fs) {
41492 bprm->unsafe |= LSM_UNSAFE_SHARE;
41493 } else {
41494 res = -EAGAIN;
41495@@ -1347,6 +1376,11 @@ int do_execve(char * filename,
41496 char __user *__user *envp,
41497 struct pt_regs * regs)
41498 {
41499+#ifdef CONFIG_GRKERNSEC
41500+ struct file *old_exec_file;
41501+ struct acl_subject_label *old_acl;
41502+ struct rlimit old_rlim[RLIM_NLIMITS];
41503+#endif
41504 struct linux_binprm *bprm;
41505 struct file *file;
41506 struct files_struct *displaced;
41507@@ -1383,6 +1417,23 @@ int do_execve(char * filename,
41508 bprm->filename = filename;
41509 bprm->interp = filename;
41510
41511+ if (gr_process_user_ban()) {
41512+ retval = -EPERM;
41513+ goto out_file;
41514+ }
41515+
41516+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->cred->user->processes), 1);
41517+
41518+ if (gr_handle_nproc()) {
41519+ retval = -EAGAIN;
41520+ goto out_file;
41521+ }
41522+
41523+ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
41524+ retval = -EACCES;
41525+ goto out_file;
41526+ }
41527+
41528 retval = bprm_mm_init(bprm);
41529 if (retval)
41530 goto out_file;
41531@@ -1412,10 +1463,41 @@ int do_execve(char * filename,
41532 if (retval < 0)
41533 goto out;
41534
41535+ if (!gr_tpe_allow(file)) {
41536+ retval = -EACCES;
41537+ goto out;
41538+ }
41539+
41540+ if (gr_check_crash_exec(file)) {
41541+ retval = -EACCES;
41542+ goto out;
41543+ }
41544+
41545+ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
41546+
41547+ gr_handle_exec_args(bprm, (const char __user *const __user *)argv);
41548+
41549+#ifdef CONFIG_GRKERNSEC
41550+ old_acl = current->acl;
41551+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
41552+ old_exec_file = current->exec_file;
41553+ get_file(file);
41554+ current->exec_file = file;
41555+#endif
41556+
41557+ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt,
41558+ bprm->unsafe & LSM_UNSAFE_SHARE);
41559+ if (retval < 0)
41560+ goto out_fail;
41561+
41562 current->flags &= ~PF_KTHREAD;
41563 retval = search_binary_handler(bprm,regs);
41564 if (retval < 0)
41565- goto out;
41566+ goto out_fail;
41567+#ifdef CONFIG_GRKERNSEC
41568+ if (old_exec_file)
41569+ fput(old_exec_file);
41570+#endif
41571
41572 /* execve succeeded */
41573 current->fs->in_exec = 0;
41574@@ -1426,6 +1508,14 @@ int do_execve(char * filename,
41575 put_files_struct(displaced);
41576 return retval;
41577
41578+out_fail:
41579+#ifdef CONFIG_GRKERNSEC
41580+ current->acl = old_acl;
41581+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
41582+ fput(current->exec_file);
41583+ current->exec_file = old_exec_file;
41584+#endif
41585+
41586 out:
41587 if (bprm->mm) {
41588 acct_arg_size(bprm, 0);
41589@@ -1591,6 +1681,220 @@ out:
41590 return ispipe;
41591 }
41592
41593+int pax_check_flags(unsigned long *flags)
41594+{
41595+ int retval = 0;
41596+
41597+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
41598+ if (*flags & MF_PAX_SEGMEXEC)
41599+ {
41600+ *flags &= ~MF_PAX_SEGMEXEC;
41601+ retval = -EINVAL;
41602+ }
41603+#endif
41604+
41605+ if ((*flags & MF_PAX_PAGEEXEC)
41606+
41607+#ifdef CONFIG_PAX_PAGEEXEC
41608+ && (*flags & MF_PAX_SEGMEXEC)
41609+#endif
41610+
41611+ )
41612+ {
41613+ *flags &= ~MF_PAX_PAGEEXEC;
41614+ retval = -EINVAL;
41615+ }
41616+
41617+ if ((*flags & MF_PAX_MPROTECT)
41618+
41619+#ifdef CONFIG_PAX_MPROTECT
41620+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41621+#endif
41622+
41623+ )
41624+ {
41625+ *flags &= ~MF_PAX_MPROTECT;
41626+ retval = -EINVAL;
41627+ }
41628+
41629+ if ((*flags & MF_PAX_EMUTRAMP)
41630+
41631+#ifdef CONFIG_PAX_EMUTRAMP
41632+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
41633+#endif
41634+
41635+ )
41636+ {
41637+ *flags &= ~MF_PAX_EMUTRAMP;
41638+ retval = -EINVAL;
41639+ }
41640+
41641+ return retval;
41642+}
41643+
41644+EXPORT_SYMBOL(pax_check_flags);
41645+
41646+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
41647+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
41648+{
41649+ struct task_struct *tsk = current;
41650+ struct mm_struct *mm = current->mm;
41651+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
41652+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
41653+ char *path_exec = NULL;
41654+ char *path_fault = NULL;
41655+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
41656+
41657+ if (buffer_exec && buffer_fault) {
41658+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
41659+
41660+ down_read(&mm->mmap_sem);
41661+ vma = mm->mmap;
41662+ while (vma && (!vma_exec || !vma_fault)) {
41663+ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
41664+ vma_exec = vma;
41665+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
41666+ vma_fault = vma;
41667+ vma = vma->vm_next;
41668+ }
41669+ if (vma_exec) {
41670+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
41671+ if (IS_ERR(path_exec))
41672+ path_exec = "<path too long>";
41673+ else {
41674+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
41675+ if (path_exec) {
41676+ *path_exec = 0;
41677+ path_exec = buffer_exec;
41678+ } else
41679+ path_exec = "<path too long>";
41680+ }
41681+ }
41682+ if (vma_fault) {
41683+ start = vma_fault->vm_start;
41684+ end = vma_fault->vm_end;
41685+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
41686+ if (vma_fault->vm_file) {
41687+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
41688+ if (IS_ERR(path_fault))
41689+ path_fault = "<path too long>";
41690+ else {
41691+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
41692+ if (path_fault) {
41693+ *path_fault = 0;
41694+ path_fault = buffer_fault;
41695+ } else
41696+ path_fault = "<path too long>";
41697+ }
41698+ } else
41699+ path_fault = "<anonymous mapping>";
41700+ }
41701+ up_read(&mm->mmap_sem);
41702+ }
41703+ if (tsk->signal->curr_ip)
41704+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
41705+ else
41706+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
41707+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
41708+ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
41709+ task_uid(tsk), task_euid(tsk), pc, sp);
41710+ free_page((unsigned long)buffer_exec);
41711+ free_page((unsigned long)buffer_fault);
41712+ pax_report_insns(pc, sp);
41713+ do_coredump(SIGKILL, SIGKILL, regs);
41714+}
41715+#endif
41716+
41717+#ifdef CONFIG_PAX_REFCOUNT
41718+void pax_report_refcount_overflow(struct pt_regs *regs)
41719+{
41720+ if (current->signal->curr_ip)
41721+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41722+ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid());
41723+ else
41724+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
41725+ current->comm, task_pid_nr(current), current_uid(), current_euid());
41726+ print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
41727+ show_regs(regs);
41728+ force_sig_specific(SIGKILL, current);
41729+}
41730+#endif
41731+
41732+#ifdef CONFIG_PAX_USERCOPY
41733+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
41734+int object_is_on_stack(const void *obj, unsigned long len)
41735+{
41736+ const void * const stack = task_stack_page(current);
41737+ const void * const stackend = stack + THREAD_SIZE;
41738+
41739+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41740+ const void *frame = NULL;
41741+ const void *oldframe;
41742+#endif
41743+
41744+ if (obj + len < obj)
41745+ return -1;
41746+
41747+ if (obj + len <= stack || stackend <= obj)
41748+ return 0;
41749+
41750+ if (obj < stack || stackend < obj + len)
41751+ return -1;
41752+
41753+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
41754+ oldframe = __builtin_frame_address(1);
41755+ if (oldframe)
41756+ frame = __builtin_frame_address(2);
41757+ /*
41758+ low ----------------------------------------------> high
41759+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
41760+ ^----------------^
41761+ allow copies only within here
41762+ */
41763+ while (stack <= frame && frame < stackend) {
41764+ /* if obj + len extends past the last frame, this
41765+ check won't pass and the next frame will be 0,
41766+ causing us to bail out and correctly report
41767+ the copy as invalid
41768+ */
41769+ if (obj + len <= frame)
41770+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
41771+ oldframe = frame;
41772+ frame = *(const void * const *)frame;
41773+ }
41774+ return -1;
41775+#else
41776+ return 1;
41777+#endif
41778+}
41779+
41780+
41781+NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type)
41782+{
41783+ if (current->signal->curr_ip)
41784+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41785+ &current->signal->curr_ip, to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41786+ else
41787+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
41788+ to ? "leak" : "overwrite", to ? "from" : "to", ptr, type ? : "unknown", len);
41789+
41790+ dump_stack();
41791+ gr_handle_kernel_exploit();
41792+ do_group_exit(SIGKILL);
41793+}
41794+#endif
41795+
41796+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
41797+void pax_track_stack(void)
41798+{
41799+ unsigned long sp = (unsigned long)&sp;
41800+ if (sp < current_thread_info()->lowest_stack &&
41801+ sp > (unsigned long)task_stack_page(current))
41802+ current_thread_info()->lowest_stack = sp;
41803+}
41804+EXPORT_SYMBOL(pax_track_stack);
41805+#endif
41806+
41807 static int zap_process(struct task_struct *start)
41808 {
41809 struct task_struct *t;
41810@@ -1793,17 +2097,17 @@ static void wait_for_dump_helpers(struct
41811 pipe = file->f_path.dentry->d_inode->i_pipe;
41812
41813 pipe_lock(pipe);
41814- pipe->readers++;
41815- pipe->writers--;
41816+ atomic_inc(&pipe->readers);
41817+ atomic_dec(&pipe->writers);
41818
41819- while ((pipe->readers > 1) && (!signal_pending(current))) {
41820+ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) {
41821 wake_up_interruptible_sync(&pipe->wait);
41822 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
41823 pipe_wait(pipe);
41824 }
41825
41826- pipe->readers--;
41827- pipe->writers++;
41828+ atomic_dec(&pipe->readers);
41829+ atomic_inc(&pipe->writers);
41830 pipe_unlock(pipe);
41831
41832 }
41833@@ -1826,10 +2130,13 @@ void do_coredump(long signr, int exit_co
41834 char **helper_argv = NULL;
41835 int helper_argc = 0;
41836 int dump_count = 0;
41837- static atomic_t core_dump_count = ATOMIC_INIT(0);
41838+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
41839
41840 audit_core_dumps(signr);
41841
41842+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
41843+ gr_handle_brute_attach(current, mm->flags);
41844+
41845 binfmt = mm->binfmt;
41846 if (!binfmt || !binfmt->core_dump)
41847 goto fail;
41848@@ -1874,6 +2181,8 @@ void do_coredump(long signr, int exit_co
41849 */
41850 clear_thread_flag(TIF_SIGPENDING);
41851
41852+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
41853+
41854 /*
41855 * lock_kernel() because format_corename() is controlled by sysctl, which
41856 * uses lock_kernel()
41857@@ -1908,7 +2217,7 @@ void do_coredump(long signr, int exit_co
41858 goto fail_unlock;
41859 }
41860
41861- dump_count = atomic_inc_return(&core_dump_count);
41862+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
41863 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
41864 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
41865 task_tgid_vnr(current), current->comm);
41866@@ -1972,7 +2281,7 @@ close_fail:
41867 filp_close(file, NULL);
41868 fail_dropcount:
41869 if (dump_count)
41870- atomic_dec(&core_dump_count);
41871+ atomic_dec_unchecked(&core_dump_count);
41872 fail_unlock:
41873 if (helper_argv)
41874 argv_free(helper_argv);
41875diff -urNp linux-2.6.32.43/fs/ext2/balloc.c linux-2.6.32.43/fs/ext2/balloc.c
41876--- linux-2.6.32.43/fs/ext2/balloc.c 2011-03-27 14:31:47.000000000 -0400
41877+++ linux-2.6.32.43/fs/ext2/balloc.c 2011-04-17 15:56:46.000000000 -0400
41878@@ -1192,7 +1192,7 @@ static int ext2_has_free_blocks(struct e
41879
41880 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41881 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41882- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41883+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41884 sbi->s_resuid != current_fsuid() &&
41885 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41886 return 0;
41887diff -urNp linux-2.6.32.43/fs/ext3/balloc.c linux-2.6.32.43/fs/ext3/balloc.c
41888--- linux-2.6.32.43/fs/ext3/balloc.c 2011-03-27 14:31:47.000000000 -0400
41889+++ linux-2.6.32.43/fs/ext3/balloc.c 2011-04-17 15:56:46.000000000 -0400
41890@@ -1421,7 +1421,7 @@ static int ext3_has_free_blocks(struct e
41891
41892 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
41893 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
41894- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
41895+ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) &&
41896 sbi->s_resuid != current_fsuid() &&
41897 (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) {
41898 return 0;
41899diff -urNp linux-2.6.32.43/fs/ext4/balloc.c linux-2.6.32.43/fs/ext4/balloc.c
41900--- linux-2.6.32.43/fs/ext4/balloc.c 2011-03-27 14:31:47.000000000 -0400
41901+++ linux-2.6.32.43/fs/ext4/balloc.c 2011-04-17 15:56:46.000000000 -0400
41902@@ -570,7 +570,7 @@ int ext4_has_free_blocks(struct ext4_sb_
41903 /* Hm, nope. Are (enough) root reserved blocks available? */
41904 if (sbi->s_resuid == current_fsuid() ||
41905 ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) ||
41906- capable(CAP_SYS_RESOURCE)) {
41907+ capable_nolog(CAP_SYS_RESOURCE)) {
41908 if (free_blocks >= (nblocks + dirty_blocks))
41909 return 1;
41910 }
41911diff -urNp linux-2.6.32.43/fs/ext4/ext4.h linux-2.6.32.43/fs/ext4/ext4.h
41912--- linux-2.6.32.43/fs/ext4/ext4.h 2011-03-27 14:31:47.000000000 -0400
41913+++ linux-2.6.32.43/fs/ext4/ext4.h 2011-04-17 15:56:46.000000000 -0400
41914@@ -1078,19 +1078,19 @@ struct ext4_sb_info {
41915
41916 /* stats for buddy allocator */
41917 spinlock_t s_mb_pa_lock;
41918- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
41919- atomic_t s_bal_success; /* we found long enough chunks */
41920- atomic_t s_bal_allocated; /* in blocks */
41921- atomic_t s_bal_ex_scanned; /* total extents scanned */
41922- atomic_t s_bal_goals; /* goal hits */
41923- atomic_t s_bal_breaks; /* too long searches */
41924- atomic_t s_bal_2orders; /* 2^order hits */
41925+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
41926+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
41927+ atomic_unchecked_t s_bal_allocated; /* in blocks */
41928+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
41929+ atomic_unchecked_t s_bal_goals; /* goal hits */
41930+ atomic_unchecked_t s_bal_breaks; /* too long searches */
41931+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
41932 spinlock_t s_bal_lock;
41933 unsigned long s_mb_buddies_generated;
41934 unsigned long long s_mb_generation_time;
41935- atomic_t s_mb_lost_chunks;
41936- atomic_t s_mb_preallocated;
41937- atomic_t s_mb_discarded;
41938+ atomic_unchecked_t s_mb_lost_chunks;
41939+ atomic_unchecked_t s_mb_preallocated;
41940+ atomic_unchecked_t s_mb_discarded;
41941 atomic_t s_lock_busy;
41942
41943 /* locality groups */
41944diff -urNp linux-2.6.32.43/fs/ext4/mballoc.c linux-2.6.32.43/fs/ext4/mballoc.c
41945--- linux-2.6.32.43/fs/ext4/mballoc.c 2011-06-25 12:55:34.000000000 -0400
41946+++ linux-2.6.32.43/fs/ext4/mballoc.c 2011-06-25 12:56:37.000000000 -0400
41947@@ -1755,7 +1755,7 @@ void ext4_mb_simple_scan_group(struct ex
41948 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
41949
41950 if (EXT4_SB(sb)->s_mb_stats)
41951- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
41952+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
41953
41954 break;
41955 }
41956@@ -2131,7 +2131,7 @@ repeat:
41957 ac->ac_status = AC_STATUS_CONTINUE;
41958 ac->ac_flags |= EXT4_MB_HINT_FIRST;
41959 cr = 3;
41960- atomic_inc(&sbi->s_mb_lost_chunks);
41961+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
41962 goto repeat;
41963 }
41964 }
41965@@ -2174,6 +2174,8 @@ static int ext4_mb_seq_groups_show(struc
41966 ext4_grpblk_t counters[16];
41967 } sg;
41968
41969+ pax_track_stack();
41970+
41971 group--;
41972 if (group == 0)
41973 seq_printf(seq, "#%-5s: %-5s %-5s %-5s "
41974@@ -2534,25 +2536,25 @@ int ext4_mb_release(struct super_block *
41975 if (sbi->s_mb_stats) {
41976 printk(KERN_INFO
41977 "EXT4-fs: mballoc: %u blocks %u reqs (%u success)\n",
41978- atomic_read(&sbi->s_bal_allocated),
41979- atomic_read(&sbi->s_bal_reqs),
41980- atomic_read(&sbi->s_bal_success));
41981+ atomic_read_unchecked(&sbi->s_bal_allocated),
41982+ atomic_read_unchecked(&sbi->s_bal_reqs),
41983+ atomic_read_unchecked(&sbi->s_bal_success));
41984 printk(KERN_INFO
41985 "EXT4-fs: mballoc: %u extents scanned, %u goal hits, "
41986 "%u 2^N hits, %u breaks, %u lost\n",
41987- atomic_read(&sbi->s_bal_ex_scanned),
41988- atomic_read(&sbi->s_bal_goals),
41989- atomic_read(&sbi->s_bal_2orders),
41990- atomic_read(&sbi->s_bal_breaks),
41991- atomic_read(&sbi->s_mb_lost_chunks));
41992+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
41993+ atomic_read_unchecked(&sbi->s_bal_goals),
41994+ atomic_read_unchecked(&sbi->s_bal_2orders),
41995+ atomic_read_unchecked(&sbi->s_bal_breaks),
41996+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
41997 printk(KERN_INFO
41998 "EXT4-fs: mballoc: %lu generated and it took %Lu\n",
41999 sbi->s_mb_buddies_generated++,
42000 sbi->s_mb_generation_time);
42001 printk(KERN_INFO
42002 "EXT4-fs: mballoc: %u preallocated, %u discarded\n",
42003- atomic_read(&sbi->s_mb_preallocated),
42004- atomic_read(&sbi->s_mb_discarded));
42005+ atomic_read_unchecked(&sbi->s_mb_preallocated),
42006+ atomic_read_unchecked(&sbi->s_mb_discarded));
42007 }
42008
42009 free_percpu(sbi->s_locality_groups);
42010@@ -3034,16 +3036,16 @@ static void ext4_mb_collect_stats(struct
42011 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
42012
42013 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
42014- atomic_inc(&sbi->s_bal_reqs);
42015- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42016+ atomic_inc_unchecked(&sbi->s_bal_reqs);
42017+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
42018 if (ac->ac_o_ex.fe_len >= ac->ac_g_ex.fe_len)
42019- atomic_inc(&sbi->s_bal_success);
42020- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
42021+ atomic_inc_unchecked(&sbi->s_bal_success);
42022+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
42023 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
42024 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
42025- atomic_inc(&sbi->s_bal_goals);
42026+ atomic_inc_unchecked(&sbi->s_bal_goals);
42027 if (ac->ac_found > sbi->s_mb_max_to_scan)
42028- atomic_inc(&sbi->s_bal_breaks);
42029+ atomic_inc_unchecked(&sbi->s_bal_breaks);
42030 }
42031
42032 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
42033@@ -3443,7 +3445,7 @@ ext4_mb_new_inode_pa(struct ext4_allocat
42034 trace_ext4_mb_new_inode_pa(ac, pa);
42035
42036 ext4_mb_use_inode_pa(ac, pa);
42037- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42038+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42039
42040 ei = EXT4_I(ac->ac_inode);
42041 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42042@@ -3503,7 +3505,7 @@ ext4_mb_new_group_pa(struct ext4_allocat
42043 trace_ext4_mb_new_group_pa(ac, pa);
42044
42045 ext4_mb_use_group_pa(ac, pa);
42046- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42047+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
42048
42049 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
42050 lg = ac->ac_lg;
42051@@ -3607,7 +3609,7 @@ ext4_mb_release_inode_pa(struct ext4_bud
42052 * from the bitmap and continue.
42053 */
42054 }
42055- atomic_add(free, &sbi->s_mb_discarded);
42056+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
42057
42058 return err;
42059 }
42060@@ -3626,7 +3628,7 @@ ext4_mb_release_group_pa(struct ext4_bud
42061 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
42062 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
42063 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
42064- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42065+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
42066
42067 if (ac) {
42068 ac->ac_sb = sb;
42069diff -urNp linux-2.6.32.43/fs/ext4/super.c linux-2.6.32.43/fs/ext4/super.c
42070--- linux-2.6.32.43/fs/ext4/super.c 2011-03-27 14:31:47.000000000 -0400
42071+++ linux-2.6.32.43/fs/ext4/super.c 2011-04-17 15:56:46.000000000 -0400
42072@@ -2287,7 +2287,7 @@ static void ext4_sb_release(struct kobje
42073 }
42074
42075
42076-static struct sysfs_ops ext4_attr_ops = {
42077+static const struct sysfs_ops ext4_attr_ops = {
42078 .show = ext4_attr_show,
42079 .store = ext4_attr_store,
42080 };
42081diff -urNp linux-2.6.32.43/fs/fcntl.c linux-2.6.32.43/fs/fcntl.c
42082--- linux-2.6.32.43/fs/fcntl.c 2011-03-27 14:31:47.000000000 -0400
42083+++ linux-2.6.32.43/fs/fcntl.c 2011-04-17 15:56:46.000000000 -0400
42084@@ -223,6 +223,11 @@ int __f_setown(struct file *filp, struct
42085 if (err)
42086 return err;
42087
42088+ if (gr_handle_chroot_fowner(pid, type))
42089+ return -ENOENT;
42090+ if (gr_check_protected_task_fowner(pid, type))
42091+ return -EACCES;
42092+
42093 f_modown(filp, pid, type, force);
42094 return 0;
42095 }
42096@@ -344,6 +349,7 @@ static long do_fcntl(int fd, unsigned in
42097 switch (cmd) {
42098 case F_DUPFD:
42099 case F_DUPFD_CLOEXEC:
42100+ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0);
42101 if (arg >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
42102 break;
42103 err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0);
42104diff -urNp linux-2.6.32.43/fs/fifo.c linux-2.6.32.43/fs/fifo.c
42105--- linux-2.6.32.43/fs/fifo.c 2011-03-27 14:31:47.000000000 -0400
42106+++ linux-2.6.32.43/fs/fifo.c 2011-04-17 15:56:46.000000000 -0400
42107@@ -59,10 +59,10 @@ static int fifo_open(struct inode *inode
42108 */
42109 filp->f_op = &read_pipefifo_fops;
42110 pipe->r_counter++;
42111- if (pipe->readers++ == 0)
42112+ if (atomic_inc_return(&pipe->readers) == 1)
42113 wake_up_partner(inode);
42114
42115- if (!pipe->writers) {
42116+ if (!atomic_read(&pipe->writers)) {
42117 if ((filp->f_flags & O_NONBLOCK)) {
42118 /* suppress POLLHUP until we have
42119 * seen a writer */
42120@@ -83,15 +83,15 @@ static int fifo_open(struct inode *inode
42121 * errno=ENXIO when there is no process reading the FIFO.
42122 */
42123 ret = -ENXIO;
42124- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers)
42125+ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
42126 goto err;
42127
42128 filp->f_op = &write_pipefifo_fops;
42129 pipe->w_counter++;
42130- if (!pipe->writers++)
42131+ if (atomic_inc_return(&pipe->writers) == 1)
42132 wake_up_partner(inode);
42133
42134- if (!pipe->readers) {
42135+ if (!atomic_read(&pipe->readers)) {
42136 wait_for_partner(inode, &pipe->r_counter);
42137 if (signal_pending(current))
42138 goto err_wr;
42139@@ -107,11 +107,11 @@ static int fifo_open(struct inode *inode
42140 */
42141 filp->f_op = &rdwr_pipefifo_fops;
42142
42143- pipe->readers++;
42144- pipe->writers++;
42145+ atomic_inc(&pipe->readers);
42146+ atomic_inc(&pipe->writers);
42147 pipe->r_counter++;
42148 pipe->w_counter++;
42149- if (pipe->readers == 1 || pipe->writers == 1)
42150+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
42151 wake_up_partner(inode);
42152 break;
42153
42154@@ -125,19 +125,19 @@ static int fifo_open(struct inode *inode
42155 return 0;
42156
42157 err_rd:
42158- if (!--pipe->readers)
42159+ if (atomic_dec_and_test(&pipe->readers))
42160 wake_up_interruptible(&pipe->wait);
42161 ret = -ERESTARTSYS;
42162 goto err;
42163
42164 err_wr:
42165- if (!--pipe->writers)
42166+ if (atomic_dec_and_test(&pipe->writers))
42167 wake_up_interruptible(&pipe->wait);
42168 ret = -ERESTARTSYS;
42169 goto err;
42170
42171 err:
42172- if (!pipe->readers && !pipe->writers)
42173+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers))
42174 free_pipe_info(inode);
42175
42176 err_nocleanup:
42177diff -urNp linux-2.6.32.43/fs/file.c linux-2.6.32.43/fs/file.c
42178--- linux-2.6.32.43/fs/file.c 2011-03-27 14:31:47.000000000 -0400
42179+++ linux-2.6.32.43/fs/file.c 2011-04-17 15:56:46.000000000 -0400
42180@@ -14,6 +14,7 @@
42181 #include <linux/slab.h>
42182 #include <linux/vmalloc.h>
42183 #include <linux/file.h>
42184+#include <linux/security.h>
42185 #include <linux/fdtable.h>
42186 #include <linux/bitops.h>
42187 #include <linux/interrupt.h>
42188@@ -257,6 +258,8 @@ int expand_files(struct files_struct *fi
42189 * N.B. For clone tasks sharing a files structure, this test
42190 * will limit the total number of files that can be opened.
42191 */
42192+
42193+ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0);
42194 if (nr >= current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
42195 return -EMFILE;
42196
42197diff -urNp linux-2.6.32.43/fs/filesystems.c linux-2.6.32.43/fs/filesystems.c
42198--- linux-2.6.32.43/fs/filesystems.c 2011-03-27 14:31:47.000000000 -0400
42199+++ linux-2.6.32.43/fs/filesystems.c 2011-04-17 15:56:46.000000000 -0400
42200@@ -272,7 +272,12 @@ struct file_system_type *get_fs_type(con
42201 int len = dot ? dot - name : strlen(name);
42202
42203 fs = __get_fs_type(name, len);
42204+
42205+#ifdef CONFIG_GRKERNSEC_MODHARDEN
42206+ if (!fs && (___request_module(true, "grsec_modharden_fs", "%.*s", len, name) == 0))
42207+#else
42208 if (!fs && (request_module("%.*s", len, name) == 0))
42209+#endif
42210 fs = __get_fs_type(name, len);
42211
42212 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
42213diff -urNp linux-2.6.32.43/fs/fscache/cookie.c linux-2.6.32.43/fs/fscache/cookie.c
42214--- linux-2.6.32.43/fs/fscache/cookie.c 2011-03-27 14:31:47.000000000 -0400
42215+++ linux-2.6.32.43/fs/fscache/cookie.c 2011-05-04 17:56:28.000000000 -0400
42216@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire
42217 parent ? (char *) parent->def->name : "<no-parent>",
42218 def->name, netfs_data);
42219
42220- fscache_stat(&fscache_n_acquires);
42221+ fscache_stat_unchecked(&fscache_n_acquires);
42222
42223 /* if there's no parent cookie, then we don't create one here either */
42224 if (!parent) {
42225- fscache_stat(&fscache_n_acquires_null);
42226+ fscache_stat_unchecked(&fscache_n_acquires_null);
42227 _leave(" [no parent]");
42228 return NULL;
42229 }
42230@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire
42231 /* allocate and initialise a cookie */
42232 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
42233 if (!cookie) {
42234- fscache_stat(&fscache_n_acquires_oom);
42235+ fscache_stat_unchecked(&fscache_n_acquires_oom);
42236 _leave(" [ENOMEM]");
42237 return NULL;
42238 }
42239@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire
42240
42241 switch (cookie->def->type) {
42242 case FSCACHE_COOKIE_TYPE_INDEX:
42243- fscache_stat(&fscache_n_cookie_index);
42244+ fscache_stat_unchecked(&fscache_n_cookie_index);
42245 break;
42246 case FSCACHE_COOKIE_TYPE_DATAFILE:
42247- fscache_stat(&fscache_n_cookie_data);
42248+ fscache_stat_unchecked(&fscache_n_cookie_data);
42249 break;
42250 default:
42251- fscache_stat(&fscache_n_cookie_special);
42252+ fscache_stat_unchecked(&fscache_n_cookie_special);
42253 break;
42254 }
42255
42256@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire
42257 if (fscache_acquire_non_index_cookie(cookie) < 0) {
42258 atomic_dec(&parent->n_children);
42259 __fscache_cookie_put(cookie);
42260- fscache_stat(&fscache_n_acquires_nobufs);
42261+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
42262 _leave(" = NULL");
42263 return NULL;
42264 }
42265 }
42266
42267- fscache_stat(&fscache_n_acquires_ok);
42268+ fscache_stat_unchecked(&fscache_n_acquires_ok);
42269 _leave(" = %p", cookie);
42270 return cookie;
42271 }
42272@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_coo
42273 cache = fscache_select_cache_for_object(cookie->parent);
42274 if (!cache) {
42275 up_read(&fscache_addremove_sem);
42276- fscache_stat(&fscache_n_acquires_no_cache);
42277+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
42278 _leave(" = -ENOMEDIUM [no cache]");
42279 return -ENOMEDIUM;
42280 }
42281@@ -256,12 +256,12 @@ static int fscache_alloc_object(struct f
42282 object = cache->ops->alloc_object(cache, cookie);
42283 fscache_stat_d(&fscache_n_cop_alloc_object);
42284 if (IS_ERR(object)) {
42285- fscache_stat(&fscache_n_object_no_alloc);
42286+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
42287 ret = PTR_ERR(object);
42288 goto error;
42289 }
42290
42291- fscache_stat(&fscache_n_object_alloc);
42292+ fscache_stat_unchecked(&fscache_n_object_alloc);
42293
42294 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
42295
42296@@ -377,10 +377,10 @@ void __fscache_update_cookie(struct fsca
42297 struct fscache_object *object;
42298 struct hlist_node *_p;
42299
42300- fscache_stat(&fscache_n_updates);
42301+ fscache_stat_unchecked(&fscache_n_updates);
42302
42303 if (!cookie) {
42304- fscache_stat(&fscache_n_updates_null);
42305+ fscache_stat_unchecked(&fscache_n_updates_null);
42306 _leave(" [no cookie]");
42307 return;
42308 }
42309@@ -414,12 +414,12 @@ void __fscache_relinquish_cookie(struct
42310 struct fscache_object *object;
42311 unsigned long event;
42312
42313- fscache_stat(&fscache_n_relinquishes);
42314+ fscache_stat_unchecked(&fscache_n_relinquishes);
42315 if (retire)
42316- fscache_stat(&fscache_n_relinquishes_retire);
42317+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
42318
42319 if (!cookie) {
42320- fscache_stat(&fscache_n_relinquishes_null);
42321+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
42322 _leave(" [no cookie]");
42323 return;
42324 }
42325@@ -435,7 +435,7 @@ void __fscache_relinquish_cookie(struct
42326
42327 /* wait for the cookie to finish being instantiated (or to fail) */
42328 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
42329- fscache_stat(&fscache_n_relinquishes_waitcrt);
42330+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
42331 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
42332 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
42333 }
42334diff -urNp linux-2.6.32.43/fs/fscache/internal.h linux-2.6.32.43/fs/fscache/internal.h
42335--- linux-2.6.32.43/fs/fscache/internal.h 2011-03-27 14:31:47.000000000 -0400
42336+++ linux-2.6.32.43/fs/fscache/internal.h 2011-05-04 17:56:28.000000000 -0400
42337@@ -136,94 +136,94 @@ extern void fscache_proc_cleanup(void);
42338 extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
42339 extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
42340
42341-extern atomic_t fscache_n_op_pend;
42342-extern atomic_t fscache_n_op_run;
42343-extern atomic_t fscache_n_op_enqueue;
42344-extern atomic_t fscache_n_op_deferred_release;
42345-extern atomic_t fscache_n_op_release;
42346-extern atomic_t fscache_n_op_gc;
42347-extern atomic_t fscache_n_op_cancelled;
42348-extern atomic_t fscache_n_op_rejected;
42349-
42350-extern atomic_t fscache_n_attr_changed;
42351-extern atomic_t fscache_n_attr_changed_ok;
42352-extern atomic_t fscache_n_attr_changed_nobufs;
42353-extern atomic_t fscache_n_attr_changed_nomem;
42354-extern atomic_t fscache_n_attr_changed_calls;
42355-
42356-extern atomic_t fscache_n_allocs;
42357-extern atomic_t fscache_n_allocs_ok;
42358-extern atomic_t fscache_n_allocs_wait;
42359-extern atomic_t fscache_n_allocs_nobufs;
42360-extern atomic_t fscache_n_allocs_intr;
42361-extern atomic_t fscache_n_allocs_object_dead;
42362-extern atomic_t fscache_n_alloc_ops;
42363-extern atomic_t fscache_n_alloc_op_waits;
42364-
42365-extern atomic_t fscache_n_retrievals;
42366-extern atomic_t fscache_n_retrievals_ok;
42367-extern atomic_t fscache_n_retrievals_wait;
42368-extern atomic_t fscache_n_retrievals_nodata;
42369-extern atomic_t fscache_n_retrievals_nobufs;
42370-extern atomic_t fscache_n_retrievals_intr;
42371-extern atomic_t fscache_n_retrievals_nomem;
42372-extern atomic_t fscache_n_retrievals_object_dead;
42373-extern atomic_t fscache_n_retrieval_ops;
42374-extern atomic_t fscache_n_retrieval_op_waits;
42375-
42376-extern atomic_t fscache_n_stores;
42377-extern atomic_t fscache_n_stores_ok;
42378-extern atomic_t fscache_n_stores_again;
42379-extern atomic_t fscache_n_stores_nobufs;
42380-extern atomic_t fscache_n_stores_oom;
42381-extern atomic_t fscache_n_store_ops;
42382-extern atomic_t fscache_n_store_calls;
42383-extern atomic_t fscache_n_store_pages;
42384-extern atomic_t fscache_n_store_radix_deletes;
42385-extern atomic_t fscache_n_store_pages_over_limit;
42386-
42387-extern atomic_t fscache_n_store_vmscan_not_storing;
42388-extern atomic_t fscache_n_store_vmscan_gone;
42389-extern atomic_t fscache_n_store_vmscan_busy;
42390-extern atomic_t fscache_n_store_vmscan_cancelled;
42391-
42392-extern atomic_t fscache_n_marks;
42393-extern atomic_t fscache_n_uncaches;
42394-
42395-extern atomic_t fscache_n_acquires;
42396-extern atomic_t fscache_n_acquires_null;
42397-extern atomic_t fscache_n_acquires_no_cache;
42398-extern atomic_t fscache_n_acquires_ok;
42399-extern atomic_t fscache_n_acquires_nobufs;
42400-extern atomic_t fscache_n_acquires_oom;
42401-
42402-extern atomic_t fscache_n_updates;
42403-extern atomic_t fscache_n_updates_null;
42404-extern atomic_t fscache_n_updates_run;
42405-
42406-extern atomic_t fscache_n_relinquishes;
42407-extern atomic_t fscache_n_relinquishes_null;
42408-extern atomic_t fscache_n_relinquishes_waitcrt;
42409-extern atomic_t fscache_n_relinquishes_retire;
42410-
42411-extern atomic_t fscache_n_cookie_index;
42412-extern atomic_t fscache_n_cookie_data;
42413-extern atomic_t fscache_n_cookie_special;
42414-
42415-extern atomic_t fscache_n_object_alloc;
42416-extern atomic_t fscache_n_object_no_alloc;
42417-extern atomic_t fscache_n_object_lookups;
42418-extern atomic_t fscache_n_object_lookups_negative;
42419-extern atomic_t fscache_n_object_lookups_positive;
42420-extern atomic_t fscache_n_object_lookups_timed_out;
42421-extern atomic_t fscache_n_object_created;
42422-extern atomic_t fscache_n_object_avail;
42423-extern atomic_t fscache_n_object_dead;
42424-
42425-extern atomic_t fscache_n_checkaux_none;
42426-extern atomic_t fscache_n_checkaux_okay;
42427-extern atomic_t fscache_n_checkaux_update;
42428-extern atomic_t fscache_n_checkaux_obsolete;
42429+extern atomic_unchecked_t fscache_n_op_pend;
42430+extern atomic_unchecked_t fscache_n_op_run;
42431+extern atomic_unchecked_t fscache_n_op_enqueue;
42432+extern atomic_unchecked_t fscache_n_op_deferred_release;
42433+extern atomic_unchecked_t fscache_n_op_release;
42434+extern atomic_unchecked_t fscache_n_op_gc;
42435+extern atomic_unchecked_t fscache_n_op_cancelled;
42436+extern atomic_unchecked_t fscache_n_op_rejected;
42437+
42438+extern atomic_unchecked_t fscache_n_attr_changed;
42439+extern atomic_unchecked_t fscache_n_attr_changed_ok;
42440+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
42441+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
42442+extern atomic_unchecked_t fscache_n_attr_changed_calls;
42443+
42444+extern atomic_unchecked_t fscache_n_allocs;
42445+extern atomic_unchecked_t fscache_n_allocs_ok;
42446+extern atomic_unchecked_t fscache_n_allocs_wait;
42447+extern atomic_unchecked_t fscache_n_allocs_nobufs;
42448+extern atomic_unchecked_t fscache_n_allocs_intr;
42449+extern atomic_unchecked_t fscache_n_allocs_object_dead;
42450+extern atomic_unchecked_t fscache_n_alloc_ops;
42451+extern atomic_unchecked_t fscache_n_alloc_op_waits;
42452+
42453+extern atomic_unchecked_t fscache_n_retrievals;
42454+extern atomic_unchecked_t fscache_n_retrievals_ok;
42455+extern atomic_unchecked_t fscache_n_retrievals_wait;
42456+extern atomic_unchecked_t fscache_n_retrievals_nodata;
42457+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
42458+extern atomic_unchecked_t fscache_n_retrievals_intr;
42459+extern atomic_unchecked_t fscache_n_retrievals_nomem;
42460+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
42461+extern atomic_unchecked_t fscache_n_retrieval_ops;
42462+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
42463+
42464+extern atomic_unchecked_t fscache_n_stores;
42465+extern atomic_unchecked_t fscache_n_stores_ok;
42466+extern atomic_unchecked_t fscache_n_stores_again;
42467+extern atomic_unchecked_t fscache_n_stores_nobufs;
42468+extern atomic_unchecked_t fscache_n_stores_oom;
42469+extern atomic_unchecked_t fscache_n_store_ops;
42470+extern atomic_unchecked_t fscache_n_store_calls;
42471+extern atomic_unchecked_t fscache_n_store_pages;
42472+extern atomic_unchecked_t fscache_n_store_radix_deletes;
42473+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
42474+
42475+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
42476+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
42477+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
42478+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
42479+
42480+extern atomic_unchecked_t fscache_n_marks;
42481+extern atomic_unchecked_t fscache_n_uncaches;
42482+
42483+extern atomic_unchecked_t fscache_n_acquires;
42484+extern atomic_unchecked_t fscache_n_acquires_null;
42485+extern atomic_unchecked_t fscache_n_acquires_no_cache;
42486+extern atomic_unchecked_t fscache_n_acquires_ok;
42487+extern atomic_unchecked_t fscache_n_acquires_nobufs;
42488+extern atomic_unchecked_t fscache_n_acquires_oom;
42489+
42490+extern atomic_unchecked_t fscache_n_updates;
42491+extern atomic_unchecked_t fscache_n_updates_null;
42492+extern atomic_unchecked_t fscache_n_updates_run;
42493+
42494+extern atomic_unchecked_t fscache_n_relinquishes;
42495+extern atomic_unchecked_t fscache_n_relinquishes_null;
42496+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
42497+extern atomic_unchecked_t fscache_n_relinquishes_retire;
42498+
42499+extern atomic_unchecked_t fscache_n_cookie_index;
42500+extern atomic_unchecked_t fscache_n_cookie_data;
42501+extern atomic_unchecked_t fscache_n_cookie_special;
42502+
42503+extern atomic_unchecked_t fscache_n_object_alloc;
42504+extern atomic_unchecked_t fscache_n_object_no_alloc;
42505+extern atomic_unchecked_t fscache_n_object_lookups;
42506+extern atomic_unchecked_t fscache_n_object_lookups_negative;
42507+extern atomic_unchecked_t fscache_n_object_lookups_positive;
42508+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
42509+extern atomic_unchecked_t fscache_n_object_created;
42510+extern atomic_unchecked_t fscache_n_object_avail;
42511+extern atomic_unchecked_t fscache_n_object_dead;
42512+
42513+extern atomic_unchecked_t fscache_n_checkaux_none;
42514+extern atomic_unchecked_t fscache_n_checkaux_okay;
42515+extern atomic_unchecked_t fscache_n_checkaux_update;
42516+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
42517
42518 extern atomic_t fscache_n_cop_alloc_object;
42519 extern atomic_t fscache_n_cop_lookup_object;
42520@@ -247,6 +247,11 @@ static inline void fscache_stat(atomic_t
42521 atomic_inc(stat);
42522 }
42523
42524+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
42525+{
42526+ atomic_inc_unchecked(stat);
42527+}
42528+
42529 static inline void fscache_stat_d(atomic_t *stat)
42530 {
42531 atomic_dec(stat);
42532@@ -259,6 +264,7 @@ extern const struct file_operations fsca
42533
42534 #define __fscache_stat(stat) (NULL)
42535 #define fscache_stat(stat) do {} while (0)
42536+#define fscache_stat_unchecked(stat) do {} while (0)
42537 #define fscache_stat_d(stat) do {} while (0)
42538 #endif
42539
42540diff -urNp linux-2.6.32.43/fs/fscache/object.c linux-2.6.32.43/fs/fscache/object.c
42541--- linux-2.6.32.43/fs/fscache/object.c 2011-03-27 14:31:47.000000000 -0400
42542+++ linux-2.6.32.43/fs/fscache/object.c 2011-05-04 17:56:28.000000000 -0400
42543@@ -144,7 +144,7 @@ static void fscache_object_state_machine
42544 /* update the object metadata on disk */
42545 case FSCACHE_OBJECT_UPDATING:
42546 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
42547- fscache_stat(&fscache_n_updates_run);
42548+ fscache_stat_unchecked(&fscache_n_updates_run);
42549 fscache_stat(&fscache_n_cop_update_object);
42550 object->cache->ops->update_object(object);
42551 fscache_stat_d(&fscache_n_cop_update_object);
42552@@ -233,7 +233,7 @@ static void fscache_object_state_machine
42553 spin_lock(&object->lock);
42554 object->state = FSCACHE_OBJECT_DEAD;
42555 spin_unlock(&object->lock);
42556- fscache_stat(&fscache_n_object_dead);
42557+ fscache_stat_unchecked(&fscache_n_object_dead);
42558 goto terminal_transit;
42559
42560 /* handle the parent cache of this object being withdrawn from
42561@@ -248,7 +248,7 @@ static void fscache_object_state_machine
42562 spin_lock(&object->lock);
42563 object->state = FSCACHE_OBJECT_DEAD;
42564 spin_unlock(&object->lock);
42565- fscache_stat(&fscache_n_object_dead);
42566+ fscache_stat_unchecked(&fscache_n_object_dead);
42567 goto terminal_transit;
42568
42569 /* complain about the object being woken up once it is
42570@@ -492,7 +492,7 @@ static void fscache_lookup_object(struct
42571 parent->cookie->def->name, cookie->def->name,
42572 object->cache->tag->name);
42573
42574- fscache_stat(&fscache_n_object_lookups);
42575+ fscache_stat_unchecked(&fscache_n_object_lookups);
42576 fscache_stat(&fscache_n_cop_lookup_object);
42577 ret = object->cache->ops->lookup_object(object);
42578 fscache_stat_d(&fscache_n_cop_lookup_object);
42579@@ -503,7 +503,7 @@ static void fscache_lookup_object(struct
42580 if (ret == -ETIMEDOUT) {
42581 /* probably stuck behind another object, so move this one to
42582 * the back of the queue */
42583- fscache_stat(&fscache_n_object_lookups_timed_out);
42584+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
42585 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42586 }
42587
42588@@ -526,7 +526,7 @@ void fscache_object_lookup_negative(stru
42589
42590 spin_lock(&object->lock);
42591 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42592- fscache_stat(&fscache_n_object_lookups_negative);
42593+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
42594
42595 /* transit here to allow write requests to begin stacking up
42596 * and read requests to begin returning ENODATA */
42597@@ -572,7 +572,7 @@ void fscache_obtained_object(struct fsca
42598 * result, in which case there may be data available */
42599 spin_lock(&object->lock);
42600 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
42601- fscache_stat(&fscache_n_object_lookups_positive);
42602+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
42603
42604 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
42605
42606@@ -586,7 +586,7 @@ void fscache_obtained_object(struct fsca
42607 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
42608 } else {
42609 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
42610- fscache_stat(&fscache_n_object_created);
42611+ fscache_stat_unchecked(&fscache_n_object_created);
42612
42613 object->state = FSCACHE_OBJECT_AVAILABLE;
42614 spin_unlock(&object->lock);
42615@@ -633,7 +633,7 @@ static void fscache_object_available(str
42616 fscache_enqueue_dependents(object);
42617
42618 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
42619- fscache_stat(&fscache_n_object_avail);
42620+ fscache_stat_unchecked(&fscache_n_object_avail);
42621
42622 _leave("");
42623 }
42624@@ -861,7 +861,7 @@ enum fscache_checkaux fscache_check_aux(
42625 enum fscache_checkaux result;
42626
42627 if (!object->cookie->def->check_aux) {
42628- fscache_stat(&fscache_n_checkaux_none);
42629+ fscache_stat_unchecked(&fscache_n_checkaux_none);
42630 return FSCACHE_CHECKAUX_OKAY;
42631 }
42632
42633@@ -870,17 +870,17 @@ enum fscache_checkaux fscache_check_aux(
42634 switch (result) {
42635 /* entry okay as is */
42636 case FSCACHE_CHECKAUX_OKAY:
42637- fscache_stat(&fscache_n_checkaux_okay);
42638+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
42639 break;
42640
42641 /* entry requires update */
42642 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
42643- fscache_stat(&fscache_n_checkaux_update);
42644+ fscache_stat_unchecked(&fscache_n_checkaux_update);
42645 break;
42646
42647 /* entry requires deletion */
42648 case FSCACHE_CHECKAUX_OBSOLETE:
42649- fscache_stat(&fscache_n_checkaux_obsolete);
42650+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
42651 break;
42652
42653 default:
42654diff -urNp linux-2.6.32.43/fs/fscache/operation.c linux-2.6.32.43/fs/fscache/operation.c
42655--- linux-2.6.32.43/fs/fscache/operation.c 2011-03-27 14:31:47.000000000 -0400
42656+++ linux-2.6.32.43/fs/fscache/operation.c 2011-05-04 17:56:28.000000000 -0400
42657@@ -16,7 +16,7 @@
42658 #include <linux/seq_file.h>
42659 #include "internal.h"
42660
42661-atomic_t fscache_op_debug_id;
42662+atomic_unchecked_t fscache_op_debug_id;
42663 EXPORT_SYMBOL(fscache_op_debug_id);
42664
42665 /**
42666@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fs
42667 ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
42668 ASSERTCMP(atomic_read(&op->usage), >, 0);
42669
42670- fscache_stat(&fscache_n_op_enqueue);
42671+ fscache_stat_unchecked(&fscache_n_op_enqueue);
42672 switch (op->flags & FSCACHE_OP_TYPE) {
42673 case FSCACHE_OP_FAST:
42674 _debug("queue fast");
42675@@ -76,7 +76,7 @@ static void fscache_run_op(struct fscach
42676 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
42677 if (op->processor)
42678 fscache_enqueue_operation(op);
42679- fscache_stat(&fscache_n_op_run);
42680+ fscache_stat_unchecked(&fscache_n_op_run);
42681 }
42682
42683 /*
42684@@ -107,11 +107,11 @@ int fscache_submit_exclusive_op(struct f
42685 if (object->n_ops > 0) {
42686 atomic_inc(&op->usage);
42687 list_add_tail(&op->pend_link, &object->pending_ops);
42688- fscache_stat(&fscache_n_op_pend);
42689+ fscache_stat_unchecked(&fscache_n_op_pend);
42690 } else if (!list_empty(&object->pending_ops)) {
42691 atomic_inc(&op->usage);
42692 list_add_tail(&op->pend_link, &object->pending_ops);
42693- fscache_stat(&fscache_n_op_pend);
42694+ fscache_stat_unchecked(&fscache_n_op_pend);
42695 fscache_start_operations(object);
42696 } else {
42697 ASSERTCMP(object->n_in_progress, ==, 0);
42698@@ -127,7 +127,7 @@ int fscache_submit_exclusive_op(struct f
42699 object->n_exclusive++; /* reads and writes must wait */
42700 atomic_inc(&op->usage);
42701 list_add_tail(&op->pend_link, &object->pending_ops);
42702- fscache_stat(&fscache_n_op_pend);
42703+ fscache_stat_unchecked(&fscache_n_op_pend);
42704 ret = 0;
42705 } else {
42706 /* not allowed to submit ops in any other state */
42707@@ -214,11 +214,11 @@ int fscache_submit_op(struct fscache_obj
42708 if (object->n_exclusive > 0) {
42709 atomic_inc(&op->usage);
42710 list_add_tail(&op->pend_link, &object->pending_ops);
42711- fscache_stat(&fscache_n_op_pend);
42712+ fscache_stat_unchecked(&fscache_n_op_pend);
42713 } else if (!list_empty(&object->pending_ops)) {
42714 atomic_inc(&op->usage);
42715 list_add_tail(&op->pend_link, &object->pending_ops);
42716- fscache_stat(&fscache_n_op_pend);
42717+ fscache_stat_unchecked(&fscache_n_op_pend);
42718 fscache_start_operations(object);
42719 } else {
42720 ASSERTCMP(object->n_exclusive, ==, 0);
42721@@ -230,12 +230,12 @@ int fscache_submit_op(struct fscache_obj
42722 object->n_ops++;
42723 atomic_inc(&op->usage);
42724 list_add_tail(&op->pend_link, &object->pending_ops);
42725- fscache_stat(&fscache_n_op_pend);
42726+ fscache_stat_unchecked(&fscache_n_op_pend);
42727 ret = 0;
42728 } else if (object->state == FSCACHE_OBJECT_DYING ||
42729 object->state == FSCACHE_OBJECT_LC_DYING ||
42730 object->state == FSCACHE_OBJECT_WITHDRAWING) {
42731- fscache_stat(&fscache_n_op_rejected);
42732+ fscache_stat_unchecked(&fscache_n_op_rejected);
42733 ret = -ENOBUFS;
42734 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
42735 fscache_report_unexpected_submission(object, op, ostate);
42736@@ -305,7 +305,7 @@ int fscache_cancel_op(struct fscache_ope
42737
42738 ret = -EBUSY;
42739 if (!list_empty(&op->pend_link)) {
42740- fscache_stat(&fscache_n_op_cancelled);
42741+ fscache_stat_unchecked(&fscache_n_op_cancelled);
42742 list_del_init(&op->pend_link);
42743 object->n_ops--;
42744 if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
42745@@ -344,7 +344,7 @@ void fscache_put_operation(struct fscach
42746 if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
42747 BUG();
42748
42749- fscache_stat(&fscache_n_op_release);
42750+ fscache_stat_unchecked(&fscache_n_op_release);
42751
42752 if (op->release) {
42753 op->release(op);
42754@@ -361,7 +361,7 @@ void fscache_put_operation(struct fscach
42755 * lock, and defer it otherwise */
42756 if (!spin_trylock(&object->lock)) {
42757 _debug("defer put");
42758- fscache_stat(&fscache_n_op_deferred_release);
42759+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
42760
42761 cache = object->cache;
42762 spin_lock(&cache->op_gc_list_lock);
42763@@ -423,7 +423,7 @@ void fscache_operation_gc(struct work_st
42764
42765 _debug("GC DEFERRED REL OBJ%x OP%x",
42766 object->debug_id, op->debug_id);
42767- fscache_stat(&fscache_n_op_gc);
42768+ fscache_stat_unchecked(&fscache_n_op_gc);
42769
42770 ASSERTCMP(atomic_read(&op->usage), ==, 0);
42771
42772diff -urNp linux-2.6.32.43/fs/fscache/page.c linux-2.6.32.43/fs/fscache/page.c
42773--- linux-2.6.32.43/fs/fscache/page.c 2011-03-27 14:31:47.000000000 -0400
42774+++ linux-2.6.32.43/fs/fscache/page.c 2011-05-04 17:56:28.000000000 -0400
42775@@ -59,7 +59,7 @@ bool __fscache_maybe_release_page(struct
42776 val = radix_tree_lookup(&cookie->stores, page->index);
42777 if (!val) {
42778 rcu_read_unlock();
42779- fscache_stat(&fscache_n_store_vmscan_not_storing);
42780+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
42781 __fscache_uncache_page(cookie, page);
42782 return true;
42783 }
42784@@ -89,11 +89,11 @@ bool __fscache_maybe_release_page(struct
42785 spin_unlock(&cookie->stores_lock);
42786
42787 if (xpage) {
42788- fscache_stat(&fscache_n_store_vmscan_cancelled);
42789- fscache_stat(&fscache_n_store_radix_deletes);
42790+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
42791+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42792 ASSERTCMP(xpage, ==, page);
42793 } else {
42794- fscache_stat(&fscache_n_store_vmscan_gone);
42795+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
42796 }
42797
42798 wake_up_bit(&cookie->flags, 0);
42799@@ -106,7 +106,7 @@ page_busy:
42800 /* we might want to wait here, but that could deadlock the allocator as
42801 * the slow-work threads writing to the cache may all end up sleeping
42802 * on memory allocation */
42803- fscache_stat(&fscache_n_store_vmscan_busy);
42804+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
42805 return false;
42806 }
42807 EXPORT_SYMBOL(__fscache_maybe_release_page);
42808@@ -130,7 +130,7 @@ static void fscache_end_page_write(struc
42809 FSCACHE_COOKIE_STORING_TAG);
42810 if (!radix_tree_tag_get(&cookie->stores, page->index,
42811 FSCACHE_COOKIE_PENDING_TAG)) {
42812- fscache_stat(&fscache_n_store_radix_deletes);
42813+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
42814 xpage = radix_tree_delete(&cookie->stores, page->index);
42815 }
42816 spin_unlock(&cookie->stores_lock);
42817@@ -151,7 +151,7 @@ static void fscache_attr_changed_op(stru
42818
42819 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
42820
42821- fscache_stat(&fscache_n_attr_changed_calls);
42822+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
42823
42824 if (fscache_object_is_active(object)) {
42825 fscache_set_op_state(op, "CallFS");
42826@@ -178,11 +178,11 @@ int __fscache_attr_changed(struct fscach
42827
42828 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
42829
42830- fscache_stat(&fscache_n_attr_changed);
42831+ fscache_stat_unchecked(&fscache_n_attr_changed);
42832
42833 op = kzalloc(sizeof(*op), GFP_KERNEL);
42834 if (!op) {
42835- fscache_stat(&fscache_n_attr_changed_nomem);
42836+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
42837 _leave(" = -ENOMEM");
42838 return -ENOMEM;
42839 }
42840@@ -202,7 +202,7 @@ int __fscache_attr_changed(struct fscach
42841 if (fscache_submit_exclusive_op(object, op) < 0)
42842 goto nobufs;
42843 spin_unlock(&cookie->lock);
42844- fscache_stat(&fscache_n_attr_changed_ok);
42845+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
42846 fscache_put_operation(op);
42847 _leave(" = 0");
42848 return 0;
42849@@ -210,7 +210,7 @@ int __fscache_attr_changed(struct fscach
42850 nobufs:
42851 spin_unlock(&cookie->lock);
42852 kfree(op);
42853- fscache_stat(&fscache_n_attr_changed_nobufs);
42854+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
42855 _leave(" = %d", -ENOBUFS);
42856 return -ENOBUFS;
42857 }
42858@@ -264,7 +264,7 @@ static struct fscache_retrieval *fscache
42859 /* allocate a retrieval operation and attempt to submit it */
42860 op = kzalloc(sizeof(*op), GFP_NOIO);
42861 if (!op) {
42862- fscache_stat(&fscache_n_retrievals_nomem);
42863+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42864 return NULL;
42865 }
42866
42867@@ -294,13 +294,13 @@ static int fscache_wait_for_deferred_loo
42868 return 0;
42869 }
42870
42871- fscache_stat(&fscache_n_retrievals_wait);
42872+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
42873
42874 jif = jiffies;
42875 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
42876 fscache_wait_bit_interruptible,
42877 TASK_INTERRUPTIBLE) != 0) {
42878- fscache_stat(&fscache_n_retrievals_intr);
42879+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42880 _leave(" = -ERESTARTSYS");
42881 return -ERESTARTSYS;
42882 }
42883@@ -318,8 +318,8 @@ static int fscache_wait_for_deferred_loo
42884 */
42885 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
42886 struct fscache_retrieval *op,
42887- atomic_t *stat_op_waits,
42888- atomic_t *stat_object_dead)
42889+ atomic_unchecked_t *stat_op_waits,
42890+ atomic_unchecked_t *stat_object_dead)
42891 {
42892 int ret;
42893
42894@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_ac
42895 goto check_if_dead;
42896
42897 _debug(">>> WT");
42898- fscache_stat(stat_op_waits);
42899+ fscache_stat_unchecked(stat_op_waits);
42900 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
42901 fscache_wait_bit_interruptible,
42902 TASK_INTERRUPTIBLE) < 0) {
42903@@ -344,7 +344,7 @@ static int fscache_wait_for_retrieval_ac
42904
42905 check_if_dead:
42906 if (unlikely(fscache_object_is_dead(object))) {
42907- fscache_stat(stat_object_dead);
42908+ fscache_stat_unchecked(stat_object_dead);
42909 return -ENOBUFS;
42910 }
42911 return 0;
42912@@ -371,7 +371,7 @@ int __fscache_read_or_alloc_page(struct
42913
42914 _enter("%p,%p,,,", cookie, page);
42915
42916- fscache_stat(&fscache_n_retrievals);
42917+ fscache_stat_unchecked(&fscache_n_retrievals);
42918
42919 if (hlist_empty(&cookie->backing_objects))
42920 goto nobufs;
42921@@ -405,7 +405,7 @@ int __fscache_read_or_alloc_page(struct
42922 goto nobufs_unlock;
42923 spin_unlock(&cookie->lock);
42924
42925- fscache_stat(&fscache_n_retrieval_ops);
42926+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
42927
42928 /* pin the netfs read context in case we need to do the actual netfs
42929 * read because we've encountered a cache read failure */
42930@@ -435,15 +435,15 @@ int __fscache_read_or_alloc_page(struct
42931
42932 error:
42933 if (ret == -ENOMEM)
42934- fscache_stat(&fscache_n_retrievals_nomem);
42935+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42936 else if (ret == -ERESTARTSYS)
42937- fscache_stat(&fscache_n_retrievals_intr);
42938+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42939 else if (ret == -ENODATA)
42940- fscache_stat(&fscache_n_retrievals_nodata);
42941+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42942 else if (ret < 0)
42943- fscache_stat(&fscache_n_retrievals_nobufs);
42944+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42945 else
42946- fscache_stat(&fscache_n_retrievals_ok);
42947+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
42948
42949 fscache_put_retrieval(op);
42950 _leave(" = %d", ret);
42951@@ -453,7 +453,7 @@ nobufs_unlock:
42952 spin_unlock(&cookie->lock);
42953 kfree(op);
42954 nobufs:
42955- fscache_stat(&fscache_n_retrievals_nobufs);
42956+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42957 _leave(" = -ENOBUFS");
42958 return -ENOBUFS;
42959 }
42960@@ -491,7 +491,7 @@ int __fscache_read_or_alloc_pages(struct
42961
42962 _enter("%p,,%d,,,", cookie, *nr_pages);
42963
42964- fscache_stat(&fscache_n_retrievals);
42965+ fscache_stat_unchecked(&fscache_n_retrievals);
42966
42967 if (hlist_empty(&cookie->backing_objects))
42968 goto nobufs;
42969@@ -522,7 +522,7 @@ int __fscache_read_or_alloc_pages(struct
42970 goto nobufs_unlock;
42971 spin_unlock(&cookie->lock);
42972
42973- fscache_stat(&fscache_n_retrieval_ops);
42974+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
42975
42976 /* pin the netfs read context in case we need to do the actual netfs
42977 * read because we've encountered a cache read failure */
42978@@ -552,15 +552,15 @@ int __fscache_read_or_alloc_pages(struct
42979
42980 error:
42981 if (ret == -ENOMEM)
42982- fscache_stat(&fscache_n_retrievals_nomem);
42983+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
42984 else if (ret == -ERESTARTSYS)
42985- fscache_stat(&fscache_n_retrievals_intr);
42986+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
42987 else if (ret == -ENODATA)
42988- fscache_stat(&fscache_n_retrievals_nodata);
42989+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
42990 else if (ret < 0)
42991- fscache_stat(&fscache_n_retrievals_nobufs);
42992+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
42993 else
42994- fscache_stat(&fscache_n_retrievals_ok);
42995+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
42996
42997 fscache_put_retrieval(op);
42998 _leave(" = %d", ret);
42999@@ -570,7 +570,7 @@ nobufs_unlock:
43000 spin_unlock(&cookie->lock);
43001 kfree(op);
43002 nobufs:
43003- fscache_stat(&fscache_n_retrievals_nobufs);
43004+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
43005 _leave(" = -ENOBUFS");
43006 return -ENOBUFS;
43007 }
43008@@ -594,7 +594,7 @@ int __fscache_alloc_page(struct fscache_
43009
43010 _enter("%p,%p,,,", cookie, page);
43011
43012- fscache_stat(&fscache_n_allocs);
43013+ fscache_stat_unchecked(&fscache_n_allocs);
43014
43015 if (hlist_empty(&cookie->backing_objects))
43016 goto nobufs;
43017@@ -621,7 +621,7 @@ int __fscache_alloc_page(struct fscache_
43018 goto nobufs_unlock;
43019 spin_unlock(&cookie->lock);
43020
43021- fscache_stat(&fscache_n_alloc_ops);
43022+ fscache_stat_unchecked(&fscache_n_alloc_ops);
43023
43024 ret = fscache_wait_for_retrieval_activation(
43025 object, op,
43026@@ -637,11 +637,11 @@ int __fscache_alloc_page(struct fscache_
43027
43028 error:
43029 if (ret == -ERESTARTSYS)
43030- fscache_stat(&fscache_n_allocs_intr);
43031+ fscache_stat_unchecked(&fscache_n_allocs_intr);
43032 else if (ret < 0)
43033- fscache_stat(&fscache_n_allocs_nobufs);
43034+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43035 else
43036- fscache_stat(&fscache_n_allocs_ok);
43037+ fscache_stat_unchecked(&fscache_n_allocs_ok);
43038
43039 fscache_put_retrieval(op);
43040 _leave(" = %d", ret);
43041@@ -651,7 +651,7 @@ nobufs_unlock:
43042 spin_unlock(&cookie->lock);
43043 kfree(op);
43044 nobufs:
43045- fscache_stat(&fscache_n_allocs_nobufs);
43046+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
43047 _leave(" = -ENOBUFS");
43048 return -ENOBUFS;
43049 }
43050@@ -694,7 +694,7 @@ static void fscache_write_op(struct fsca
43051
43052 spin_lock(&cookie->stores_lock);
43053
43054- fscache_stat(&fscache_n_store_calls);
43055+ fscache_stat_unchecked(&fscache_n_store_calls);
43056
43057 /* find a page to store */
43058 page = NULL;
43059@@ -705,7 +705,7 @@ static void fscache_write_op(struct fsca
43060 page = results[0];
43061 _debug("gang %d [%lx]", n, page->index);
43062 if (page->index > op->store_limit) {
43063- fscache_stat(&fscache_n_store_pages_over_limit);
43064+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
43065 goto superseded;
43066 }
43067
43068@@ -721,7 +721,7 @@ static void fscache_write_op(struct fsca
43069
43070 if (page) {
43071 fscache_set_op_state(&op->op, "Store");
43072- fscache_stat(&fscache_n_store_pages);
43073+ fscache_stat_unchecked(&fscache_n_store_pages);
43074 fscache_stat(&fscache_n_cop_write_page);
43075 ret = object->cache->ops->write_page(op, page);
43076 fscache_stat_d(&fscache_n_cop_write_page);
43077@@ -792,7 +792,7 @@ int __fscache_write_page(struct fscache_
43078 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43079 ASSERT(PageFsCache(page));
43080
43081- fscache_stat(&fscache_n_stores);
43082+ fscache_stat_unchecked(&fscache_n_stores);
43083
43084 op = kzalloc(sizeof(*op), GFP_NOIO);
43085 if (!op)
43086@@ -844,7 +844,7 @@ int __fscache_write_page(struct fscache_
43087 spin_unlock(&cookie->stores_lock);
43088 spin_unlock(&object->lock);
43089
43090- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
43091+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
43092 op->store_limit = object->store_limit;
43093
43094 if (fscache_submit_op(object, &op->op) < 0)
43095@@ -852,8 +852,8 @@ int __fscache_write_page(struct fscache_
43096
43097 spin_unlock(&cookie->lock);
43098 radix_tree_preload_end();
43099- fscache_stat(&fscache_n_store_ops);
43100- fscache_stat(&fscache_n_stores_ok);
43101+ fscache_stat_unchecked(&fscache_n_store_ops);
43102+ fscache_stat_unchecked(&fscache_n_stores_ok);
43103
43104 /* the slow work queue now carries its own ref on the object */
43105 fscache_put_operation(&op->op);
43106@@ -861,14 +861,14 @@ int __fscache_write_page(struct fscache_
43107 return 0;
43108
43109 already_queued:
43110- fscache_stat(&fscache_n_stores_again);
43111+ fscache_stat_unchecked(&fscache_n_stores_again);
43112 already_pending:
43113 spin_unlock(&cookie->stores_lock);
43114 spin_unlock(&object->lock);
43115 spin_unlock(&cookie->lock);
43116 radix_tree_preload_end();
43117 kfree(op);
43118- fscache_stat(&fscache_n_stores_ok);
43119+ fscache_stat_unchecked(&fscache_n_stores_ok);
43120 _leave(" = 0");
43121 return 0;
43122
43123@@ -886,14 +886,14 @@ nobufs:
43124 spin_unlock(&cookie->lock);
43125 radix_tree_preload_end();
43126 kfree(op);
43127- fscache_stat(&fscache_n_stores_nobufs);
43128+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
43129 _leave(" = -ENOBUFS");
43130 return -ENOBUFS;
43131
43132 nomem_free:
43133 kfree(op);
43134 nomem:
43135- fscache_stat(&fscache_n_stores_oom);
43136+ fscache_stat_unchecked(&fscache_n_stores_oom);
43137 _leave(" = -ENOMEM");
43138 return -ENOMEM;
43139 }
43140@@ -911,7 +911,7 @@ void __fscache_uncache_page(struct fscac
43141 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
43142 ASSERTCMP(page, !=, NULL);
43143
43144- fscache_stat(&fscache_n_uncaches);
43145+ fscache_stat_unchecked(&fscache_n_uncaches);
43146
43147 /* cache withdrawal may beat us to it */
43148 if (!PageFsCache(page))
43149@@ -964,7 +964,7 @@ void fscache_mark_pages_cached(struct fs
43150 unsigned long loop;
43151
43152 #ifdef CONFIG_FSCACHE_STATS
43153- atomic_add(pagevec->nr, &fscache_n_marks);
43154+ atomic_add_unchecked(pagevec->nr, &fscache_n_marks);
43155 #endif
43156
43157 for (loop = 0; loop < pagevec->nr; loop++) {
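Editor's note, not part of the patch: the fs/fscache/page.c hunks above (and the stats.c hunks that follow) switch FS-Cache's statistics counters from atomic_t to an "unchecked" variant so that PaX's reference-count overflow detection is not applied to counters that are purely informational and may legitimately wrap. The sketch below is a hypothetical approximation of the wrapper pair, shown only for orientation; the real definitions live in the PaX atomic headers and fs/fscache/internal.h and may differ in detail.

typedef struct { int counter; } atomic_unchecked_t;	/* assumed layout, mirrors atomic_t */

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return v->counter;			/* read without overflow instrumentation */
}

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__sync_fetch_and_add(&v->counter, 1);	/* stand-in for the arch atomic increment */
}

/* fscache_stat() is an atomic_inc() under CONFIG_FSCACHE_STATS; the unchecked
 * twin used throughout this patch is assumed to follow the same shape. */
#define fscache_stat_unchecked(stat) atomic_inc_unchecked(stat)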
43158diff -urNp linux-2.6.32.43/fs/fscache/stats.c linux-2.6.32.43/fs/fscache/stats.c
43159--- linux-2.6.32.43/fs/fscache/stats.c 2011-03-27 14:31:47.000000000 -0400
43160+++ linux-2.6.32.43/fs/fscache/stats.c 2011-05-04 17:56:28.000000000 -0400
43161@@ -18,95 +18,95 @@
43162 /*
43163 * operation counters
43164 */
43165-atomic_t fscache_n_op_pend;
43166-atomic_t fscache_n_op_run;
43167-atomic_t fscache_n_op_enqueue;
43168-atomic_t fscache_n_op_requeue;
43169-atomic_t fscache_n_op_deferred_release;
43170-atomic_t fscache_n_op_release;
43171-atomic_t fscache_n_op_gc;
43172-atomic_t fscache_n_op_cancelled;
43173-atomic_t fscache_n_op_rejected;
43174-
43175-atomic_t fscache_n_attr_changed;
43176-atomic_t fscache_n_attr_changed_ok;
43177-atomic_t fscache_n_attr_changed_nobufs;
43178-atomic_t fscache_n_attr_changed_nomem;
43179-atomic_t fscache_n_attr_changed_calls;
43180-
43181-atomic_t fscache_n_allocs;
43182-atomic_t fscache_n_allocs_ok;
43183-atomic_t fscache_n_allocs_wait;
43184-atomic_t fscache_n_allocs_nobufs;
43185-atomic_t fscache_n_allocs_intr;
43186-atomic_t fscache_n_allocs_object_dead;
43187-atomic_t fscache_n_alloc_ops;
43188-atomic_t fscache_n_alloc_op_waits;
43189-
43190-atomic_t fscache_n_retrievals;
43191-atomic_t fscache_n_retrievals_ok;
43192-atomic_t fscache_n_retrievals_wait;
43193-atomic_t fscache_n_retrievals_nodata;
43194-atomic_t fscache_n_retrievals_nobufs;
43195-atomic_t fscache_n_retrievals_intr;
43196-atomic_t fscache_n_retrievals_nomem;
43197-atomic_t fscache_n_retrievals_object_dead;
43198-atomic_t fscache_n_retrieval_ops;
43199-atomic_t fscache_n_retrieval_op_waits;
43200-
43201-atomic_t fscache_n_stores;
43202-atomic_t fscache_n_stores_ok;
43203-atomic_t fscache_n_stores_again;
43204-atomic_t fscache_n_stores_nobufs;
43205-atomic_t fscache_n_stores_oom;
43206-atomic_t fscache_n_store_ops;
43207-atomic_t fscache_n_store_calls;
43208-atomic_t fscache_n_store_pages;
43209-atomic_t fscache_n_store_radix_deletes;
43210-atomic_t fscache_n_store_pages_over_limit;
43211-
43212-atomic_t fscache_n_store_vmscan_not_storing;
43213-atomic_t fscache_n_store_vmscan_gone;
43214-atomic_t fscache_n_store_vmscan_busy;
43215-atomic_t fscache_n_store_vmscan_cancelled;
43216-
43217-atomic_t fscache_n_marks;
43218-atomic_t fscache_n_uncaches;
43219-
43220-atomic_t fscache_n_acquires;
43221-atomic_t fscache_n_acquires_null;
43222-atomic_t fscache_n_acquires_no_cache;
43223-atomic_t fscache_n_acquires_ok;
43224-atomic_t fscache_n_acquires_nobufs;
43225-atomic_t fscache_n_acquires_oom;
43226-
43227-atomic_t fscache_n_updates;
43228-atomic_t fscache_n_updates_null;
43229-atomic_t fscache_n_updates_run;
43230-
43231-atomic_t fscache_n_relinquishes;
43232-atomic_t fscache_n_relinquishes_null;
43233-atomic_t fscache_n_relinquishes_waitcrt;
43234-atomic_t fscache_n_relinquishes_retire;
43235-
43236-atomic_t fscache_n_cookie_index;
43237-atomic_t fscache_n_cookie_data;
43238-atomic_t fscache_n_cookie_special;
43239-
43240-atomic_t fscache_n_object_alloc;
43241-atomic_t fscache_n_object_no_alloc;
43242-atomic_t fscache_n_object_lookups;
43243-atomic_t fscache_n_object_lookups_negative;
43244-atomic_t fscache_n_object_lookups_positive;
43245-atomic_t fscache_n_object_lookups_timed_out;
43246-atomic_t fscache_n_object_created;
43247-atomic_t fscache_n_object_avail;
43248-atomic_t fscache_n_object_dead;
43249-
43250-atomic_t fscache_n_checkaux_none;
43251-atomic_t fscache_n_checkaux_okay;
43252-atomic_t fscache_n_checkaux_update;
43253-atomic_t fscache_n_checkaux_obsolete;
43254+atomic_unchecked_t fscache_n_op_pend;
43255+atomic_unchecked_t fscache_n_op_run;
43256+atomic_unchecked_t fscache_n_op_enqueue;
43257+atomic_unchecked_t fscache_n_op_requeue;
43258+atomic_unchecked_t fscache_n_op_deferred_release;
43259+atomic_unchecked_t fscache_n_op_release;
43260+atomic_unchecked_t fscache_n_op_gc;
43261+atomic_unchecked_t fscache_n_op_cancelled;
43262+atomic_unchecked_t fscache_n_op_rejected;
43263+
43264+atomic_unchecked_t fscache_n_attr_changed;
43265+atomic_unchecked_t fscache_n_attr_changed_ok;
43266+atomic_unchecked_t fscache_n_attr_changed_nobufs;
43267+atomic_unchecked_t fscache_n_attr_changed_nomem;
43268+atomic_unchecked_t fscache_n_attr_changed_calls;
43269+
43270+atomic_unchecked_t fscache_n_allocs;
43271+atomic_unchecked_t fscache_n_allocs_ok;
43272+atomic_unchecked_t fscache_n_allocs_wait;
43273+atomic_unchecked_t fscache_n_allocs_nobufs;
43274+atomic_unchecked_t fscache_n_allocs_intr;
43275+atomic_unchecked_t fscache_n_allocs_object_dead;
43276+atomic_unchecked_t fscache_n_alloc_ops;
43277+atomic_unchecked_t fscache_n_alloc_op_waits;
43278+
43279+atomic_unchecked_t fscache_n_retrievals;
43280+atomic_unchecked_t fscache_n_retrievals_ok;
43281+atomic_unchecked_t fscache_n_retrievals_wait;
43282+atomic_unchecked_t fscache_n_retrievals_nodata;
43283+atomic_unchecked_t fscache_n_retrievals_nobufs;
43284+atomic_unchecked_t fscache_n_retrievals_intr;
43285+atomic_unchecked_t fscache_n_retrievals_nomem;
43286+atomic_unchecked_t fscache_n_retrievals_object_dead;
43287+atomic_unchecked_t fscache_n_retrieval_ops;
43288+atomic_unchecked_t fscache_n_retrieval_op_waits;
43289+
43290+atomic_unchecked_t fscache_n_stores;
43291+atomic_unchecked_t fscache_n_stores_ok;
43292+atomic_unchecked_t fscache_n_stores_again;
43293+atomic_unchecked_t fscache_n_stores_nobufs;
43294+atomic_unchecked_t fscache_n_stores_oom;
43295+atomic_unchecked_t fscache_n_store_ops;
43296+atomic_unchecked_t fscache_n_store_calls;
43297+atomic_unchecked_t fscache_n_store_pages;
43298+atomic_unchecked_t fscache_n_store_radix_deletes;
43299+atomic_unchecked_t fscache_n_store_pages_over_limit;
43300+
43301+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
43302+atomic_unchecked_t fscache_n_store_vmscan_gone;
43303+atomic_unchecked_t fscache_n_store_vmscan_busy;
43304+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
43305+
43306+atomic_unchecked_t fscache_n_marks;
43307+atomic_unchecked_t fscache_n_uncaches;
43308+
43309+atomic_unchecked_t fscache_n_acquires;
43310+atomic_unchecked_t fscache_n_acquires_null;
43311+atomic_unchecked_t fscache_n_acquires_no_cache;
43312+atomic_unchecked_t fscache_n_acquires_ok;
43313+atomic_unchecked_t fscache_n_acquires_nobufs;
43314+atomic_unchecked_t fscache_n_acquires_oom;
43315+
43316+atomic_unchecked_t fscache_n_updates;
43317+atomic_unchecked_t fscache_n_updates_null;
43318+atomic_unchecked_t fscache_n_updates_run;
43319+
43320+atomic_unchecked_t fscache_n_relinquishes;
43321+atomic_unchecked_t fscache_n_relinquishes_null;
43322+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
43323+atomic_unchecked_t fscache_n_relinquishes_retire;
43324+
43325+atomic_unchecked_t fscache_n_cookie_index;
43326+atomic_unchecked_t fscache_n_cookie_data;
43327+atomic_unchecked_t fscache_n_cookie_special;
43328+
43329+atomic_unchecked_t fscache_n_object_alloc;
43330+atomic_unchecked_t fscache_n_object_no_alloc;
43331+atomic_unchecked_t fscache_n_object_lookups;
43332+atomic_unchecked_t fscache_n_object_lookups_negative;
43333+atomic_unchecked_t fscache_n_object_lookups_positive;
43334+atomic_unchecked_t fscache_n_object_lookups_timed_out;
43335+atomic_unchecked_t fscache_n_object_created;
43336+atomic_unchecked_t fscache_n_object_avail;
43337+atomic_unchecked_t fscache_n_object_dead;
43338+
43339+atomic_unchecked_t fscache_n_checkaux_none;
43340+atomic_unchecked_t fscache_n_checkaux_okay;
43341+atomic_unchecked_t fscache_n_checkaux_update;
43342+atomic_unchecked_t fscache_n_checkaux_obsolete;
43343
43344 atomic_t fscache_n_cop_alloc_object;
43345 atomic_t fscache_n_cop_lookup_object;
43346@@ -133,113 +133,113 @@ static int fscache_stats_show(struct seq
43347 seq_puts(m, "FS-Cache statistics\n");
43348
43349 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
43350- atomic_read(&fscache_n_cookie_index),
43351- atomic_read(&fscache_n_cookie_data),
43352- atomic_read(&fscache_n_cookie_special));
43353+ atomic_read_unchecked(&fscache_n_cookie_index),
43354+ atomic_read_unchecked(&fscache_n_cookie_data),
43355+ atomic_read_unchecked(&fscache_n_cookie_special));
43356
43357 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
43358- atomic_read(&fscache_n_object_alloc),
43359- atomic_read(&fscache_n_object_no_alloc),
43360- atomic_read(&fscache_n_object_avail),
43361- atomic_read(&fscache_n_object_dead));
43362+ atomic_read_unchecked(&fscache_n_object_alloc),
43363+ atomic_read_unchecked(&fscache_n_object_no_alloc),
43364+ atomic_read_unchecked(&fscache_n_object_avail),
43365+ atomic_read_unchecked(&fscache_n_object_dead));
43366 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
43367- atomic_read(&fscache_n_checkaux_none),
43368- atomic_read(&fscache_n_checkaux_okay),
43369- atomic_read(&fscache_n_checkaux_update),
43370- atomic_read(&fscache_n_checkaux_obsolete));
43371+ atomic_read_unchecked(&fscache_n_checkaux_none),
43372+ atomic_read_unchecked(&fscache_n_checkaux_okay),
43373+ atomic_read_unchecked(&fscache_n_checkaux_update),
43374+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
43375
43376 seq_printf(m, "Pages : mrk=%u unc=%u\n",
43377- atomic_read(&fscache_n_marks),
43378- atomic_read(&fscache_n_uncaches));
43379+ atomic_read_unchecked(&fscache_n_marks),
43380+ atomic_read_unchecked(&fscache_n_uncaches));
43381
43382 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
43383 " oom=%u\n",
43384- atomic_read(&fscache_n_acquires),
43385- atomic_read(&fscache_n_acquires_null),
43386- atomic_read(&fscache_n_acquires_no_cache),
43387- atomic_read(&fscache_n_acquires_ok),
43388- atomic_read(&fscache_n_acquires_nobufs),
43389- atomic_read(&fscache_n_acquires_oom));
43390+ atomic_read_unchecked(&fscache_n_acquires),
43391+ atomic_read_unchecked(&fscache_n_acquires_null),
43392+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
43393+ atomic_read_unchecked(&fscache_n_acquires_ok),
43394+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
43395+ atomic_read_unchecked(&fscache_n_acquires_oom));
43396
43397 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
43398- atomic_read(&fscache_n_object_lookups),
43399- atomic_read(&fscache_n_object_lookups_negative),
43400- atomic_read(&fscache_n_object_lookups_positive),
43401- atomic_read(&fscache_n_object_lookups_timed_out),
43402- atomic_read(&fscache_n_object_created));
43403+ atomic_read_unchecked(&fscache_n_object_lookups),
43404+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
43405+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
43406+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out),
43407+ atomic_read_unchecked(&fscache_n_object_created));
43408
43409 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
43410- atomic_read(&fscache_n_updates),
43411- atomic_read(&fscache_n_updates_null),
43412- atomic_read(&fscache_n_updates_run));
43413+ atomic_read_unchecked(&fscache_n_updates),
43414+ atomic_read_unchecked(&fscache_n_updates_null),
43415+ atomic_read_unchecked(&fscache_n_updates_run));
43416
43417 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
43418- atomic_read(&fscache_n_relinquishes),
43419- atomic_read(&fscache_n_relinquishes_null),
43420- atomic_read(&fscache_n_relinquishes_waitcrt),
43421- atomic_read(&fscache_n_relinquishes_retire));
43422+ atomic_read_unchecked(&fscache_n_relinquishes),
43423+ atomic_read_unchecked(&fscache_n_relinquishes_null),
43424+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
43425+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
43426
43427 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
43428- atomic_read(&fscache_n_attr_changed),
43429- atomic_read(&fscache_n_attr_changed_ok),
43430- atomic_read(&fscache_n_attr_changed_nobufs),
43431- atomic_read(&fscache_n_attr_changed_nomem),
43432- atomic_read(&fscache_n_attr_changed_calls));
43433+ atomic_read_unchecked(&fscache_n_attr_changed),
43434+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
43435+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
43436+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
43437+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
43438
43439 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
43440- atomic_read(&fscache_n_allocs),
43441- atomic_read(&fscache_n_allocs_ok),
43442- atomic_read(&fscache_n_allocs_wait),
43443- atomic_read(&fscache_n_allocs_nobufs),
43444- atomic_read(&fscache_n_allocs_intr));
43445+ atomic_read_unchecked(&fscache_n_allocs),
43446+ atomic_read_unchecked(&fscache_n_allocs_ok),
43447+ atomic_read_unchecked(&fscache_n_allocs_wait),
43448+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
43449+ atomic_read_unchecked(&fscache_n_allocs_intr));
43450 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
43451- atomic_read(&fscache_n_alloc_ops),
43452- atomic_read(&fscache_n_alloc_op_waits),
43453- atomic_read(&fscache_n_allocs_object_dead));
43454+ atomic_read_unchecked(&fscache_n_alloc_ops),
43455+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
43456+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
43457
43458 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
43459 " int=%u oom=%u\n",
43460- atomic_read(&fscache_n_retrievals),
43461- atomic_read(&fscache_n_retrievals_ok),
43462- atomic_read(&fscache_n_retrievals_wait),
43463- atomic_read(&fscache_n_retrievals_nodata),
43464- atomic_read(&fscache_n_retrievals_nobufs),
43465- atomic_read(&fscache_n_retrievals_intr),
43466- atomic_read(&fscache_n_retrievals_nomem));
43467+ atomic_read_unchecked(&fscache_n_retrievals),
43468+ atomic_read_unchecked(&fscache_n_retrievals_ok),
43469+ atomic_read_unchecked(&fscache_n_retrievals_wait),
43470+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
43471+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
43472+ atomic_read_unchecked(&fscache_n_retrievals_intr),
43473+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
43474 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
43475- atomic_read(&fscache_n_retrieval_ops),
43476- atomic_read(&fscache_n_retrieval_op_waits),
43477- atomic_read(&fscache_n_retrievals_object_dead));
43478+ atomic_read_unchecked(&fscache_n_retrieval_ops),
43479+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
43480+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
43481
43482 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
43483- atomic_read(&fscache_n_stores),
43484- atomic_read(&fscache_n_stores_ok),
43485- atomic_read(&fscache_n_stores_again),
43486- atomic_read(&fscache_n_stores_nobufs),
43487- atomic_read(&fscache_n_stores_oom));
43488+ atomic_read_unchecked(&fscache_n_stores),
43489+ atomic_read_unchecked(&fscache_n_stores_ok),
43490+ atomic_read_unchecked(&fscache_n_stores_again),
43491+ atomic_read_unchecked(&fscache_n_stores_nobufs),
43492+ atomic_read_unchecked(&fscache_n_stores_oom));
43493 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
43494- atomic_read(&fscache_n_store_ops),
43495- atomic_read(&fscache_n_store_calls),
43496- atomic_read(&fscache_n_store_pages),
43497- atomic_read(&fscache_n_store_radix_deletes),
43498- atomic_read(&fscache_n_store_pages_over_limit));
43499+ atomic_read_unchecked(&fscache_n_store_ops),
43500+ atomic_read_unchecked(&fscache_n_store_calls),
43501+ atomic_read_unchecked(&fscache_n_store_pages),
43502+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
43503+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
43504
43505 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
43506- atomic_read(&fscache_n_store_vmscan_not_storing),
43507- atomic_read(&fscache_n_store_vmscan_gone),
43508- atomic_read(&fscache_n_store_vmscan_busy),
43509- atomic_read(&fscache_n_store_vmscan_cancelled));
43510+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
43511+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
43512+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
43513+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled));
43514
43515 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
43516- atomic_read(&fscache_n_op_pend),
43517- atomic_read(&fscache_n_op_run),
43518- atomic_read(&fscache_n_op_enqueue),
43519- atomic_read(&fscache_n_op_cancelled),
43520- atomic_read(&fscache_n_op_rejected));
43521+ atomic_read_unchecked(&fscache_n_op_pend),
43522+ atomic_read_unchecked(&fscache_n_op_run),
43523+ atomic_read_unchecked(&fscache_n_op_enqueue),
43524+ atomic_read_unchecked(&fscache_n_op_cancelled),
43525+ atomic_read_unchecked(&fscache_n_op_rejected));
43526 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
43527- atomic_read(&fscache_n_op_deferred_release),
43528- atomic_read(&fscache_n_op_release),
43529- atomic_read(&fscache_n_op_gc));
43530+ atomic_read_unchecked(&fscache_n_op_deferred_release),
43531+ atomic_read_unchecked(&fscache_n_op_release),
43532+ atomic_read_unchecked(&fscache_n_op_gc));
43533
43534 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
43535 atomic_read(&fscache_n_cop_alloc_object),
43536diff -urNp linux-2.6.32.43/fs/fs_struct.c linux-2.6.32.43/fs/fs_struct.c
43537--- linux-2.6.32.43/fs/fs_struct.c 2011-03-27 14:31:47.000000000 -0400
43538+++ linux-2.6.32.43/fs/fs_struct.c 2011-04-17 15:56:46.000000000 -0400
43539@@ -4,6 +4,7 @@
43540 #include <linux/path.h>
43541 #include <linux/slab.h>
43542 #include <linux/fs_struct.h>
43543+#include <linux/grsecurity.h>
43544
43545 /*
43546 * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values.
43547@@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s
43548 old_root = fs->root;
43549 fs->root = *path;
43550 path_get(path);
43551+ gr_set_chroot_entries(current, path);
43552 write_unlock(&fs->lock);
43553 if (old_root.dentry)
43554 path_put(&old_root);
43555@@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo
43556 && fs->root.mnt == old_root->mnt) {
43557 path_get(new_root);
43558 fs->root = *new_root;
43559+ gr_set_chroot_entries(p, new_root);
43560 count++;
43561 }
43562 if (fs->pwd.dentry == old_root->dentry
43563@@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk)
43564 task_lock(tsk);
43565 write_lock(&fs->lock);
43566 tsk->fs = NULL;
43567- kill = !--fs->users;
43568+ gr_clear_chroot_entries(tsk);
43569+ kill = !atomic_dec_return(&fs->users);
43570 write_unlock(&fs->lock);
43571 task_unlock(tsk);
43572 if (kill)
43573@@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct
43574 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
43575 /* We don't need to lock fs - think why ;-) */
43576 if (fs) {
43577- fs->users = 1;
43578+ atomic_set(&fs->users, 1);
43579 fs->in_exec = 0;
43580 rwlock_init(&fs->lock);
43581 fs->umask = old->umask;
43582@@ -127,8 +131,9 @@ int unshare_fs_struct(void)
43583
43584 task_lock(current);
43585 write_lock(&fs->lock);
43586- kill = !--fs->users;
43587+ kill = !atomic_dec_return(&fs->users);
43588 current->fs = new_fs;
43589+ gr_set_chroot_entries(current, &new_fs->root);
43590 write_unlock(&fs->lock);
43591 task_unlock(current);
43592
43593@@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask);
43594
43595 /* to be mentioned only in INIT_TASK */
43596 struct fs_struct init_fs = {
43597- .users = 1,
43598+ .users = ATOMIC_INIT(1),
43599 .lock = __RW_LOCK_UNLOCKED(init_fs.lock),
43600 .umask = 0022,
43601 };
43602@@ -162,12 +167,13 @@ void daemonize_fs_struct(void)
43603 task_lock(current);
43604
43605 write_lock(&init_fs.lock);
43606- init_fs.users++;
43607+ atomic_inc(&init_fs.users);
43608 write_unlock(&init_fs.lock);
43609
43610 write_lock(&fs->lock);
43611 current->fs = &init_fs;
43612- kill = !--fs->users;
43613+ gr_set_chroot_entries(current, &current->fs->root);
43614+ kill = !atomic_dec_return(&fs->users);
43615 write_unlock(&fs->lock);
43616
43617 task_unlock(current);
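Editor's note, not part of the patch: the fs/fs_struct.c hunks above turn the plain integer fs->users reference count into an atomic_t, so the "drop a reference, free on zero" test relies on an atomic decrement rather than the surrounding write lock for the count itself (the gr_*_chroot_entries calls are separate grsecurity bookkeeping). A simplified stand-in for that refcount pattern, with illustrative names and types rather than the kernel's:

#include <stdlib.h>

struct fs_struct_sketch {
	int users;			/* atomic_t in the patch; plain int here for brevity */
};

/* analogue of atomic_dec_return(): decrement and report the new value */
static int dec_return(int *v)
{
	return __sync_sub_and_fetch(v, 1);
}

static void put_fs_struct(struct fs_struct_sketch *fs)
{
	int kill = !dec_return(&fs->users);	/* kill = !atomic_dec_return(&fs->users); */

	if (kill)
		free(fs);			/* free_fs_struct() in the kernel */
}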
43618diff -urNp linux-2.6.32.43/fs/fuse/cuse.c linux-2.6.32.43/fs/fuse/cuse.c
43619--- linux-2.6.32.43/fs/fuse/cuse.c 2011-03-27 14:31:47.000000000 -0400
43620+++ linux-2.6.32.43/fs/fuse/cuse.c 2011-08-05 20:33:55.000000000 -0400
43621@@ -576,10 +576,12 @@ static int __init cuse_init(void)
43622 INIT_LIST_HEAD(&cuse_conntbl[i]);
43623
43624 /* inherit and extend fuse_dev_operations */
43625- cuse_channel_fops = fuse_dev_operations;
43626- cuse_channel_fops.owner = THIS_MODULE;
43627- cuse_channel_fops.open = cuse_channel_open;
43628- cuse_channel_fops.release = cuse_channel_release;
43629+ pax_open_kernel();
43630+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
43631+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
43632+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
43633+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
43634+ pax_close_kernel();
43635
43636 cuse_class = class_create(THIS_MODULE, "cuse");
43637 if (IS_ERR(cuse_class))
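Editor's note, not part of the patch: the cuse_init() hunk above replaces direct member assignment of cuse_channel_fops with a memcpy() plus per-member writes through (void **) casts, bracketed by pax_open_kernel()/pax_close_kernel(). Under PaX's constification the ops structure is treated as read-only, so the cast form is needed and the open/close pair temporarily lifts the write protection. Below is a loose, userspace-flavoured sketch of the initialisation pattern only, with the target left writable so the example stays well defined:

#include <string.h>

struct ops_sketch {
	void *owner;
	int (*open)(void);
	int (*release)(void);
};

static int channel_open(void)    { return 0; }
static int channel_release(void) { return 0; }

static const struct ops_sketch base_ops;	/* plays the role of fuse_dev_operations */
static struct ops_sketch channel_ops;		/* const in the patched kernel */

static void init_channel_ops(void)
{
	/* pax_open_kernel(): in the patch, makes the const target writable here */
	memcpy(&channel_ops, &base_ops, sizeof(base_ops));
	channel_ops.open    = channel_open;	/* the patch writes via *(void **)& casts */
	channel_ops.release = channel_release;
	/* pax_close_kernel(): restores the protection */
}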
43638diff -urNp linux-2.6.32.43/fs/fuse/dev.c linux-2.6.32.43/fs/fuse/dev.c
43639--- linux-2.6.32.43/fs/fuse/dev.c 2011-03-27 14:31:47.000000000 -0400
43640+++ linux-2.6.32.43/fs/fuse/dev.c 2011-08-05 20:33:55.000000000 -0400
43641@@ -885,7 +885,7 @@ static int fuse_notify_inval_entry(struc
43642 {
43643 struct fuse_notify_inval_entry_out outarg;
43644 int err = -EINVAL;
43645- char buf[FUSE_NAME_MAX+1];
43646+ char *buf = NULL;
43647 struct qstr name;
43648
43649 if (size < sizeof(outarg))
43650@@ -899,6 +899,11 @@ static int fuse_notify_inval_entry(struc
43651 if (outarg.namelen > FUSE_NAME_MAX)
43652 goto err;
43653
43654+ err = -ENOMEM;
43655+ buf = kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL);
43656+ if (!buf)
43657+ goto err;
43658+
43659 name.name = buf;
43660 name.len = outarg.namelen;
43661 err = fuse_copy_one(cs, buf, outarg.namelen + 1);
43662@@ -910,17 +915,15 @@ static int fuse_notify_inval_entry(struc
43663
43664 down_read(&fc->killsb);
43665 err = -ENOENT;
43666- if (!fc->sb)
43667- goto err_unlock;
43668-
43669- err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
43670-
43671-err_unlock:
43672+ if (fc->sb)
43673+ err = fuse_reverse_inval_entry(fc->sb, outarg.parent, &name);
43674 up_read(&fc->killsb);
43675+ kfree(buf);
43676 return err;
43677
43678 err:
43679 fuse_copy_finish(cs);
43680+ kfree(buf);
43681 return err;
43682 }
43683
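Editor's note, not part of the patch: the fuse_notify_inval_entry() change above removes a roughly 1 KiB name buffer from the kernel stack and allocates it with kmalloc() instead, freeing it on both the success and error paths. A compact, self-contained sketch of the same shape; the constant and error codes are stand-ins, not the FUSE ones:

#include <stdlib.h>
#include <string.h>

#define NAME_MAX_LEN 1024		/* stand-in for FUSE_NAME_MAX */

static int handle_name(const char *src, size_t len)
{
	char *buf;
	int err;

	if (len > NAME_MAX_LEN)
		return -1;		/* the kernel code returns -EINVAL here */

	buf = malloc(NAME_MAX_LEN + 1);	/* kmalloc(FUSE_NAME_MAX+1, GFP_KERNEL) in the patch */
	if (!buf)
		return -2;		/* -ENOMEM */

	memcpy(buf, src, len);
	buf[len] = '\0';
	err = 0;			/* ... look up / invalidate using the copied name ... */

	free(buf);			/* freed on every exit path, success or failure */
	return err;
}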
43684diff -urNp linux-2.6.32.43/fs/fuse/dir.c linux-2.6.32.43/fs/fuse/dir.c
43685--- linux-2.6.32.43/fs/fuse/dir.c 2011-03-27 14:31:47.000000000 -0400
43686+++ linux-2.6.32.43/fs/fuse/dir.c 2011-04-17 15:56:46.000000000 -0400
43687@@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de
43688 return link;
43689 }
43690
43691-static void free_link(char *link)
43692+static void free_link(const char *link)
43693 {
43694 if (!IS_ERR(link))
43695 free_page((unsigned long) link);
43696diff -urNp linux-2.6.32.43/fs/gfs2/ops_inode.c linux-2.6.32.43/fs/gfs2/ops_inode.c
43697--- linux-2.6.32.43/fs/gfs2/ops_inode.c 2011-03-27 14:31:47.000000000 -0400
43698+++ linux-2.6.32.43/fs/gfs2/ops_inode.c 2011-05-16 21:46:57.000000000 -0400
43699@@ -752,6 +752,8 @@ static int gfs2_rename(struct inode *odi
43700 unsigned int x;
43701 int error;
43702
43703+ pax_track_stack();
43704+
43705 if (ndentry->d_inode) {
43706 nip = GFS2_I(ndentry->d_inode);
43707 if (ip == nip)
43708diff -urNp linux-2.6.32.43/fs/gfs2/sys.c linux-2.6.32.43/fs/gfs2/sys.c
43709--- linux-2.6.32.43/fs/gfs2/sys.c 2011-03-27 14:31:47.000000000 -0400
43710+++ linux-2.6.32.43/fs/gfs2/sys.c 2011-04-17 15:56:46.000000000 -0400
43711@@ -49,7 +49,7 @@ static ssize_t gfs2_attr_store(struct ko
43712 return a->store ? a->store(sdp, buf, len) : len;
43713 }
43714
43715-static struct sysfs_ops gfs2_attr_ops = {
43716+static const struct sysfs_ops gfs2_attr_ops = {
43717 .show = gfs2_attr_show,
43718 .store = gfs2_attr_store,
43719 };
43720@@ -584,7 +584,7 @@ static int gfs2_uevent(struct kset *kset
43721 return 0;
43722 }
43723
43724-static struct kset_uevent_ops gfs2_uevent_ops = {
43725+static const struct kset_uevent_ops gfs2_uevent_ops = {
43726 .uevent = gfs2_uevent,
43727 };
43728
43729diff -urNp linux-2.6.32.43/fs/hfsplus/catalog.c linux-2.6.32.43/fs/hfsplus/catalog.c
43730--- linux-2.6.32.43/fs/hfsplus/catalog.c 2011-03-27 14:31:47.000000000 -0400
43731+++ linux-2.6.32.43/fs/hfsplus/catalog.c 2011-05-16 21:46:57.000000000 -0400
43732@@ -157,6 +157,8 @@ int hfsplus_find_cat(struct super_block
43733 int err;
43734 u16 type;
43735
43736+ pax_track_stack();
43737+
43738 hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL);
43739 err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry));
43740 if (err)
43741@@ -186,6 +188,8 @@ int hfsplus_create_cat(u32 cnid, struct
43742 int entry_size;
43743 int err;
43744
43745+ pax_track_stack();
43746+
43747 dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink);
43748 sb = dir->i_sb;
43749 hfs_find_init(HFSPLUS_SB(sb).cat_tree, &fd);
43750@@ -318,6 +322,8 @@ int hfsplus_rename_cat(u32 cnid,
43751 int entry_size, type;
43752 int err = 0;
43753
43754+ pax_track_stack();
43755+
43756 dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name,
43757 dst_dir->i_ino, dst_name->name);
43758 sb = src_dir->i_sb;
43759diff -urNp linux-2.6.32.43/fs/hfsplus/dir.c linux-2.6.32.43/fs/hfsplus/dir.c
43760--- linux-2.6.32.43/fs/hfsplus/dir.c 2011-03-27 14:31:47.000000000 -0400
43761+++ linux-2.6.32.43/fs/hfsplus/dir.c 2011-05-16 21:46:57.000000000 -0400
43762@@ -121,6 +121,8 @@ static int hfsplus_readdir(struct file *
43763 struct hfsplus_readdir_data *rd;
43764 u16 type;
43765
43766+ pax_track_stack();
43767+
43768 if (filp->f_pos >= inode->i_size)
43769 return 0;
43770
43771diff -urNp linux-2.6.32.43/fs/hfsplus/inode.c linux-2.6.32.43/fs/hfsplus/inode.c
43772--- linux-2.6.32.43/fs/hfsplus/inode.c 2011-03-27 14:31:47.000000000 -0400
43773+++ linux-2.6.32.43/fs/hfsplus/inode.c 2011-05-16 21:46:57.000000000 -0400
43774@@ -399,6 +399,8 @@ int hfsplus_cat_read_inode(struct inode
43775 int res = 0;
43776 u16 type;
43777
43778+ pax_track_stack();
43779+
43780 type = hfs_bnode_read_u16(fd->bnode, fd->entryoffset);
43781
43782 HFSPLUS_I(inode).dev = 0;
43783@@ -461,6 +463,8 @@ int hfsplus_cat_write_inode(struct inode
43784 struct hfs_find_data fd;
43785 hfsplus_cat_entry entry;
43786
43787+ pax_track_stack();
43788+
43789 if (HFSPLUS_IS_RSRC(inode))
43790 main_inode = HFSPLUS_I(inode).rsrc_inode;
43791
43792diff -urNp linux-2.6.32.43/fs/hfsplus/ioctl.c linux-2.6.32.43/fs/hfsplus/ioctl.c
43793--- linux-2.6.32.43/fs/hfsplus/ioctl.c 2011-03-27 14:31:47.000000000 -0400
43794+++ linux-2.6.32.43/fs/hfsplus/ioctl.c 2011-05-16 21:46:57.000000000 -0400
43795@@ -101,6 +101,8 @@ int hfsplus_setxattr(struct dentry *dent
43796 struct hfsplus_cat_file *file;
43797 int res;
43798
43799+ pax_track_stack();
43800+
43801 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43802 return -EOPNOTSUPP;
43803
43804@@ -143,6 +145,8 @@ ssize_t hfsplus_getxattr(struct dentry *
43805 struct hfsplus_cat_file *file;
43806 ssize_t res = 0;
43807
43808+ pax_track_stack();
43809+
43810 if (!S_ISREG(inode->i_mode) || HFSPLUS_IS_RSRC(inode))
43811 return -EOPNOTSUPP;
43812
43813diff -urNp linux-2.6.32.43/fs/hfsplus/super.c linux-2.6.32.43/fs/hfsplus/super.c
43814--- linux-2.6.32.43/fs/hfsplus/super.c 2011-03-27 14:31:47.000000000 -0400
43815+++ linux-2.6.32.43/fs/hfsplus/super.c 2011-05-16 21:46:57.000000000 -0400
43816@@ -312,6 +312,8 @@ static int hfsplus_fill_super(struct sup
43817 struct nls_table *nls = NULL;
43818 int err = -EINVAL;
43819
43820+ pax_track_stack();
43821+
43822 sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
43823 if (!sbi)
43824 return -ENOMEM;
43825diff -urNp linux-2.6.32.43/fs/hugetlbfs/inode.c linux-2.6.32.43/fs/hugetlbfs/inode.c
43826--- linux-2.6.32.43/fs/hugetlbfs/inode.c 2011-03-27 14:31:47.000000000 -0400
43827+++ linux-2.6.32.43/fs/hugetlbfs/inode.c 2011-04-17 15:56:46.000000000 -0400
43828@@ -909,7 +909,7 @@ static struct file_system_type hugetlbfs
43829 .kill_sb = kill_litter_super,
43830 };
43831
43832-static struct vfsmount *hugetlbfs_vfsmount;
43833+struct vfsmount *hugetlbfs_vfsmount;
43834
43835 static int can_do_hugetlb_shm(void)
43836 {
43837diff -urNp linux-2.6.32.43/fs/ioctl.c linux-2.6.32.43/fs/ioctl.c
43838--- linux-2.6.32.43/fs/ioctl.c 2011-03-27 14:31:47.000000000 -0400
43839+++ linux-2.6.32.43/fs/ioctl.c 2011-04-17 15:56:46.000000000 -0400
43840@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema
43841 u64 phys, u64 len, u32 flags)
43842 {
43843 struct fiemap_extent extent;
43844- struct fiemap_extent *dest = fieinfo->fi_extents_start;
43845+ struct fiemap_extent __user *dest = fieinfo->fi_extents_start;
43846
43847 /* only count the extents */
43848 if (fieinfo->fi_extents_max == 0) {
43849@@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil
43850
43851 fieinfo.fi_flags = fiemap.fm_flags;
43852 fieinfo.fi_extents_max = fiemap.fm_extent_count;
43853- fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap));
43854+ fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap));
43855
43856 if (fiemap.fm_extent_count != 0 &&
43857 !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start,
43858@@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil
43859 error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len);
43860 fiemap.fm_flags = fieinfo.fi_flags;
43861 fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped;
43862- if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap)))
43863+ if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap)))
43864 error = -EFAULT;
43865
43866 return error;
43867diff -urNp linux-2.6.32.43/fs/jbd/checkpoint.c linux-2.6.32.43/fs/jbd/checkpoint.c
43868--- linux-2.6.32.43/fs/jbd/checkpoint.c 2011-03-27 14:31:47.000000000 -0400
43869+++ linux-2.6.32.43/fs/jbd/checkpoint.c 2011-05-16 21:46:57.000000000 -0400
43870@@ -348,6 +348,8 @@ int log_do_checkpoint(journal_t *journal
43871 tid_t this_tid;
43872 int result;
43873
43874+ pax_track_stack();
43875+
43876 jbd_debug(1, "Start checkpoint\n");
43877
43878 /*
43879diff -urNp linux-2.6.32.43/fs/jffs2/compr_rtime.c linux-2.6.32.43/fs/jffs2/compr_rtime.c
43880--- linux-2.6.32.43/fs/jffs2/compr_rtime.c 2011-03-27 14:31:47.000000000 -0400
43881+++ linux-2.6.32.43/fs/jffs2/compr_rtime.c 2011-05-16 21:46:57.000000000 -0400
43882@@ -37,6 +37,8 @@ static int jffs2_rtime_compress(unsigned
43883 int outpos = 0;
43884 int pos=0;
43885
43886+ pax_track_stack();
43887+
43888 memset(positions,0,sizeof(positions));
43889
43890 while (pos < (*sourcelen) && outpos <= (*dstlen)-2) {
43891@@ -79,6 +81,8 @@ static int jffs2_rtime_decompress(unsign
43892 int outpos = 0;
43893 int pos=0;
43894
43895+ pax_track_stack();
43896+
43897 memset(positions,0,sizeof(positions));
43898
43899 while (outpos<destlen) {
43900diff -urNp linux-2.6.32.43/fs/jffs2/compr_rubin.c linux-2.6.32.43/fs/jffs2/compr_rubin.c
43901--- linux-2.6.32.43/fs/jffs2/compr_rubin.c 2011-03-27 14:31:47.000000000 -0400
43902+++ linux-2.6.32.43/fs/jffs2/compr_rubin.c 2011-05-16 21:46:57.000000000 -0400
43903@@ -314,6 +314,8 @@ static int jffs2_dynrubin_compress(unsig
43904 int ret;
43905 uint32_t mysrclen, mydstlen;
43906
43907+ pax_track_stack();
43908+
43909 mysrclen = *sourcelen;
43910 mydstlen = *dstlen - 8;
43911
43912diff -urNp linux-2.6.32.43/fs/jffs2/erase.c linux-2.6.32.43/fs/jffs2/erase.c
43913--- linux-2.6.32.43/fs/jffs2/erase.c 2011-03-27 14:31:47.000000000 -0400
43914+++ linux-2.6.32.43/fs/jffs2/erase.c 2011-04-17 15:56:46.000000000 -0400
43915@@ -434,7 +434,8 @@ static void jffs2_mark_erased_block(stru
43916 struct jffs2_unknown_node marker = {
43917 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
43918 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43919- .totlen = cpu_to_je32(c->cleanmarker_size)
43920+ .totlen = cpu_to_je32(c->cleanmarker_size),
43921+ .hdr_crc = cpu_to_je32(0)
43922 };
43923
43924 jffs2_prealloc_raw_node_refs(c, jeb, 1);
43925diff -urNp linux-2.6.32.43/fs/jffs2/wbuf.c linux-2.6.32.43/fs/jffs2/wbuf.c
43926--- linux-2.6.32.43/fs/jffs2/wbuf.c 2011-03-27 14:31:47.000000000 -0400
43927+++ linux-2.6.32.43/fs/jffs2/wbuf.c 2011-04-17 15:56:46.000000000 -0400
43928@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o
43929 {
43930 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
43931 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
43932- .totlen = constant_cpu_to_je32(8)
43933+ .totlen = constant_cpu_to_je32(8),
43934+ .hdr_crc = constant_cpu_to_je32(0)
43935 };
43936
43937 /*
43938diff -urNp linux-2.6.32.43/fs/jffs2/xattr.c linux-2.6.32.43/fs/jffs2/xattr.c
43939--- linux-2.6.32.43/fs/jffs2/xattr.c 2011-03-27 14:31:47.000000000 -0400
43940+++ linux-2.6.32.43/fs/jffs2/xattr.c 2011-05-16 21:46:57.000000000 -0400
43941@@ -773,6 +773,8 @@ void jffs2_build_xattr_subsystem(struct
43942
43943 BUG_ON(!(c->flags & JFFS2_SB_FLAG_BUILDING));
43944
43945+ pax_track_stack();
43946+
43947 /* Phase.1 : Merge same xref */
43948 for (i=0; i < XREF_TMPHASH_SIZE; i++)
43949 xref_tmphash[i] = NULL;
43950diff -urNp linux-2.6.32.43/fs/jfs/super.c linux-2.6.32.43/fs/jfs/super.c
43951--- linux-2.6.32.43/fs/jfs/super.c 2011-03-27 14:31:47.000000000 -0400
43952+++ linux-2.6.32.43/fs/jfs/super.c 2011-06-07 18:06:04.000000000 -0400
43953@@ -793,7 +793,7 @@ static int __init init_jfs_fs(void)
43954
43955 jfs_inode_cachep =
43956 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
43957- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
43958+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
43959 init_once);
43960 if (jfs_inode_cachep == NULL)
43961 return -ENOMEM;
43962diff -urNp linux-2.6.32.43/fs/Kconfig.binfmt linux-2.6.32.43/fs/Kconfig.binfmt
43963--- linux-2.6.32.43/fs/Kconfig.binfmt 2011-03-27 14:31:47.000000000 -0400
43964+++ linux-2.6.32.43/fs/Kconfig.binfmt 2011-04-17 15:56:46.000000000 -0400
43965@@ -86,7 +86,7 @@ config HAVE_AOUT
43966
43967 config BINFMT_AOUT
43968 tristate "Kernel support for a.out and ECOFF binaries"
43969- depends on HAVE_AOUT
43970+ depends on HAVE_AOUT && BROKEN
43971 ---help---
43972 A.out (Assembler.OUTput) is a set of formats for libraries and
43973 executables used in the earliest versions of UNIX. Linux used
43974diff -urNp linux-2.6.32.43/fs/libfs.c linux-2.6.32.43/fs/libfs.c
43975--- linux-2.6.32.43/fs/libfs.c 2011-03-27 14:31:47.000000000 -0400
43976+++ linux-2.6.32.43/fs/libfs.c 2011-05-11 18:25:15.000000000 -0400
43977@@ -157,12 +157,20 @@ int dcache_readdir(struct file * filp, v
43978
43979 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
43980 struct dentry *next;
43981+ char d_name[sizeof(next->d_iname)];
43982+ const unsigned char *name;
43983+
43984 next = list_entry(p, struct dentry, d_u.d_child);
43985 if (d_unhashed(next) || !next->d_inode)
43986 continue;
43987
43988 spin_unlock(&dcache_lock);
43989- if (filldir(dirent, next->d_name.name,
43990+ name = next->d_name.name;
43991+ if (name == next->d_iname) {
43992+ memcpy(d_name, name, next->d_name.len);
43993+ name = d_name;
43994+ }
43995+ if (filldir(dirent, name,
43996 next->d_name.len, filp->f_pos,
43997 next->d_inode->i_ino,
43998 dt_type(next->d_inode)) < 0)
43999diff -urNp linux-2.6.32.43/fs/lockd/clntproc.c linux-2.6.32.43/fs/lockd/clntproc.c
44000--- linux-2.6.32.43/fs/lockd/clntproc.c 2011-03-27 14:31:47.000000000 -0400
44001+++ linux-2.6.32.43/fs/lockd/clntproc.c 2011-05-16 21:46:57.000000000 -0400
44002@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt
44003 /*
44004 * Cookie counter for NLM requests
44005 */
44006-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
44007+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
44008
44009 void nlmclnt_next_cookie(struct nlm_cookie *c)
44010 {
44011- u32 cookie = atomic_inc_return(&nlm_cookie);
44012+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
44013
44014 memcpy(c->data, &cookie, 4);
44015 c->len=4;
44016@@ -621,6 +621,8 @@ nlmclnt_reclaim(struct nlm_host *host, s
44017 struct nlm_rqst reqst, *req;
44018 int status;
44019
44020+ pax_track_stack();
44021+
44022 req = &reqst;
44023 memset(req, 0, sizeof(*req));
44024 locks_init_lock(&req->a_args.lock.fl);
44025diff -urNp linux-2.6.32.43/fs/lockd/svc.c linux-2.6.32.43/fs/lockd/svc.c
44026--- linux-2.6.32.43/fs/lockd/svc.c 2011-03-27 14:31:47.000000000 -0400
44027+++ linux-2.6.32.43/fs/lockd/svc.c 2011-04-17 15:56:46.000000000 -0400
44028@@ -43,7 +43,7 @@
44029
44030 static struct svc_program nlmsvc_program;
44031
44032-struct nlmsvc_binding * nlmsvc_ops;
44033+const struct nlmsvc_binding * nlmsvc_ops;
44034 EXPORT_SYMBOL_GPL(nlmsvc_ops);
44035
44036 static DEFINE_MUTEX(nlmsvc_mutex);
44037diff -urNp linux-2.6.32.43/fs/locks.c linux-2.6.32.43/fs/locks.c
44038--- linux-2.6.32.43/fs/locks.c 2011-03-27 14:31:47.000000000 -0400
44039+++ linux-2.6.32.43/fs/locks.c 2011-07-06 19:47:11.000000000 -0400
44040@@ -145,10 +145,28 @@ static LIST_HEAD(blocked_list);
44041
44042 static struct kmem_cache *filelock_cache __read_mostly;
44043
44044+static void locks_init_lock_always(struct file_lock *fl)
44045+{
44046+ fl->fl_next = NULL;
44047+ fl->fl_fasync = NULL;
44048+ fl->fl_owner = NULL;
44049+ fl->fl_pid = 0;
44050+ fl->fl_nspid = NULL;
44051+ fl->fl_file = NULL;
44052+ fl->fl_flags = 0;
44053+ fl->fl_type = 0;
44054+ fl->fl_start = fl->fl_end = 0;
44055+}
44056+
44057 /* Allocate an empty lock structure. */
44058 static struct file_lock *locks_alloc_lock(void)
44059 {
44060- return kmem_cache_alloc(filelock_cache, GFP_KERNEL);
44061+ struct file_lock *fl = kmem_cache_alloc(filelock_cache, GFP_KERNEL);
44062+
44063+ if (fl)
44064+ locks_init_lock_always(fl);
44065+
44066+ return fl;
44067 }
44068
44069 void locks_release_private(struct file_lock *fl)
44070@@ -183,17 +201,9 @@ void locks_init_lock(struct file_lock *f
44071 INIT_LIST_HEAD(&fl->fl_link);
44072 INIT_LIST_HEAD(&fl->fl_block);
44073 init_waitqueue_head(&fl->fl_wait);
44074- fl->fl_next = NULL;
44075- fl->fl_fasync = NULL;
44076- fl->fl_owner = NULL;
44077- fl->fl_pid = 0;
44078- fl->fl_nspid = NULL;
44079- fl->fl_file = NULL;
44080- fl->fl_flags = 0;
44081- fl->fl_type = 0;
44082- fl->fl_start = fl->fl_end = 0;
44083 fl->fl_ops = NULL;
44084 fl->fl_lmops = NULL;
44085+ locks_init_lock_always(fl);
44086 }
44087
44088 EXPORT_SYMBOL(locks_init_lock);
44089@@ -2007,16 +2017,16 @@ void locks_remove_flock(struct file *fil
44090 return;
44091
44092 if (filp->f_op && filp->f_op->flock) {
44093- struct file_lock fl = {
44094+ struct file_lock flock = {
44095 .fl_pid = current->tgid,
44096 .fl_file = filp,
44097 .fl_flags = FL_FLOCK,
44098 .fl_type = F_UNLCK,
44099 .fl_end = OFFSET_MAX,
44100 };
44101- filp->f_op->flock(filp, F_SETLKW, &fl);
44102- if (fl.fl_ops && fl.fl_ops->fl_release_private)
44103- fl.fl_ops->fl_release_private(&fl);
44104+ filp->f_op->flock(filp, F_SETLKW, &flock);
44105+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
44106+ flock.fl_ops->fl_release_private(&flock);
44107 }
44108
44109 lock_kernel();
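Editor's note, not part of the patch: the fs/locks.c hunk factors the per-field resets out of locks_init_lock() into locks_init_lock_always() and calls it from locks_alloc_lock() as well, so a lock obtained from the slab cache never carries stale field values; the on-stack lock in locks_remove_flock() is also renamed to flock. A simplified stand-in for the shared-initialiser part of that refactor, using a few representative fields rather than the real struct file_lock:

#include <stdlib.h>

struct file_lock_sketch {
	void        *fl_next;
	void        *fl_owner;
	int          fl_pid;
	unsigned int fl_flags;
};

static void locks_init_lock_always(struct file_lock_sketch *fl)
{
	fl->fl_next  = NULL;
	fl->fl_owner = NULL;
	fl->fl_pid   = 0;
	fl->fl_flags = 0;
}

static struct file_lock_sketch *locks_alloc_lock(void)
{
	/* kmem_cache_alloc(filelock_cache, GFP_KERNEL) in the patch */
	struct file_lock_sketch *fl = malloc(sizeof(*fl));

	if (fl)
		locks_init_lock_always(fl);	/* newly added: initialise at allocation time */
	return fl;
}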
44110diff -urNp linux-2.6.32.43/fs/mbcache.c linux-2.6.32.43/fs/mbcache.c
44111--- linux-2.6.32.43/fs/mbcache.c 2011-03-27 14:31:47.000000000 -0400
44112+++ linux-2.6.32.43/fs/mbcache.c 2011-08-05 20:33:55.000000000 -0400
44113@@ -266,9 +266,9 @@ mb_cache_create(const char *name, struct
44114 if (!cache)
44115 goto fail;
44116 cache->c_name = name;
44117- cache->c_op.free = NULL;
44118+ *(void **)&cache->c_op.free = NULL;
44119 if (cache_op)
44120- cache->c_op.free = cache_op->free;
44121+ *(void **)&cache->c_op.free = cache_op->free;
44122 atomic_set(&cache->c_entry_count, 0);
44123 cache->c_bucket_bits = bucket_bits;
44124 #ifdef MB_CACHE_INDEXES_COUNT
44125diff -urNp linux-2.6.32.43/fs/namei.c linux-2.6.32.43/fs/namei.c
44126--- linux-2.6.32.43/fs/namei.c 2011-03-27 14:31:47.000000000 -0400
44127+++ linux-2.6.32.43/fs/namei.c 2011-05-16 21:46:57.000000000 -0400
44128@@ -224,14 +224,6 @@ int generic_permission(struct inode *ino
44129 return ret;
44130
44131 /*
44132- * Read/write DACs are always overridable.
44133- * Executable DACs are overridable if at least one exec bit is set.
44134- */
44135- if (!(mask & MAY_EXEC) || execute_ok(inode))
44136- if (capable(CAP_DAC_OVERRIDE))
44137- return 0;
44138-
44139- /*
44140 * Searching includes executable on directories, else just read.
44141 */
44142 mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
44143@@ -239,6 +231,14 @@ int generic_permission(struct inode *ino
44144 if (capable(CAP_DAC_READ_SEARCH))
44145 return 0;
44146
44147+ /*
44148+ * Read/write DACs are always overridable.
44149+ * Executable DACs are overridable if at least one exec bit is set.
44150+ */
44151+ if (!(mask & MAY_EXEC) || execute_ok(inode))
44152+ if (capable(CAP_DAC_OVERRIDE))
44153+ return 0;
44154+
44155 return -EACCES;
44156 }
44157
44158@@ -458,7 +458,8 @@ static int exec_permission_lite(struct i
44159 if (!ret)
44160 goto ok;
44161
44162- if (capable(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH))
44163+ if (capable_nolog(CAP_DAC_OVERRIDE) || capable(CAP_DAC_READ_SEARCH) ||
44164+ capable(CAP_DAC_OVERRIDE))
44165 goto ok;
44166
44167 return ret;
44168@@ -638,7 +639,7 @@ static __always_inline int __do_follow_l
44169 cookie = dentry->d_inode->i_op->follow_link(dentry, nd);
44170 error = PTR_ERR(cookie);
44171 if (!IS_ERR(cookie)) {
44172- char *s = nd_get_link(nd);
44173+ const char *s = nd_get_link(nd);
44174 error = 0;
44175 if (s)
44176 error = __vfs_follow_link(nd, s);
44177@@ -669,6 +670,13 @@ static inline int do_follow_link(struct
44178 err = security_inode_follow_link(path->dentry, nd);
44179 if (err)
44180 goto loop;
44181+
44182+ if (gr_handle_follow_link(path->dentry->d_parent->d_inode,
44183+ path->dentry->d_inode, path->dentry, nd->path.mnt)) {
44184+ err = -EACCES;
44185+ goto loop;
44186+ }
44187+
44188 current->link_count++;
44189 current->total_link_count++;
44190 nd->depth++;
44191@@ -1016,11 +1024,18 @@ return_reval:
44192 break;
44193 }
44194 return_base:
44195+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
44196+ path_put(&nd->path);
44197+ return -ENOENT;
44198+ }
44199 return 0;
44200 out_dput:
44201 path_put_conditional(&next, nd);
44202 break;
44203 }
44204+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt))
44205+ err = -ENOENT;
44206+
44207 path_put(&nd->path);
44208 return_err:
44209 return err;
44210@@ -1091,13 +1106,20 @@ static int do_path_lookup(int dfd, const
44211 int retval = path_init(dfd, name, flags, nd);
44212 if (!retval)
44213 retval = path_walk(name, nd);
44214- if (unlikely(!retval && !audit_dummy_context() && nd->path.dentry &&
44215- nd->path.dentry->d_inode))
44216- audit_inode(name, nd->path.dentry);
44217+
44218+ if (likely(!retval)) {
44219+ if (nd->path.dentry && nd->path.dentry->d_inode) {
44220+ if (*name != '/' && !gr_chroot_fchdir(nd->path.dentry, nd->path.mnt))
44221+ retval = -ENOENT;
44222+ if (!audit_dummy_context())
44223+ audit_inode(name, nd->path.dentry);
44224+ }
44225+ }
44226 if (nd->root.mnt) {
44227 path_put(&nd->root);
44228 nd->root.mnt = NULL;
44229 }
44230+
44231 return retval;
44232 }
44233
44234@@ -1576,6 +1598,20 @@ int may_open(struct path *path, int acc_
44235 if (error)
44236 goto err_out;
44237
44238+
44239+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode)) {
44240+ error = -EPERM;
44241+ goto err_out;
44242+ }
44243+ if (gr_handle_rawio(inode)) {
44244+ error = -EPERM;
44245+ goto err_out;
44246+ }
44247+ if (!gr_acl_handle_open(dentry, path->mnt, flag)) {
44248+ error = -EACCES;
44249+ goto err_out;
44250+ }
44251+
44252 if (flag & O_TRUNC) {
44253 error = get_write_access(inode);
44254 if (error)
44255@@ -1621,12 +1657,19 @@ static int __open_namei_create(struct na
44256 int error;
44257 struct dentry *dir = nd->path.dentry;
44258
44259+ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, flag, mode)) {
44260+ error = -EACCES;
44261+ goto out_unlock;
44262+ }
44263+
44264 if (!IS_POSIXACL(dir->d_inode))
44265 mode &= ~current_umask();
44266 error = security_path_mknod(&nd->path, path->dentry, mode, 0);
44267 if (error)
44268 goto out_unlock;
44269 error = vfs_create(dir->d_inode, path->dentry, mode, nd);
44270+ if (!error)
44271+ gr_handle_create(path->dentry, nd->path.mnt);
44272 out_unlock:
44273 mutex_unlock(&dir->d_inode->i_mutex);
44274 dput(nd->path.dentry);
44275@@ -1709,6 +1752,22 @@ struct file *do_filp_open(int dfd, const
44276 &nd, flag);
44277 if (error)
44278 return ERR_PTR(error);
44279+
44280+ if (gr_handle_rofs_blockwrite(nd.path.dentry, nd.path.mnt, acc_mode)) {
44281+ error = -EPERM;
44282+ goto exit;
44283+ }
44284+
44285+ if (gr_handle_rawio(nd.path.dentry->d_inode)) {
44286+ error = -EPERM;
44287+ goto exit;
44288+ }
44289+
44290+ if (!gr_acl_handle_open(nd.path.dentry, nd.path.mnt, flag)) {
44291+ error = -EACCES;
44292+ goto exit;
44293+ }
44294+
44295 goto ok;
44296 }
44297
44298@@ -1795,6 +1854,14 @@ do_last:
44299 /*
44300 * It already exists.
44301 */
44302+
44303+ /* only check if O_CREAT is specified, all other checks need
44304+ to go into may_open */
44305+ if (gr_handle_fifo(path.dentry, path.mnt, dir, flag, acc_mode)) {
44306+ error = -EACCES;
44307+ goto exit_mutex_unlock;
44308+ }
44309+
44310 mutex_unlock(&dir->d_inode->i_mutex);
44311 audit_inode(pathname, path.dentry);
44312
44313@@ -1887,6 +1954,13 @@ do_link:
44314 error = security_inode_follow_link(path.dentry, &nd);
44315 if (error)
44316 goto exit_dput;
44317+
44318+ if (gr_handle_follow_link(path.dentry->d_parent->d_inode, path.dentry->d_inode,
44319+ path.dentry, nd.path.mnt)) {
44320+ error = -EACCES;
44321+ goto exit_dput;
44322+ }
44323+
44324 error = __do_follow_link(&path, &nd);
44325 if (error) {
44326 /* Does someone understand code flow here? Or it is only
44327@@ -2061,6 +2135,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
44328 error = may_mknod(mode);
44329 if (error)
44330 goto out_dput;
44331+
44332+ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) {
44333+ error = -EPERM;
44334+ goto out_dput;
44335+ }
44336+
44337+ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) {
44338+ error = -EACCES;
44339+ goto out_dput;
44340+ }
44341+
44342 error = mnt_want_write(nd.path.mnt);
44343 if (error)
44344 goto out_dput;
44345@@ -2081,6 +2166,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const
44346 }
44347 out_drop_write:
44348 mnt_drop_write(nd.path.mnt);
44349+
44350+ if (!error)
44351+ gr_handle_create(dentry, nd.path.mnt);
44352 out_dput:
44353 dput(dentry);
44354 out_unlock:
44355@@ -2134,6 +2222,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
44356 if (IS_ERR(dentry))
44357 goto out_unlock;
44358
44359+ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) {
44360+ error = -EACCES;
44361+ goto out_dput;
44362+ }
44363+
44364 if (!IS_POSIXACL(nd.path.dentry->d_inode))
44365 mode &= ~current_umask();
44366 error = mnt_want_write(nd.path.mnt);
44367@@ -2145,6 +2238,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const
44368 error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode);
44369 out_drop_write:
44370 mnt_drop_write(nd.path.mnt);
44371+
44372+ if (!error)
44373+ gr_handle_create(dentry, nd.path.mnt);
44374+
44375 out_dput:
44376 dput(dentry);
44377 out_unlock:
44378@@ -2226,6 +2323,8 @@ static long do_rmdir(int dfd, const char
44379 char * name;
44380 struct dentry *dentry;
44381 struct nameidata nd;
44382+ ino_t saved_ino = 0;
44383+ dev_t saved_dev = 0;
44384
44385 error = user_path_parent(dfd, pathname, &nd, &name);
44386 if (error)
44387@@ -2250,6 +2349,19 @@ static long do_rmdir(int dfd, const char
44388 error = PTR_ERR(dentry);
44389 if (IS_ERR(dentry))
44390 goto exit2;
44391+
44392+ if (dentry->d_inode != NULL) {
44393+ if (dentry->d_inode->i_nlink <= 1) {
44394+ saved_ino = dentry->d_inode->i_ino;
44395+ saved_dev = gr_get_dev_from_dentry(dentry);
44396+ }
44397+
44398+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
44399+ error = -EACCES;
44400+ goto exit3;
44401+ }
44402+ }
44403+
44404 error = mnt_want_write(nd.path.mnt);
44405 if (error)
44406 goto exit3;
44407@@ -2257,6 +2369,8 @@ static long do_rmdir(int dfd, const char
44408 if (error)
44409 goto exit4;
44410 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
44411+ if (!error && (saved_dev || saved_ino))
44412+ gr_handle_delete(saved_ino, saved_dev);
44413 exit4:
44414 mnt_drop_write(nd.path.mnt);
44415 exit3:
44416@@ -2318,6 +2432,8 @@ static long do_unlinkat(int dfd, const c
44417 struct dentry *dentry;
44418 struct nameidata nd;
44419 struct inode *inode = NULL;
44420+ ino_t saved_ino = 0;
44421+ dev_t saved_dev = 0;
44422
44423 error = user_path_parent(dfd, pathname, &nd, &name);
44424 if (error)
44425@@ -2337,8 +2453,19 @@ static long do_unlinkat(int dfd, const c
44426 if (nd.last.name[nd.last.len])
44427 goto slashes;
44428 inode = dentry->d_inode;
44429- if (inode)
44430+ if (inode) {
44431+ if (inode->i_nlink <= 1) {
44432+ saved_ino = inode->i_ino;
44433+ saved_dev = gr_get_dev_from_dentry(dentry);
44434+ }
44435+
44436 atomic_inc(&inode->i_count);
44437+
44438+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
44439+ error = -EACCES;
44440+ goto exit2;
44441+ }
44442+ }
44443 error = mnt_want_write(nd.path.mnt);
44444 if (error)
44445 goto exit2;
44446@@ -2346,6 +2473,8 @@ static long do_unlinkat(int dfd, const c
44447 if (error)
44448 goto exit3;
44449 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
44450+ if (!error && (saved_ino || saved_dev))
44451+ gr_handle_delete(saved_ino, saved_dev);
44452 exit3:
44453 mnt_drop_write(nd.path.mnt);
44454 exit2:
44455@@ -2424,6 +2553,11 @@ SYSCALL_DEFINE3(symlinkat, const char __
44456 if (IS_ERR(dentry))
44457 goto out_unlock;
44458
44459+ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) {
44460+ error = -EACCES;
44461+ goto out_dput;
44462+ }
44463+
44464 error = mnt_want_write(nd.path.mnt);
44465 if (error)
44466 goto out_dput;
44467@@ -2431,6 +2565,8 @@ SYSCALL_DEFINE3(symlinkat, const char __
44468 if (error)
44469 goto out_drop_write;
44470 error = vfs_symlink(nd.path.dentry->d_inode, dentry, from);
44471+ if (!error)
44472+ gr_handle_create(dentry, nd.path.mnt);
44473 out_drop_write:
44474 mnt_drop_write(nd.path.mnt);
44475 out_dput:
44476@@ -2524,6 +2660,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44477 error = PTR_ERR(new_dentry);
44478 if (IS_ERR(new_dentry))
44479 goto out_unlock;
44480+
44481+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
44482+ old_path.dentry->d_inode,
44483+ old_path.dentry->d_inode->i_mode, to)) {
44484+ error = -EACCES;
44485+ goto out_dput;
44486+ }
44487+
44488+ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt,
44489+ old_path.dentry, old_path.mnt, to)) {
44490+ error = -EACCES;
44491+ goto out_dput;
44492+ }
44493+
44494 error = mnt_want_write(nd.path.mnt);
44495 if (error)
44496 goto out_dput;
44497@@ -2531,6 +2681,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con
44498 if (error)
44499 goto out_drop_write;
44500 error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry);
44501+ if (!error)
44502+ gr_handle_create(new_dentry, nd.path.mnt);
44503 out_drop_write:
44504 mnt_drop_write(nd.path.mnt);
44505 out_dput:
44506@@ -2708,6 +2860,8 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44507 char *to;
44508 int error;
44509
44510+ pax_track_stack();
44511+
44512 error = user_path_parent(olddfd, oldname, &oldnd, &from);
44513 if (error)
44514 goto exit;
44515@@ -2764,6 +2918,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44516 if (new_dentry == trap)
44517 goto exit5;
44518
44519+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
44520+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
44521+ to);
44522+ if (error)
44523+ goto exit5;
44524+
44525 error = mnt_want_write(oldnd.path.mnt);
44526 if (error)
44527 goto exit5;
44528@@ -2773,6 +2933,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c
44529 goto exit6;
44530 error = vfs_rename(old_dir->d_inode, old_dentry,
44531 new_dir->d_inode, new_dentry);
44532+ if (!error)
44533+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
44534+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
44535 exit6:
44536 mnt_drop_write(oldnd.path.mnt);
44537 exit5:
44538@@ -2798,6 +2961,8 @@ SYSCALL_DEFINE2(rename, const char __use
44539
44540 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
44541 {
44542+ char tmpbuf[64];
44543+ const char *newlink;
44544 int len;
44545
44546 len = PTR_ERR(link);
44547@@ -2807,7 +2972,14 @@ int vfs_readlink(struct dentry *dentry,
44548 len = strlen(link);
44549 if (len > (unsigned) buflen)
44550 len = buflen;
44551- if (copy_to_user(buffer, link, len))
44552+
44553+ if (len < sizeof(tmpbuf)) {
44554+ memcpy(tmpbuf, link, len);
44555+ newlink = tmpbuf;
44556+ } else
44557+ newlink = link;
44558+
44559+ if (copy_to_user(buffer, newlink, len))
44560 len = -EFAULT;
44561 out:
44562 return len;
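
The vfs_readlink() hunk above stages short link targets through a 64-byte on-stack buffer before the copy_to_user() call. Below is a minimal userspace sketch of that bounce-buffer pattern; stage_copy() and TMPBUF_LEN are illustrative names, not kernel APIs, and plain memcpy() stands in for copy_to_user().

#include <stdio.h>
#include <string.h>

#define TMPBUF_LEN 64

static size_t stage_copy(char *dst, size_t dstlen, const char *src, size_t len)
{
        char tmpbuf[TMPBUF_LEN];
        const char *from = src;

        if (len > dstlen)
                len = dstlen;
        /* short payloads are bounced through the stack buffer, as in the hunk */
        if (len < sizeof(tmpbuf)) {
                memcpy(tmpbuf, src, len);
                from = tmpbuf;
        }
        memcpy(dst, from, len);         /* stands in for copy_to_user() */
        return len;
}

int main(void)
{
        char out[32];
        size_t n = stage_copy(out, sizeof(out) - 1, "target/path", strlen("target/path"));

        out[n] = '\0';
        printf("%s\n", out);
        return 0;
}
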
44563diff -urNp linux-2.6.32.43/fs/namespace.c linux-2.6.32.43/fs/namespace.c
44564--- linux-2.6.32.43/fs/namespace.c 2011-03-27 14:31:47.000000000 -0400
44565+++ linux-2.6.32.43/fs/namespace.c 2011-04-17 15:56:46.000000000 -0400
44566@@ -1083,6 +1083,9 @@ static int do_umount(struct vfsmount *mn
44567 if (!(sb->s_flags & MS_RDONLY))
44568 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
44569 up_write(&sb->s_umount);
44570+
44571+ gr_log_remount(mnt->mnt_devname, retval);
44572+
44573 return retval;
44574 }
44575
44576@@ -1104,6 +1107,9 @@ static int do_umount(struct vfsmount *mn
44577 security_sb_umount_busy(mnt);
44578 up_write(&namespace_sem);
44579 release_mounts(&umount_list);
44580+
44581+ gr_log_unmount(mnt->mnt_devname, retval);
44582+
44583 return retval;
44584 }
44585
44586@@ -1962,6 +1968,16 @@ long do_mount(char *dev_name, char *dir_
44587 if (retval)
44588 goto dput_out;
44589
44590+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
44591+ retval = -EPERM;
44592+ goto dput_out;
44593+ }
44594+
44595+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
44596+ retval = -EPERM;
44597+ goto dput_out;
44598+ }
44599+
44600 if (flags & MS_REMOUNT)
44601 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
44602 data_page);
44603@@ -1976,6 +1992,9 @@ long do_mount(char *dev_name, char *dir_
44604 dev_name, data_page);
44605 dput_out:
44606 path_put(&path);
44607+
44608+ gr_log_mount(dev_name, dir_name, retval);
44609+
44610 return retval;
44611 }
44612
44613@@ -2182,6 +2201,12 @@ SYSCALL_DEFINE2(pivot_root, const char _
44614 goto out1;
44615 }
44616
44617+ if (gr_handle_chroot_pivot()) {
44618+ error = -EPERM;
44619+ path_put(&old);
44620+ goto out1;
44621+ }
44622+
44623 read_lock(&current->fs->lock);
44624 root = current->fs->root;
44625 path_get(&current->fs->root);
44626diff -urNp linux-2.6.32.43/fs/ncpfs/dir.c linux-2.6.32.43/fs/ncpfs/dir.c
44627--- linux-2.6.32.43/fs/ncpfs/dir.c 2011-03-27 14:31:47.000000000 -0400
44628+++ linux-2.6.32.43/fs/ncpfs/dir.c 2011-05-16 21:46:57.000000000 -0400
44629@@ -275,6 +275,8 @@ __ncp_lookup_validate(struct dentry *den
44630 int res, val = 0, len;
44631 __u8 __name[NCP_MAXPATHLEN + 1];
44632
44633+ pax_track_stack();
44634+
44635 parent = dget_parent(dentry);
44636 dir = parent->d_inode;
44637
44638@@ -799,6 +801,8 @@ static struct dentry *ncp_lookup(struct
44639 int error, res, len;
44640 __u8 __name[NCP_MAXPATHLEN + 1];
44641
44642+ pax_track_stack();
44643+
44644 lock_kernel();
44645 error = -EIO;
44646 if (!ncp_conn_valid(server))
44647@@ -883,10 +887,12 @@ int ncp_create_new(struct inode *dir, st
44648 int error, result, len;
44649 int opmode;
44650 __u8 __name[NCP_MAXPATHLEN + 1];
44651-
44652+
44653 PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
44654 dentry->d_parent->d_name.name, dentry->d_name.name, mode);
44655
44656+ pax_track_stack();
44657+
44658 error = -EIO;
44659 lock_kernel();
44660 if (!ncp_conn_valid(server))
44661@@ -952,6 +958,8 @@ static int ncp_mkdir(struct inode *dir,
44662 int error, len;
44663 __u8 __name[NCP_MAXPATHLEN + 1];
44664
44665+ pax_track_stack();
44666+
44667 DPRINTK("ncp_mkdir: making %s/%s\n",
44668 dentry->d_parent->d_name.name, dentry->d_name.name);
44669
44670@@ -960,6 +968,8 @@ static int ncp_mkdir(struct inode *dir,
44671 if (!ncp_conn_valid(server))
44672 goto out;
44673
44674+ pax_track_stack();
44675+
44676 ncp_age_dentry(server, dentry);
44677 len = sizeof(__name);
44678 error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
44679@@ -1114,6 +1124,8 @@ static int ncp_rename(struct inode *old_
44680 int old_len, new_len;
44681 __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
44682
44683+ pax_track_stack();
44684+
44685 DPRINTK("ncp_rename: %s/%s to %s/%s\n",
44686 old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
44687 new_dentry->d_parent->d_name.name, new_dentry->d_name.name);
44688diff -urNp linux-2.6.32.43/fs/ncpfs/inode.c linux-2.6.32.43/fs/ncpfs/inode.c
44689--- linux-2.6.32.43/fs/ncpfs/inode.c 2011-03-27 14:31:47.000000000 -0400
44690+++ linux-2.6.32.43/fs/ncpfs/inode.c 2011-05-16 21:46:57.000000000 -0400
44691@@ -445,6 +445,8 @@ static int ncp_fill_super(struct super_b
44692 #endif
44693 struct ncp_entry_info finfo;
44694
44695+ pax_track_stack();
44696+
44697 data.wdog_pid = NULL;
44698 server = kzalloc(sizeof(struct ncp_server), GFP_KERNEL);
44699 if (!server)
44700diff -urNp linux-2.6.32.43/fs/nfs/inode.c linux-2.6.32.43/fs/nfs/inode.c
44701--- linux-2.6.32.43/fs/nfs/inode.c 2011-05-10 22:12:01.000000000 -0400
44702+++ linux-2.6.32.43/fs/nfs/inode.c 2011-07-06 19:53:33.000000000 -0400
44703@@ -156,7 +156,7 @@ static void nfs_zap_caches_locked(struct
44704 nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
44705 nfsi->attrtimeo_timestamp = jiffies;
44706
44707- memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
44708+ memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_I(inode)->cookieverf));
44709 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
44710 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
44711 else
44712@@ -973,16 +973,16 @@ static int nfs_size_need_update(const st
44713 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
44714 }
44715
44716-static atomic_long_t nfs_attr_generation_counter;
44717+static atomic_long_unchecked_t nfs_attr_generation_counter;
44718
44719 static unsigned long nfs_read_attr_generation_counter(void)
44720 {
44721- return atomic_long_read(&nfs_attr_generation_counter);
44722+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
44723 }
44724
44725 unsigned long nfs_inc_attr_generation_counter(void)
44726 {
44727- return atomic_long_inc_return(&nfs_attr_generation_counter);
44728+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
44729 }
44730
44731 void nfs_fattr_init(struct nfs_fattr *fattr)
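
The fs/nfs/inode.c hunks above switch the attribute-generation counter to the *_unchecked atomic variants, which under PaX's REFCOUNT feature are exempt from overflow checking; a pure generation counter is only ever compared for equality, so wraparound is harmless there (that reading is an assumption, the patch itself carries no rationale). A small userspace sketch of such a counter, with C11 atomics standing in for the kernel atomic_long_* helpers:

#include <stdatomic.h>
#include <stdio.h>

static atomic_ulong attr_generation;

static unsigned long read_generation(void)
{
        return atomic_load(&attr_generation);
}

static unsigned long bump_generation(void)
{
        /* wraparound is harmless: callers only compare generations for equality */
        return atomic_fetch_add(&attr_generation, 1) + 1;
}

int main(void)
{
        unsigned long cached = read_generation();

        bump_generation();
        printf("cached attributes are %s\n",
               cached == read_generation() ? "current" : "stale");
        return 0;
}
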
44732diff -urNp linux-2.6.32.43/fs/nfsd/lockd.c linux-2.6.32.43/fs/nfsd/lockd.c
44733--- linux-2.6.32.43/fs/nfsd/lockd.c 2011-04-17 17:00:52.000000000 -0400
44734+++ linux-2.6.32.43/fs/nfsd/lockd.c 2011-04-17 17:03:15.000000000 -0400
44735@@ -66,7 +66,7 @@ nlm_fclose(struct file *filp)
44736 fput(filp);
44737 }
44738
44739-static struct nlmsvc_binding nfsd_nlm_ops = {
44740+static const struct nlmsvc_binding nfsd_nlm_ops = {
44741 .fopen = nlm_fopen, /* open file for locking */
44742 .fclose = nlm_fclose, /* close file */
44743 };
44744diff -urNp linux-2.6.32.43/fs/nfsd/nfs4state.c linux-2.6.32.43/fs/nfsd/nfs4state.c
44745--- linux-2.6.32.43/fs/nfsd/nfs4state.c 2011-03-27 14:31:47.000000000 -0400
44746+++ linux-2.6.32.43/fs/nfsd/nfs4state.c 2011-05-16 21:46:57.000000000 -0400
44747@@ -3457,6 +3457,8 @@ nfsd4_lock(struct svc_rqst *rqstp, struc
44748 unsigned int cmd;
44749 int err;
44750
44751+ pax_track_stack();
44752+
44753 dprintk("NFSD: nfsd4_lock: start=%Ld length=%Ld\n",
44754 (long long) lock->lk_offset,
44755 (long long) lock->lk_length);
44756diff -urNp linux-2.6.32.43/fs/nfsd/nfs4xdr.c linux-2.6.32.43/fs/nfsd/nfs4xdr.c
44757--- linux-2.6.32.43/fs/nfsd/nfs4xdr.c 2011-03-27 14:31:47.000000000 -0400
44758+++ linux-2.6.32.43/fs/nfsd/nfs4xdr.c 2011-05-16 21:46:57.000000000 -0400
44759@@ -1751,6 +1751,8 @@ nfsd4_encode_fattr(struct svc_fh *fhp, s
44760 struct nfsd4_compoundres *resp = rqstp->rq_resp;
44761 u32 minorversion = resp->cstate.minorversion;
44762
44763+ pax_track_stack();
44764+
44765 BUG_ON(bmval1 & NFSD_WRITEONLY_ATTRS_WORD1);
44766 BUG_ON(bmval0 & ~nfsd_suppattrs0(minorversion));
44767 BUG_ON(bmval1 & ~nfsd_suppattrs1(minorversion));
44768diff -urNp linux-2.6.32.43/fs/nfsd/vfs.c linux-2.6.32.43/fs/nfsd/vfs.c
44769--- linux-2.6.32.43/fs/nfsd/vfs.c 2011-05-10 22:12:01.000000000 -0400
44770+++ linux-2.6.32.43/fs/nfsd/vfs.c 2011-05-10 22:12:33.000000000 -0400
44771@@ -937,7 +937,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st
44772 } else {
44773 oldfs = get_fs();
44774 set_fs(KERNEL_DS);
44775- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
44776+ host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset);
44777 set_fs(oldfs);
44778 }
44779
44780@@ -1060,7 +1060,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s
44781
44782 /* Write the data. */
44783 oldfs = get_fs(); set_fs(KERNEL_DS);
44784- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset);
44785+ host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset);
44786 set_fs(oldfs);
44787 if (host_err < 0)
44788 goto out_nfserr;
44789@@ -1542,7 +1542,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st
44790 */
44791
44792 oldfs = get_fs(); set_fs(KERNEL_DS);
44793- host_err = inode->i_op->readlink(dentry, buf, *lenp);
44794+ host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp);
44795 set_fs(oldfs);
44796
44797 if (host_err < 0)
44798diff -urNp linux-2.6.32.43/fs/nilfs2/ioctl.c linux-2.6.32.43/fs/nilfs2/ioctl.c
44799--- linux-2.6.32.43/fs/nilfs2/ioctl.c 2011-03-27 14:31:47.000000000 -0400
44800+++ linux-2.6.32.43/fs/nilfs2/ioctl.c 2011-05-04 17:56:28.000000000 -0400
44801@@ -480,7 +480,7 @@ static int nilfs_ioctl_clean_segments(st
44802 unsigned int cmd, void __user *argp)
44803 {
44804 struct nilfs_argv argv[5];
44805- const static size_t argsz[5] = {
44806+ static const size_t argsz[5] = {
44807 sizeof(struct nilfs_vdesc),
44808 sizeof(struct nilfs_period),
44809 sizeof(__u64),
44810diff -urNp linux-2.6.32.43/fs/notify/dnotify/dnotify.c linux-2.6.32.43/fs/notify/dnotify/dnotify.c
44811--- linux-2.6.32.43/fs/notify/dnotify/dnotify.c 2011-03-27 14:31:47.000000000 -0400
44812+++ linux-2.6.32.43/fs/notify/dnotify/dnotify.c 2011-04-17 15:56:46.000000000 -0400
44813@@ -173,7 +173,7 @@ static void dnotify_free_mark(struct fsn
44814 kmem_cache_free(dnotify_mark_entry_cache, dnentry);
44815 }
44816
44817-static struct fsnotify_ops dnotify_fsnotify_ops = {
44818+static const struct fsnotify_ops dnotify_fsnotify_ops = {
44819 .handle_event = dnotify_handle_event,
44820 .should_send_event = dnotify_should_send_event,
44821 .free_group_priv = NULL,
44822diff -urNp linux-2.6.32.43/fs/notify/notification.c linux-2.6.32.43/fs/notify/notification.c
44823--- linux-2.6.32.43/fs/notify/notification.c 2011-03-27 14:31:47.000000000 -0400
44824+++ linux-2.6.32.43/fs/notify/notification.c 2011-05-04 17:56:28.000000000 -0400
44825@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event
44826 * get set to 0 so it will never get 'freed'
44827 */
44828 static struct fsnotify_event q_overflow_event;
44829-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44830+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
44831
44832 /**
44833 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
44834@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = A
44835 */
44836 u32 fsnotify_get_cookie(void)
44837 {
44838- return atomic_inc_return(&fsnotify_sync_cookie);
44839+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
44840 }
44841 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
44842
44843diff -urNp linux-2.6.32.43/fs/ntfs/dir.c linux-2.6.32.43/fs/ntfs/dir.c
44844--- linux-2.6.32.43/fs/ntfs/dir.c 2011-03-27 14:31:47.000000000 -0400
44845+++ linux-2.6.32.43/fs/ntfs/dir.c 2011-04-17 15:56:46.000000000 -0400
44846@@ -1328,7 +1328,7 @@ find_next_index_buffer:
44847 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
44848 ~(s64)(ndir->itype.index.block_size - 1)));
44849 /* Bounds checks. */
44850- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44851+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
44852 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
44853 "inode 0x%lx or driver bug.", vdir->i_ino);
44854 goto err_out;
44855diff -urNp linux-2.6.32.43/fs/ntfs/file.c linux-2.6.32.43/fs/ntfs/file.c
44856--- linux-2.6.32.43/fs/ntfs/file.c 2011-03-27 14:31:47.000000000 -0400
44857+++ linux-2.6.32.43/fs/ntfs/file.c 2011-04-17 15:56:46.000000000 -0400
44858@@ -2243,6 +2243,6 @@ const struct inode_operations ntfs_file_
44859 #endif /* NTFS_RW */
44860 };
44861
44862-const struct file_operations ntfs_empty_file_ops = {};
44863+const struct file_operations ntfs_empty_file_ops __read_only;
44864
44865-const struct inode_operations ntfs_empty_inode_ops = {};
44866+const struct inode_operations ntfs_empty_inode_ops __read_only;
44867diff -urNp linux-2.6.32.43/fs/ocfs2/cluster/masklog.c linux-2.6.32.43/fs/ocfs2/cluster/masklog.c
44868--- linux-2.6.32.43/fs/ocfs2/cluster/masklog.c 2011-03-27 14:31:47.000000000 -0400
44869+++ linux-2.6.32.43/fs/ocfs2/cluster/masklog.c 2011-04-17 15:56:46.000000000 -0400
44870@@ -135,7 +135,7 @@ static ssize_t mlog_store(struct kobject
44871 return mlog_mask_store(mlog_attr->mask, buf, count);
44872 }
44873
44874-static struct sysfs_ops mlog_attr_ops = {
44875+static const struct sysfs_ops mlog_attr_ops = {
44876 .show = mlog_show,
44877 .store = mlog_store,
44878 };
44879diff -urNp linux-2.6.32.43/fs/ocfs2/localalloc.c linux-2.6.32.43/fs/ocfs2/localalloc.c
44880--- linux-2.6.32.43/fs/ocfs2/localalloc.c 2011-03-27 14:31:47.000000000 -0400
44881+++ linux-2.6.32.43/fs/ocfs2/localalloc.c 2011-04-17 15:56:46.000000000 -0400
44882@@ -1188,7 +1188,7 @@ static int ocfs2_local_alloc_slide_windo
44883 goto bail;
44884 }
44885
44886- atomic_inc(&osb->alloc_stats.moves);
44887+ atomic_inc_unchecked(&osb->alloc_stats.moves);
44888
44889 status = 0;
44890 bail:
44891diff -urNp linux-2.6.32.43/fs/ocfs2/namei.c linux-2.6.32.43/fs/ocfs2/namei.c
44892--- linux-2.6.32.43/fs/ocfs2/namei.c 2011-03-27 14:31:47.000000000 -0400
44893+++ linux-2.6.32.43/fs/ocfs2/namei.c 2011-05-16 21:46:57.000000000 -0400
44894@@ -1043,6 +1043,8 @@ static int ocfs2_rename(struct inode *ol
44895 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
44896 struct ocfs2_dir_lookup_result target_insert = { NULL, };
44897
44898+ pax_track_stack();
44899+
44900 /* At some point it might be nice to break this function up a
44901 * bit. */
44902
44903diff -urNp linux-2.6.32.43/fs/ocfs2/ocfs2.h linux-2.6.32.43/fs/ocfs2/ocfs2.h
44904--- linux-2.6.32.43/fs/ocfs2/ocfs2.h 2011-03-27 14:31:47.000000000 -0400
44905+++ linux-2.6.32.43/fs/ocfs2/ocfs2.h 2011-04-17 15:56:46.000000000 -0400
44906@@ -217,11 +217,11 @@ enum ocfs2_vol_state
44907
44908 struct ocfs2_alloc_stats
44909 {
44910- atomic_t moves;
44911- atomic_t local_data;
44912- atomic_t bitmap_data;
44913- atomic_t bg_allocs;
44914- atomic_t bg_extends;
44915+ atomic_unchecked_t moves;
44916+ atomic_unchecked_t local_data;
44917+ atomic_unchecked_t bitmap_data;
44918+ atomic_unchecked_t bg_allocs;
44919+ atomic_unchecked_t bg_extends;
44920 };
44921
44922 enum ocfs2_local_alloc_state
44923diff -urNp linux-2.6.32.43/fs/ocfs2/suballoc.c linux-2.6.32.43/fs/ocfs2/suballoc.c
44924--- linux-2.6.32.43/fs/ocfs2/suballoc.c 2011-03-27 14:31:47.000000000 -0400
44925+++ linux-2.6.32.43/fs/ocfs2/suballoc.c 2011-04-17 15:56:46.000000000 -0400
44926@@ -623,7 +623,7 @@ static int ocfs2_reserve_suballoc_bits(s
44927 mlog_errno(status);
44928 goto bail;
44929 }
44930- atomic_inc(&osb->alloc_stats.bg_extends);
44931+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
44932
44933 /* You should never ask for this much metadata */
44934 BUG_ON(bits_wanted >
44935@@ -1654,7 +1654,7 @@ int ocfs2_claim_metadata(struct ocfs2_su
44936 mlog_errno(status);
44937 goto bail;
44938 }
44939- atomic_inc(&osb->alloc_stats.bg_allocs);
44940+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
44941
44942 *blkno_start = bg_blkno + (u64) *suballoc_bit_start;
44943 ac->ac_bits_given += (*num_bits);
44944@@ -1728,7 +1728,7 @@ int ocfs2_claim_new_inode(struct ocfs2_s
44945 mlog_errno(status);
44946 goto bail;
44947 }
44948- atomic_inc(&osb->alloc_stats.bg_allocs);
44949+ atomic_inc_unchecked(&osb->alloc_stats.bg_allocs);
44950
44951 BUG_ON(num_bits != 1);
44952
44953@@ -1830,7 +1830,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
44954 cluster_start,
44955 num_clusters);
44956 if (!status)
44957- atomic_inc(&osb->alloc_stats.local_data);
44958+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
44959 } else {
44960 if (min_clusters > (osb->bitmap_cpg - 1)) {
44961 /* The only paths asking for contiguousness
44962@@ -1858,7 +1858,7 @@ int __ocfs2_claim_clusters(struct ocfs2_
44963 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
44964 bg_blkno,
44965 bg_bit_off);
44966- atomic_inc(&osb->alloc_stats.bitmap_data);
44967+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
44968 }
44969 }
44970 if (status < 0) {
44971diff -urNp linux-2.6.32.43/fs/ocfs2/super.c linux-2.6.32.43/fs/ocfs2/super.c
44972--- linux-2.6.32.43/fs/ocfs2/super.c 2011-03-27 14:31:47.000000000 -0400
44973+++ linux-2.6.32.43/fs/ocfs2/super.c 2011-04-17 15:56:46.000000000 -0400
44974@@ -284,11 +284,11 @@ static int ocfs2_osb_dump(struct ocfs2_s
44975 "%10s => GlobalAllocs: %d LocalAllocs: %d "
44976 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
44977 "Stats",
44978- atomic_read(&osb->alloc_stats.bitmap_data),
44979- atomic_read(&osb->alloc_stats.local_data),
44980- atomic_read(&osb->alloc_stats.bg_allocs),
44981- atomic_read(&osb->alloc_stats.moves),
44982- atomic_read(&osb->alloc_stats.bg_extends));
44983+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
44984+ atomic_read_unchecked(&osb->alloc_stats.local_data),
44985+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
44986+ atomic_read_unchecked(&osb->alloc_stats.moves),
44987+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
44988
44989 out += snprintf(buf + out, len - out,
44990 "%10s => State: %u Descriptor: %llu Size: %u bits "
44991@@ -2002,11 +2002,11 @@ static int ocfs2_initialize_super(struct
44992 spin_lock_init(&osb->osb_xattr_lock);
44993 ocfs2_init_inode_steal_slot(osb);
44994
44995- atomic_set(&osb->alloc_stats.moves, 0);
44996- atomic_set(&osb->alloc_stats.local_data, 0);
44997- atomic_set(&osb->alloc_stats.bitmap_data, 0);
44998- atomic_set(&osb->alloc_stats.bg_allocs, 0);
44999- atomic_set(&osb->alloc_stats.bg_extends, 0);
45000+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
45001+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
45002+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
45003+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
45004+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
45005
45006 /* Copy the blockcheck stats from the superblock probe */
45007 osb->osb_ecc_stats = *stats;
45008diff -urNp linux-2.6.32.43/fs/open.c linux-2.6.32.43/fs/open.c
45009--- linux-2.6.32.43/fs/open.c 2011-03-27 14:31:47.000000000 -0400
45010+++ linux-2.6.32.43/fs/open.c 2011-04-17 15:56:46.000000000 -0400
45011@@ -275,6 +275,10 @@ static long do_sys_truncate(const char _
45012 error = locks_verify_truncate(inode, NULL, length);
45013 if (!error)
45014 error = security_path_truncate(&path, length, 0);
45015+
45016+ if (!error && !gr_acl_handle_truncate(path.dentry, path.mnt))
45017+ error = -EACCES;
45018+
45019 if (!error) {
45020 vfs_dq_init(inode);
45021 error = do_truncate(path.dentry, length, 0, NULL);
45022@@ -511,6 +515,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con
45023 if (__mnt_is_readonly(path.mnt))
45024 res = -EROFS;
45025
45026+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
45027+ res = -EACCES;
45028+
45029 out_path_release:
45030 path_put(&path);
45031 out:
45032@@ -537,6 +544,8 @@ SYSCALL_DEFINE1(chdir, const char __user
45033 if (error)
45034 goto dput_and_out;
45035
45036+ gr_log_chdir(path.dentry, path.mnt);
45037+
45038 set_fs_pwd(current->fs, &path);
45039
45040 dput_and_out:
45041@@ -563,6 +572,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd
45042 goto out_putf;
45043
45044 error = inode_permission(inode, MAY_EXEC | MAY_ACCESS);
45045+
45046+ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt))
45047+ error = -EPERM;
45048+
45049+ if (!error)
45050+ gr_log_chdir(file->f_path.dentry, file->f_path.mnt);
45051+
45052 if (!error)
45053 set_fs_pwd(current->fs, &file->f_path);
45054 out_putf:
45055@@ -588,7 +604,18 @@ SYSCALL_DEFINE1(chroot, const char __use
45056 if (!capable(CAP_SYS_CHROOT))
45057 goto dput_and_out;
45058
45059+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
45060+ goto dput_and_out;
45061+
45062+ if (gr_handle_chroot_caps(&path)) {
45063+ error = -ENOMEM;
45064+ goto dput_and_out;
45065+ }
45066+
45067 set_fs_root(current->fs, &path);
45068+
45069+ gr_handle_chroot_chdir(&path);
45070+
45071 error = 0;
45072 dput_and_out:
45073 path_put(&path);
45074@@ -616,12 +643,27 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd
45075 err = mnt_want_write_file(file);
45076 if (err)
45077 goto out_putf;
45078+
45079 mutex_lock(&inode->i_mutex);
45080+
45081+ if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) {
45082+ err = -EACCES;
45083+ goto out_unlock;
45084+ }
45085+
45086 if (mode == (mode_t) -1)
45087 mode = inode->i_mode;
45088+
45089+ if (gr_handle_chroot_chmod(dentry, file->f_path.mnt, mode)) {
45090+ err = -EPERM;
45091+ goto out_unlock;
45092+ }
45093+
45094 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
45095 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
45096 err = notify_change(dentry, &newattrs);
45097+
45098+out_unlock:
45099 mutex_unlock(&inode->i_mutex);
45100 mnt_drop_write(file->f_path.mnt);
45101 out_putf:
45102@@ -645,12 +687,27 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons
45103 error = mnt_want_write(path.mnt);
45104 if (error)
45105 goto dput_and_out;
45106+
45107 mutex_lock(&inode->i_mutex);
45108+
45109+ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) {
45110+ error = -EACCES;
45111+ goto out_unlock;
45112+ }
45113+
45114 if (mode == (mode_t) -1)
45115 mode = inode->i_mode;
45116+
45117+ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) {
45118+ error = -EACCES;
45119+ goto out_unlock;
45120+ }
45121+
45122 newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
45123 newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
45124 error = notify_change(path.dentry, &newattrs);
45125+
45126+out_unlock:
45127 mutex_unlock(&inode->i_mutex);
45128 mnt_drop_write(path.mnt);
45129 dput_and_out:
45130@@ -664,12 +721,15 @@ SYSCALL_DEFINE2(chmod, const char __user
45131 return sys_fchmodat(AT_FDCWD, filename, mode);
45132 }
45133
45134-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
45135+static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
45136 {
45137 struct inode *inode = dentry->d_inode;
45138 int error;
45139 struct iattr newattrs;
45140
45141+ if (!gr_acl_handle_chown(dentry, mnt))
45142+ return -EACCES;
45143+
45144 newattrs.ia_valid = ATTR_CTIME;
45145 if (user != (uid_t) -1) {
45146 newattrs.ia_valid |= ATTR_UID;
45147@@ -700,7 +760,7 @@ SYSCALL_DEFINE3(chown, const char __user
45148 error = mnt_want_write(path.mnt);
45149 if (error)
45150 goto out_release;
45151- error = chown_common(path.dentry, user, group);
45152+ error = chown_common(path.dentry, user, group, path.mnt);
45153 mnt_drop_write(path.mnt);
45154 out_release:
45155 path_put(&path);
45156@@ -725,7 +785,7 @@ SYSCALL_DEFINE5(fchownat, int, dfd, cons
45157 error = mnt_want_write(path.mnt);
45158 if (error)
45159 goto out_release;
45160- error = chown_common(path.dentry, user, group);
45161+ error = chown_common(path.dentry, user, group, path.mnt);
45162 mnt_drop_write(path.mnt);
45163 out_release:
45164 path_put(&path);
45165@@ -744,7 +804,7 @@ SYSCALL_DEFINE3(lchown, const char __use
45166 error = mnt_want_write(path.mnt);
45167 if (error)
45168 goto out_release;
45169- error = chown_common(path.dentry, user, group);
45170+ error = chown_common(path.dentry, user, group, path.mnt);
45171 mnt_drop_write(path.mnt);
45172 out_release:
45173 path_put(&path);
45174@@ -767,7 +827,7 @@ SYSCALL_DEFINE3(fchown, unsigned int, fd
45175 goto out_fput;
45176 dentry = file->f_path.dentry;
45177 audit_inode(NULL, dentry);
45178- error = chown_common(dentry, user, group);
45179+ error = chown_common(dentry, user, group, file->f_path.mnt);
45180 mnt_drop_write(file->f_path.mnt);
45181 out_fput:
45182 fput(file);
45183@@ -1036,7 +1096,10 @@ long do_sys_open(int dfd, const char __u
45184 if (!IS_ERR(tmp)) {
45185 fd = get_unused_fd_flags(flags);
45186 if (fd >= 0) {
45187- struct file *f = do_filp_open(dfd, tmp, flags, mode, 0);
45188+ struct file *f;
45189+ /* don't allow to be set by userland */
45190+ flags &= ~FMODE_GREXEC;
45191+ f = do_filp_open(dfd, tmp, flags, mode, 0);
45192 if (IS_ERR(f)) {
45193 put_unused_fd(fd);
45194 fd = PTR_ERR(f);
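
The last fs/open.c hunk scrubs a kernel-internal mode bit (FMODE_GREXEC) out of the userland-supplied open flags before do_filp_open() runs. A tiny sketch of that scrubbing step; FAKE_FMODE_INTERNAL is a made-up bit standing in for the real flag, which this sketch does not try to reproduce.

#include <stdio.h>

#define FAKE_FMODE_INTERNAL 0x4000000u  /* made-up stand-in for FMODE_GREXEC */

static unsigned int scrub_open_flags(unsigned int flags)
{
        /* kernel-internal bits must never arrive from userland */
        return flags & ~FAKE_FMODE_INTERNAL;
}

int main(void)
{
        unsigned int from_user = 0x1u | FAKE_FMODE_INTERNAL;

        printf("0x%x -> 0x%x\n", from_user, scrub_open_flags(from_user));
        return 0;
}
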
45195diff -urNp linux-2.6.32.43/fs/partitions/ldm.c linux-2.6.32.43/fs/partitions/ldm.c
45196--- linux-2.6.32.43/fs/partitions/ldm.c 2011-06-25 12:55:34.000000000 -0400
45197+++ linux-2.6.32.43/fs/partitions/ldm.c 2011-06-25 12:56:37.000000000 -0400
45198@@ -1311,6 +1311,7 @@ static bool ldm_frag_add (const u8 *data
45199 ldm_error ("A VBLK claims to have %d parts.", num);
45200 return false;
45201 }
45202+
45203 if (rec >= num) {
45204 ldm_error("REC value (%d) exceeds NUM value (%d)", rec, num);
45205 return false;
45206@@ -1322,7 +1323,7 @@ static bool ldm_frag_add (const u8 *data
45207 goto found;
45208 }
45209
45210- f = kmalloc (sizeof (*f) + size*num, GFP_KERNEL);
45211+ f = kmalloc (size*num + sizeof (*f), GFP_KERNEL);
45212 if (!f) {
45213 ldm_crit ("Out of memory.");
45214 return false;
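
The ldm_frag_add() hunk reorders the kmalloc() size expression; whatever the exact motivation (likely interaction with the patch set's size-overflow instrumentation, though the diff does not say), the underlying hazard in "header plus size*num" allocations is that the multiplication can overflow and yield an undersized buffer. A userspace sketch with an explicit overflow check; the kernel offers kcalloc() and related helpers for the two-factor part, the check below just makes the hazard visible.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *alloc_frag(size_t header, size_t size, size_t num)
{
        /* reject the allocation if size * num + header would overflow size_t */
        if (num != 0 && size > (SIZE_MAX - header) / num)
                return NULL;
        return malloc(size * num + header);
}

int main(void)
{
        void *f = alloc_frag(32, 512, 4);

        printf("%s\n", f ? "allocated" : "overflow or out of memory");
        free(f);
        return 0;
}
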
45215diff -urNp linux-2.6.32.43/fs/partitions/mac.c linux-2.6.32.43/fs/partitions/mac.c
45216--- linux-2.6.32.43/fs/partitions/mac.c 2011-03-27 14:31:47.000000000 -0400
45217+++ linux-2.6.32.43/fs/partitions/mac.c 2011-04-17 15:56:46.000000000 -0400
45218@@ -59,11 +59,11 @@ int mac_partition(struct parsed_partitio
45219 return 0; /* not a MacOS disk */
45220 }
45221 blocks_in_map = be32_to_cpu(part->map_count);
45222+ printk(" [mac]");
45223 if (blocks_in_map < 0 || blocks_in_map >= DISK_MAX_PARTS) {
45224 put_dev_sector(sect);
45225 return 0;
45226 }
45227- printk(" [mac]");
45228 for (slot = 1; slot <= blocks_in_map; ++slot) {
45229 int pos = slot * secsize;
45230 put_dev_sector(sect);
45231diff -urNp linux-2.6.32.43/fs/pipe.c linux-2.6.32.43/fs/pipe.c
45232--- linux-2.6.32.43/fs/pipe.c 2011-03-27 14:31:47.000000000 -0400
45233+++ linux-2.6.32.43/fs/pipe.c 2011-04-23 13:37:17.000000000 -0400
45234@@ -401,9 +401,9 @@ redo:
45235 }
45236 if (bufs) /* More to do? */
45237 continue;
45238- if (!pipe->writers)
45239+ if (!atomic_read(&pipe->writers))
45240 break;
45241- if (!pipe->waiting_writers) {
45242+ if (!atomic_read(&pipe->waiting_writers)) {
45243 /* syscall merging: Usually we must not sleep
45244 * if O_NONBLOCK is set, or if we got some data.
45245 * But if a writer sleeps in kernel space, then
45246@@ -462,7 +462,7 @@ pipe_write(struct kiocb *iocb, const str
45247 mutex_lock(&inode->i_mutex);
45248 pipe = inode->i_pipe;
45249
45250- if (!pipe->readers) {
45251+ if (!atomic_read(&pipe->readers)) {
45252 send_sig(SIGPIPE, current, 0);
45253 ret = -EPIPE;
45254 goto out;
45255@@ -511,7 +511,7 @@ redo1:
45256 for (;;) {
45257 int bufs;
45258
45259- if (!pipe->readers) {
45260+ if (!atomic_read(&pipe->readers)) {
45261 send_sig(SIGPIPE, current, 0);
45262 if (!ret)
45263 ret = -EPIPE;
45264@@ -597,9 +597,9 @@ redo2:
45265 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
45266 do_wakeup = 0;
45267 }
45268- pipe->waiting_writers++;
45269+ atomic_inc(&pipe->waiting_writers);
45270 pipe_wait(pipe);
45271- pipe->waiting_writers--;
45272+ atomic_dec(&pipe->waiting_writers);
45273 }
45274 out:
45275 mutex_unlock(&inode->i_mutex);
45276@@ -666,7 +666,7 @@ pipe_poll(struct file *filp, poll_table
45277 mask = 0;
45278 if (filp->f_mode & FMODE_READ) {
45279 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
45280- if (!pipe->writers && filp->f_version != pipe->w_counter)
45281+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
45282 mask |= POLLHUP;
45283 }
45284
45285@@ -676,7 +676,7 @@ pipe_poll(struct file *filp, poll_table
45286 * Most Unices do not set POLLERR for FIFOs but on Linux they
45287 * behave exactly like pipes for poll().
45288 */
45289- if (!pipe->readers)
45290+ if (!atomic_read(&pipe->readers))
45291 mask |= POLLERR;
45292 }
45293
45294@@ -690,10 +690,10 @@ pipe_release(struct inode *inode, int de
45295
45296 mutex_lock(&inode->i_mutex);
45297 pipe = inode->i_pipe;
45298- pipe->readers -= decr;
45299- pipe->writers -= decw;
45300+ atomic_sub(decr, &pipe->readers);
45301+ atomic_sub(decw, &pipe->writers);
45302
45303- if (!pipe->readers && !pipe->writers) {
45304+ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) {
45305 free_pipe_info(inode);
45306 } else {
45307 wake_up_interruptible_sync(&pipe->wait);
45308@@ -783,7 +783,7 @@ pipe_read_open(struct inode *inode, stru
45309
45310 if (inode->i_pipe) {
45311 ret = 0;
45312- inode->i_pipe->readers++;
45313+ atomic_inc(&inode->i_pipe->readers);
45314 }
45315
45316 mutex_unlock(&inode->i_mutex);
45317@@ -800,7 +800,7 @@ pipe_write_open(struct inode *inode, str
45318
45319 if (inode->i_pipe) {
45320 ret = 0;
45321- inode->i_pipe->writers++;
45322+ atomic_inc(&inode->i_pipe->writers);
45323 }
45324
45325 mutex_unlock(&inode->i_mutex);
45326@@ -818,9 +818,9 @@ pipe_rdwr_open(struct inode *inode, stru
45327 if (inode->i_pipe) {
45328 ret = 0;
45329 if (filp->f_mode & FMODE_READ)
45330- inode->i_pipe->readers++;
45331+ atomic_inc(&inode->i_pipe->readers);
45332 if (filp->f_mode & FMODE_WRITE)
45333- inode->i_pipe->writers++;
45334+ atomic_inc(&inode->i_pipe->writers);
45335 }
45336
45337 mutex_unlock(&inode->i_mutex);
45338@@ -905,7 +905,7 @@ void free_pipe_info(struct inode *inode)
45339 inode->i_pipe = NULL;
45340 }
45341
45342-static struct vfsmount *pipe_mnt __read_mostly;
45343+struct vfsmount *pipe_mnt __read_mostly;
45344 static int pipefs_delete_dentry(struct dentry *dentry)
45345 {
45346 /*
45347@@ -945,7 +945,8 @@ static struct inode * get_pipe_inode(voi
45348 goto fail_iput;
45349 inode->i_pipe = pipe;
45350
45351- pipe->readers = pipe->writers = 1;
45352+ atomic_set(&pipe->readers, 1);
45353+ atomic_set(&pipe->writers, 1);
45354 inode->i_fop = &rdwr_pipefifo_fops;
45355
45356 /*
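
The fs/pipe.c hunks convert the plain integer readers/writers/waiting_writers counts into atomic_t, which under PaX REFCOUNT gains overflow detection on increments (again an assumption on my part; the diff carries no commentary). A userspace sketch of a counter whose increment refuses to wrap, using a C11 compare-and-swap loop; all names are illustrative:

#include <limits.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int readers;

static bool get_reader(void)
{
        int old = atomic_load(&readers);

        do {
                if (old == INT_MAX)
                        return false;   /* refuse to wrap instead of overflowing */
        } while (!atomic_compare_exchange_weak(&readers, &old, old + 1));
        return true;
}

static void put_reader(void)
{
        atomic_fetch_sub(&readers, 1);
}

int main(void)
{
        if (get_reader()) {
                printf("readers=%d\n", atomic_load(&readers));
                put_reader();
        }
        return 0;
}
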
45357diff -urNp linux-2.6.32.43/fs/proc/array.c linux-2.6.32.43/fs/proc/array.c
45358--- linux-2.6.32.43/fs/proc/array.c 2011-03-27 14:31:47.000000000 -0400
45359+++ linux-2.6.32.43/fs/proc/array.c 2011-05-16 21:46:57.000000000 -0400
45360@@ -60,6 +60,7 @@
45361 #include <linux/tty.h>
45362 #include <linux/string.h>
45363 #include <linux/mman.h>
45364+#include <linux/grsecurity.h>
45365 #include <linux/proc_fs.h>
45366 #include <linux/ioport.h>
45367 #include <linux/uaccess.h>
45368@@ -321,6 +322,21 @@ static inline void task_context_switch_c
45369 p->nivcsw);
45370 }
45371
45372+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45373+static inline void task_pax(struct seq_file *m, struct task_struct *p)
45374+{
45375+ if (p->mm)
45376+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
45377+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
45378+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
45379+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
45380+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
45381+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
45382+ else
45383+ seq_printf(m, "PaX:\t-----\n");
45384+}
45385+#endif
45386+
45387 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
45388 struct pid *pid, struct task_struct *task)
45389 {
45390@@ -337,9 +353,24 @@ int proc_pid_status(struct seq_file *m,
45391 task_cap(m, task);
45392 cpuset_task_status_allowed(m, task);
45393 task_context_switch_counts(m, task);
45394+
45395+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
45396+ task_pax(m, task);
45397+#endif
45398+
45399+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
45400+ task_grsec_rbac(m, task);
45401+#endif
45402+
45403 return 0;
45404 }
45405
45406+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45407+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45408+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45409+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45410+#endif
45411+
45412 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
45413 struct pid *pid, struct task_struct *task, int whole)
45414 {
45415@@ -358,9 +389,11 @@ static int do_task_stat(struct seq_file
45416 cputime_t cutime, cstime, utime, stime;
45417 cputime_t cgtime, gtime;
45418 unsigned long rsslim = 0;
45419- char tcomm[sizeof(task->comm)];
45420+ char tcomm[sizeof(task->comm)] = { 0 };
45421 unsigned long flags;
45422
45423+ pax_track_stack();
45424+
45425 state = *get_task_state(task);
45426 vsize = eip = esp = 0;
45427 permitted = ptrace_may_access(task, PTRACE_MODE_READ);
45428@@ -433,6 +466,19 @@ static int do_task_stat(struct seq_file
45429 gtime = task_gtime(task);
45430 }
45431
45432+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45433+ if (PAX_RAND_FLAGS(mm)) {
45434+ eip = 0;
45435+ esp = 0;
45436+ wchan = 0;
45437+ }
45438+#endif
45439+#ifdef CONFIG_GRKERNSEC_HIDESYM
45440+ wchan = 0;
45441+ eip =0;
45442+ esp =0;
45443+#endif
45444+
45445 /* scale priority and nice values from timeslices to -20..20 */
45446 /* to make it look like a "normal" Unix priority/nice value */
45447 priority = task_prio(task);
45448@@ -473,9 +519,15 @@ static int do_task_stat(struct seq_file
45449 vsize,
45450 mm ? get_mm_rss(mm) : 0,
45451 rsslim,
45452+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45453+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0),
45454+ PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0),
45455+ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0),
45456+#else
45457 mm ? (permitted ? mm->start_code : 1) : 0,
45458 mm ? (permitted ? mm->end_code : 1) : 0,
45459 (permitted && mm) ? mm->start_stack : 0,
45460+#endif
45461 esp,
45462 eip,
45463 /* The signal information here is obsolete.
45464@@ -528,3 +580,18 @@ int proc_pid_statm(struct seq_file *m, s
45465
45466 return 0;
45467 }
45468+
45469+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45470+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
45471+{
45472+ u32 curr_ip = 0;
45473+ unsigned long flags;
45474+
45475+ if (lock_task_sighand(task, &flags)) {
45476+ curr_ip = task->signal->curr_ip;
45477+ unlock_task_sighand(task, &flags);
45478+ }
45479+
45480+ return sprintf(buffer, "%pI4\n", &curr_ip);
45481+}
45482+#endif
45483diff -urNp linux-2.6.32.43/fs/proc/base.c linux-2.6.32.43/fs/proc/base.c
45484--- linux-2.6.32.43/fs/proc/base.c 2011-04-22 19:16:29.000000000 -0400
45485+++ linux-2.6.32.43/fs/proc/base.c 2011-06-04 21:20:50.000000000 -0400
45486@@ -102,6 +102,22 @@ struct pid_entry {
45487 union proc_op op;
45488 };
45489
45490+struct getdents_callback {
45491+ struct linux_dirent __user * current_dir;
45492+ struct linux_dirent __user * previous;
45493+ struct file * file;
45494+ int count;
45495+ int error;
45496+};
45497+
45498+static int gr_fake_filldir(void * __buf, const char *name, int namlen,
45499+ loff_t offset, u64 ino, unsigned int d_type)
45500+{
45501+ struct getdents_callback * buf = (struct getdents_callback *) __buf;
45502+ buf->error = -EINVAL;
45503+ return 0;
45504+}
45505+
45506 #define NOD(NAME, MODE, IOP, FOP, OP) { \
45507 .name = (NAME), \
45508 .len = sizeof(NAME) - 1, \
45509@@ -213,6 +229,9 @@ static int check_mem_permission(struct t
45510 if (task == current)
45511 return 0;
45512
45513+ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))
45514+ return -EPERM;
45515+
45516 /*
45517 * If current is actively ptrace'ing, and would also be
45518 * permitted to freshly attach with ptrace now, permit it.
45519@@ -260,6 +279,9 @@ static int proc_pid_cmdline(struct task_
45520 if (!mm->arg_end)
45521 goto out_mm; /* Shh! No looking before we're done */
45522
45523+ if (gr_acl_handle_procpidmem(task))
45524+ goto out_mm;
45525+
45526 len = mm->arg_end - mm->arg_start;
45527
45528 if (len > PAGE_SIZE)
45529@@ -287,12 +309,28 @@ out:
45530 return res;
45531 }
45532
45533+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45534+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
45535+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
45536+ _mm->pax_flags & MF_PAX_SEGMEXEC))
45537+#endif
45538+
45539 static int proc_pid_auxv(struct task_struct *task, char *buffer)
45540 {
45541 int res = 0;
45542 struct mm_struct *mm = get_task_mm(task);
45543 if (mm) {
45544 unsigned int nwords = 0;
45545+
45546+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
45547+ /* allow if we're currently ptracing this task */
45548+ if (PAX_RAND_FLAGS(mm) &&
45549+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
45550+ mmput(mm);
45551+ return res;
45552+ }
45553+#endif
45554+
45555 do {
45556 nwords += 2;
45557 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
45558@@ -306,7 +344,7 @@ static int proc_pid_auxv(struct task_str
45559 }
45560
45561
45562-#ifdef CONFIG_KALLSYMS
45563+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45564 /*
45565 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
45566 * Returns the resolved symbol. If that fails, simply return the address.
45567@@ -328,7 +366,7 @@ static int proc_pid_wchan(struct task_st
45568 }
45569 #endif /* CONFIG_KALLSYMS */
45570
45571-#ifdef CONFIG_STACKTRACE
45572+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45573
45574 #define MAX_STACK_TRACE_DEPTH 64
45575
45576@@ -522,7 +560,7 @@ static int proc_pid_limits(struct task_s
45577 return count;
45578 }
45579
45580-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45581+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45582 static int proc_pid_syscall(struct task_struct *task, char *buffer)
45583 {
45584 long nr;
45585@@ -547,7 +585,7 @@ static int proc_pid_syscall(struct task_
45586 /************************************************************************/
45587
45588 /* permission checks */
45589-static int proc_fd_access_allowed(struct inode *inode)
45590+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
45591 {
45592 struct task_struct *task;
45593 int allowed = 0;
45594@@ -557,7 +595,10 @@ static int proc_fd_access_allowed(struct
45595 */
45596 task = get_proc_task(inode);
45597 if (task) {
45598- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45599+ if (log)
45600+ allowed = ptrace_may_access_log(task, PTRACE_MODE_READ);
45601+ else
45602+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
45603 put_task_struct(task);
45604 }
45605 return allowed;
45606@@ -936,6 +977,9 @@ static ssize_t environ_read(struct file
45607 if (!task)
45608 goto out_no_task;
45609
45610+ if (gr_acl_handle_procpidmem(task))
45611+ goto out;
45612+
45613 if (!ptrace_may_access(task, PTRACE_MODE_READ))
45614 goto out;
45615
45616@@ -1350,7 +1394,7 @@ static void *proc_pid_follow_link(struct
45617 path_put(&nd->path);
45618
45619 /* Are we allowed to snoop on the tasks file descriptors? */
45620- if (!proc_fd_access_allowed(inode))
45621+ if (!proc_fd_access_allowed(inode,0))
45622 goto out;
45623
45624 error = PROC_I(inode)->op.proc_get_link(inode, &nd->path);
45625@@ -1390,8 +1434,18 @@ static int proc_pid_readlink(struct dent
45626 struct path path;
45627
45628 /* Are we allowed to snoop on the tasks file descriptors? */
45629- if (!proc_fd_access_allowed(inode))
45630- goto out;
45631+ /* logging this is needed for learning on chromium to work properly,
45632+ but we don't want to flood the logs from 'ps' which does a readlink
45633+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
45634+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
45635+ */
45636+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
45637+ if (!proc_fd_access_allowed(inode,0))
45638+ goto out;
45639+ } else {
45640+ if (!proc_fd_access_allowed(inode,1))
45641+ goto out;
45642+ }
45643
45644 error = PROC_I(inode)->op.proc_get_link(inode, &path);
45645 if (error)
45646@@ -1456,7 +1510,11 @@ static struct inode *proc_pid_make_inode
45647 rcu_read_lock();
45648 cred = __task_cred(task);
45649 inode->i_uid = cred->euid;
45650+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45651+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45652+#else
45653 inode->i_gid = cred->egid;
45654+#endif
45655 rcu_read_unlock();
45656 }
45657 security_task_to_inode(task, inode);
45658@@ -1474,6 +1532,9 @@ static int pid_getattr(struct vfsmount *
45659 struct inode *inode = dentry->d_inode;
45660 struct task_struct *task;
45661 const struct cred *cred;
45662+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45663+ const struct cred *tmpcred = current_cred();
45664+#endif
45665
45666 generic_fillattr(inode, stat);
45667
45668@@ -1481,13 +1542,41 @@ static int pid_getattr(struct vfsmount *
45669 stat->uid = 0;
45670 stat->gid = 0;
45671 task = pid_task(proc_pid(inode), PIDTYPE_PID);
45672+
45673+ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) {
45674+ rcu_read_unlock();
45675+ return -ENOENT;
45676+ }
45677+
45678 if (task) {
45679+ cred = __task_cred(task);
45680+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45681+ if (!tmpcred->uid || (tmpcred->uid == cred->uid)
45682+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45683+ || in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45684+#endif
45685+ ) {
45686+#endif
45687 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45688+#ifdef CONFIG_GRKERNSEC_PROC_USER
45689+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45690+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45691+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45692+#endif
45693 task_dumpable(task)) {
45694- cred = __task_cred(task);
45695 stat->uid = cred->euid;
45696+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45697+ stat->gid = CONFIG_GRKERNSEC_PROC_GID;
45698+#else
45699 stat->gid = cred->egid;
45700+#endif
45701 }
45702+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45703+ } else {
45704+ rcu_read_unlock();
45705+ return -ENOENT;
45706+ }
45707+#endif
45708 }
45709 rcu_read_unlock();
45710 return 0;
45711@@ -1518,11 +1607,20 @@ static int pid_revalidate(struct dentry
45712
45713 if (task) {
45714 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
45715+#ifdef CONFIG_GRKERNSEC_PROC_USER
45716+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
45717+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45718+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
45719+#endif
45720 task_dumpable(task)) {
45721 rcu_read_lock();
45722 cred = __task_cred(task);
45723 inode->i_uid = cred->euid;
45724+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45725+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45726+#else
45727 inode->i_gid = cred->egid;
45728+#endif
45729 rcu_read_unlock();
45730 } else {
45731 inode->i_uid = 0;
45732@@ -1643,7 +1741,8 @@ static int proc_fd_info(struct inode *in
45733 int fd = proc_fd(inode);
45734
45735 if (task) {
45736- files = get_files_struct(task);
45737+ if (!gr_acl_handle_procpidmem(task))
45738+ files = get_files_struct(task);
45739 put_task_struct(task);
45740 }
45741 if (files) {
45742@@ -1895,12 +1994,22 @@ static const struct file_operations proc
45743 static int proc_fd_permission(struct inode *inode, int mask)
45744 {
45745 int rv;
45746+ struct task_struct *task;
45747
45748 rv = generic_permission(inode, mask, NULL);
45749- if (rv == 0)
45750- return 0;
45751+
45752 if (task_pid(current) == proc_pid(inode))
45753 rv = 0;
45754+
45755+ task = get_proc_task(inode);
45756+ if (task == NULL)
45757+ return rv;
45758+
45759+ if (gr_acl_handle_procpidmem(task))
45760+ rv = -EACCES;
45761+
45762+ put_task_struct(task);
45763+
45764 return rv;
45765 }
45766
45767@@ -2009,6 +2118,9 @@ static struct dentry *proc_pident_lookup
45768 if (!task)
45769 goto out_no_task;
45770
45771+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45772+ goto out;
45773+
45774 /*
45775 * Yes, it does not scale. And it should not. Don't add
45776 * new entries into /proc/<tgid>/ without very good reasons.
45777@@ -2053,6 +2165,9 @@ static int proc_pident_readdir(struct fi
45778 if (!task)
45779 goto out_no_task;
45780
45781+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45782+ goto out;
45783+
45784 ret = 0;
45785 i = filp->f_pos;
45786 switch (i) {
45787@@ -2320,7 +2435,7 @@ static void *proc_self_follow_link(struc
45788 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
45789 void *cookie)
45790 {
45791- char *s = nd_get_link(nd);
45792+ const char *s = nd_get_link(nd);
45793 if (!IS_ERR(s))
45794 __putname(s);
45795 }
45796@@ -2519,7 +2634,7 @@ static const struct pid_entry tgid_base_
45797 #ifdef CONFIG_SCHED_DEBUG
45798 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45799 #endif
45800-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45801+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45802 INF("syscall", S_IRUSR, proc_pid_syscall),
45803 #endif
45804 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45805@@ -2544,10 +2659,10 @@ static const struct pid_entry tgid_base_
45806 #ifdef CONFIG_SECURITY
45807 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45808 #endif
45809-#ifdef CONFIG_KALLSYMS
45810+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45811 INF("wchan", S_IRUGO, proc_pid_wchan),
45812 #endif
45813-#ifdef CONFIG_STACKTRACE
45814+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45815 ONE("stack", S_IRUSR, proc_pid_stack),
45816 #endif
45817 #ifdef CONFIG_SCHEDSTATS
45818@@ -2577,6 +2692,9 @@ static const struct pid_entry tgid_base_
45819 #ifdef CONFIG_TASK_IO_ACCOUNTING
45820 INF("io", S_IRUGO, proc_tgid_io_accounting),
45821 #endif
45822+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45823+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
45824+#endif
45825 };
45826
45827 static int proc_tgid_base_readdir(struct file * filp,
45828@@ -2701,7 +2819,14 @@ static struct dentry *proc_pid_instantia
45829 if (!inode)
45830 goto out;
45831
45832+#ifdef CONFIG_GRKERNSEC_PROC_USER
45833+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
45834+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45835+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45836+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
45837+#else
45838 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
45839+#endif
45840 inode->i_op = &proc_tgid_base_inode_operations;
45841 inode->i_fop = &proc_tgid_base_operations;
45842 inode->i_flags|=S_IMMUTABLE;
45843@@ -2743,7 +2868,11 @@ struct dentry *proc_pid_lookup(struct in
45844 if (!task)
45845 goto out;
45846
45847+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
45848+ goto out_put_task;
45849+
45850 result = proc_pid_instantiate(dir, dentry, task, NULL);
45851+out_put_task:
45852 put_task_struct(task);
45853 out:
45854 return result;
45855@@ -2808,6 +2937,11 @@ int proc_pid_readdir(struct file * filp,
45856 {
45857 unsigned int nr;
45858 struct task_struct *reaper;
45859+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45860+ const struct cred *tmpcred = current_cred();
45861+ const struct cred *itercred;
45862+#endif
45863+ filldir_t __filldir = filldir;
45864 struct tgid_iter iter;
45865 struct pid_namespace *ns;
45866
45867@@ -2831,8 +2965,27 @@ int proc_pid_readdir(struct file * filp,
45868 for (iter = next_tgid(ns, iter);
45869 iter.task;
45870 iter.tgid += 1, iter = next_tgid(ns, iter)) {
45871+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45872+ rcu_read_lock();
45873+ itercred = __task_cred(iter.task);
45874+#endif
45875+ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task)
45876+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45877+ || (tmpcred->uid && (itercred->uid != tmpcred->uid)
45878+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45879+ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
45880+#endif
45881+ )
45882+#endif
45883+ )
45884+ __filldir = &gr_fake_filldir;
45885+ else
45886+ __filldir = filldir;
45887+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
45888+ rcu_read_unlock();
45889+#endif
45890 filp->f_pos = iter.tgid + TGID_OFFSET;
45891- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) {
45892+ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) {
45893 put_task_struct(iter.task);
45894 goto out;
45895 }
45896@@ -2858,7 +3011,7 @@ static const struct pid_entry tid_base_s
45897 #ifdef CONFIG_SCHED_DEBUG
45898 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
45899 #endif
45900-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
45901+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
45902 INF("syscall", S_IRUSR, proc_pid_syscall),
45903 #endif
45904 INF("cmdline", S_IRUGO, proc_pid_cmdline),
45905@@ -2882,10 +3035,10 @@ static const struct pid_entry tid_base_s
45906 #ifdef CONFIG_SECURITY
45907 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
45908 #endif
45909-#ifdef CONFIG_KALLSYMS
45910+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45911 INF("wchan", S_IRUGO, proc_pid_wchan),
45912 #endif
45913-#ifdef CONFIG_STACKTRACE
45914+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
45915 ONE("stack", S_IRUSR, proc_pid_stack),
45916 #endif
45917 #ifdef CONFIG_SCHEDSTATS
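
In proc_pid_readdir() above, the patch hides tasks by swapping the filldir callback for gr_fake_filldir(), so filtered entries are simply never emitted to userspace. A userspace sketch of that callback-swapping idea; every identifier below is made up for illustration.

#include <stdbool.h>
#include <stdio.h>

typedef int (*filldir_fn)(void *ctx, const char *name);

static int real_filldir(void *ctx, const char *name)
{
        printf("%s\n", name);
        return 0;
}

static int fake_filldir(void *ctx, const char *name)
{
        return 0;                       /* silently swallow the entry */
}

static bool task_hidden(int pid)
{
        return pid == 42;               /* stand-in for the chroot/hidden-task checks */
}

static void list_pids(const int *pids, int n, filldir_fn filldir)
{
        char name[16];
        int i;

        for (i = 0; i < n; i++) {
                filldir_fn cb = task_hidden(pids[i]) ? fake_filldir : filldir;

                snprintf(name, sizeof(name), "%d", pids[i]);
                cb(NULL, name);
        }
}

int main(void)
{
        int pids[] = { 1, 42, 1000 };

        list_pids(pids, 3, real_filldir);
        return 0;
}
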
45918diff -urNp linux-2.6.32.43/fs/proc/cmdline.c linux-2.6.32.43/fs/proc/cmdline.c
45919--- linux-2.6.32.43/fs/proc/cmdline.c 2011-03-27 14:31:47.000000000 -0400
45920+++ linux-2.6.32.43/fs/proc/cmdline.c 2011-04-17 15:56:46.000000000 -0400
45921@@ -23,7 +23,11 @@ static const struct file_operations cmdl
45922
45923 static int __init proc_cmdline_init(void)
45924 {
45925+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45926+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
45927+#else
45928 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
45929+#endif
45930 return 0;
45931 }
45932 module_init(proc_cmdline_init);
45933diff -urNp linux-2.6.32.43/fs/proc/devices.c linux-2.6.32.43/fs/proc/devices.c
45934--- linux-2.6.32.43/fs/proc/devices.c 2011-03-27 14:31:47.000000000 -0400
45935+++ linux-2.6.32.43/fs/proc/devices.c 2011-04-17 15:56:46.000000000 -0400
45936@@ -64,7 +64,11 @@ static const struct file_operations proc
45937
45938 static int __init proc_devices_init(void)
45939 {
45940+#ifdef CONFIG_GRKERNSEC_PROC_ADD
45941+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
45942+#else
45943 proc_create("devices", 0, NULL, &proc_devinfo_operations);
45944+#endif
45945 return 0;
45946 }
45947 module_init(proc_devices_init);
45948diff -urNp linux-2.6.32.43/fs/proc/inode.c linux-2.6.32.43/fs/proc/inode.c
45949--- linux-2.6.32.43/fs/proc/inode.c 2011-03-27 14:31:47.000000000 -0400
45950+++ linux-2.6.32.43/fs/proc/inode.c 2011-04-17 15:56:46.000000000 -0400
45951@@ -457,7 +457,11 @@ struct inode *proc_get_inode(struct supe
45952 if (de->mode) {
45953 inode->i_mode = de->mode;
45954 inode->i_uid = de->uid;
45955+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
45956+ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
45957+#else
45958 inode->i_gid = de->gid;
45959+#endif
45960 }
45961 if (de->size)
45962 inode->i_size = de->size;
45963diff -urNp linux-2.6.32.43/fs/proc/internal.h linux-2.6.32.43/fs/proc/internal.h
45964--- linux-2.6.32.43/fs/proc/internal.h 2011-03-27 14:31:47.000000000 -0400
45965+++ linux-2.6.32.43/fs/proc/internal.h 2011-04-17 15:56:46.000000000 -0400
45966@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi
45967 struct pid *pid, struct task_struct *task);
45968 extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
45969 struct pid *pid, struct task_struct *task);
45970+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
45971+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
45972+#endif
45973 extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
45974
45975 extern const struct file_operations proc_maps_operations;
45976diff -urNp linux-2.6.32.43/fs/proc/Kconfig linux-2.6.32.43/fs/proc/Kconfig
45977--- linux-2.6.32.43/fs/proc/Kconfig 2011-03-27 14:31:47.000000000 -0400
45978+++ linux-2.6.32.43/fs/proc/Kconfig 2011-04-17 15:56:46.000000000 -0400
45979@@ -30,12 +30,12 @@ config PROC_FS
45980
45981 config PROC_KCORE
45982 bool "/proc/kcore support" if !ARM
45983- depends on PROC_FS && MMU
45984+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
45985
45986 config PROC_VMCORE
45987 bool "/proc/vmcore support (EXPERIMENTAL)"
45988- depends on PROC_FS && CRASH_DUMP
45989- default y
45990+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
45991+ default n
45992 help
45993 Exports the dump image of crashed kernel in ELF format.
45994
45995@@ -59,8 +59,8 @@ config PROC_SYSCTL
45996 limited in memory.
45997
45998 config PROC_PAGE_MONITOR
45999- default y
46000- depends on PROC_FS && MMU
46001+ default n
46002+ depends on PROC_FS && MMU && !GRKERNSEC
46003 bool "Enable /proc page monitoring" if EMBEDDED
46004 help
46005 Various /proc files exist to monitor process memory utilization:
46006diff -urNp linux-2.6.32.43/fs/proc/kcore.c linux-2.6.32.43/fs/proc/kcore.c
46007--- linux-2.6.32.43/fs/proc/kcore.c 2011-03-27 14:31:47.000000000 -0400
46008+++ linux-2.6.32.43/fs/proc/kcore.c 2011-05-16 21:46:57.000000000 -0400
46009@@ -320,6 +320,8 @@ static void elf_kcore_store_hdr(char *bu
46010 off_t offset = 0;
46011 struct kcore_list *m;
46012
46013+ pax_track_stack();
46014+
46015 /* setup ELF header */
46016 elf = (struct elfhdr *) bufp;
46017 bufp += sizeof(struct elfhdr);
46018@@ -477,9 +479,10 @@ read_kcore(struct file *file, char __use
46019 * the addresses in the elf_phdr on our list.
46020 */
46021 start = kc_offset_to_vaddr(*fpos - elf_buflen);
46022- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
46023+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
46024+ if (tsz > buflen)
46025 tsz = buflen;
46026-
46027+
46028 while (buflen) {
46029 struct kcore_list *m;
46030
46031@@ -508,20 +511,23 @@ read_kcore(struct file *file, char __use
46032 kfree(elf_buf);
46033 } else {
46034 if (kern_addr_valid(start)) {
46035- unsigned long n;
46036+ char *elf_buf;
46037+ mm_segment_t oldfs;
46038
46039- n = copy_to_user(buffer, (char *)start, tsz);
46040- /*
46041- * We cannot distingush between fault on source
46042- * and fault on destination. When this happens
46043- * we clear too and hope it will trigger the
46044- * EFAULT again.
46045- */
46046- if (n) {
46047- if (clear_user(buffer + tsz - n,
46048- n))
46049+ elf_buf = kmalloc(tsz, GFP_KERNEL);
46050+ if (!elf_buf)
46051+ return -ENOMEM;
46052+ oldfs = get_fs();
46053+ set_fs(KERNEL_DS);
46054+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
46055+ set_fs(oldfs);
46056+ if (copy_to_user(buffer, elf_buf, tsz)) {
46057+ kfree(elf_buf);
46058 return -EFAULT;
46059+ }
46060 }
46061+ set_fs(oldfs);
46062+ kfree(elf_buf);
46063 } else {
46064 if (clear_user(buffer, tsz))
46065 return -EFAULT;
46066@@ -541,6 +547,9 @@ read_kcore(struct file *file, char __use
46067
46068 static int open_kcore(struct inode *inode, struct file *filp)
46069 {
46070+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
46071+ return -EPERM;
46072+#endif
46073 if (!capable(CAP_SYS_RAWIO))
46074 return -EPERM;
46075 if (kcore_need_update)
46076diff -urNp linux-2.6.32.43/fs/proc/meminfo.c linux-2.6.32.43/fs/proc/meminfo.c
46077--- linux-2.6.32.43/fs/proc/meminfo.c 2011-03-27 14:31:47.000000000 -0400
46078+++ linux-2.6.32.43/fs/proc/meminfo.c 2011-05-16 21:46:57.000000000 -0400
46079@@ -29,6 +29,8 @@ static int meminfo_proc_show(struct seq_
46080 unsigned long pages[NR_LRU_LISTS];
46081 int lru;
46082
46083+ pax_track_stack();
46084+
46085 /*
46086 * display in kilobytes.
46087 */
46088@@ -149,7 +151,7 @@ static int meminfo_proc_show(struct seq_
46089 vmi.used >> 10,
46090 vmi.largest_chunk >> 10
46091 #ifdef CONFIG_MEMORY_FAILURE
46092- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10)
46093+ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10)
46094 #endif
46095 );
46096
46097diff -urNp linux-2.6.32.43/fs/proc/nommu.c linux-2.6.32.43/fs/proc/nommu.c
46098--- linux-2.6.32.43/fs/proc/nommu.c 2011-03-27 14:31:47.000000000 -0400
46099+++ linux-2.6.32.43/fs/proc/nommu.c 2011-04-17 15:56:46.000000000 -0400
46100@@ -67,7 +67,7 @@ static int nommu_region_show(struct seq_
46101 if (len < 1)
46102 len = 1;
46103 seq_printf(m, "%*c", len, ' ');
46104- seq_path(m, &file->f_path, "");
46105+ seq_path(m, &file->f_path, "\n\\");
46106 }
46107
46108 seq_putc(m, '\n');
46109diff -urNp linux-2.6.32.43/fs/proc/proc_net.c linux-2.6.32.43/fs/proc/proc_net.c
46110--- linux-2.6.32.43/fs/proc/proc_net.c 2011-03-27 14:31:47.000000000 -0400
46111+++ linux-2.6.32.43/fs/proc/proc_net.c 2011-04-17 15:56:46.000000000 -0400
46112@@ -104,6 +104,17 @@ static struct net *get_proc_task_net(str
46113 struct task_struct *task;
46114 struct nsproxy *ns;
46115 struct net *net = NULL;
46116+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46117+ const struct cred *cred = current_cred();
46118+#endif
46119+
46120+#ifdef CONFIG_GRKERNSEC_PROC_USER
46121+ if (cred->fsuid)
46122+ return net;
46123+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46124+ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID))
46125+ return net;
46126+#endif
46127
46128 rcu_read_lock();
46129 task = pid_task(proc_pid(dir), PIDTYPE_PID);
46130diff -urNp linux-2.6.32.43/fs/proc/proc_sysctl.c linux-2.6.32.43/fs/proc/proc_sysctl.c
46131--- linux-2.6.32.43/fs/proc/proc_sysctl.c 2011-03-27 14:31:47.000000000 -0400
46132+++ linux-2.6.32.43/fs/proc/proc_sysctl.c 2011-04-17 15:56:46.000000000 -0400
46133@@ -7,6 +7,8 @@
46134 #include <linux/security.h>
46135 #include "internal.h"
46136
46137+extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op);
46138+
46139 static const struct dentry_operations proc_sys_dentry_operations;
46140 static const struct file_operations proc_sys_file_operations;
46141 static const struct inode_operations proc_sys_inode_operations;
46142@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st
46143 if (!p)
46144 goto out;
46145
46146+ if (gr_handle_sysctl(p, MAY_EXEC))
46147+ goto out;
46148+
46149 err = ERR_PTR(-ENOMEM);
46150 inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p);
46151 if (h)
46152@@ -228,6 +233,9 @@ static int scan(struct ctl_table_header
46153 if (*pos < file->f_pos)
46154 continue;
46155
46156+ if (gr_handle_sysctl(table, 0))
46157+ continue;
46158+
46159 res = proc_sys_fill_cache(file, dirent, filldir, head, table);
46160 if (res)
46161 return res;
46162@@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo
46163 if (IS_ERR(head))
46164 return PTR_ERR(head);
46165
46166+ if (table && gr_handle_sysctl(table, MAY_EXEC))
46167+ return -ENOENT;
46168+
46169 generic_fillattr(inode, stat);
46170 if (table)
46171 stat->mode = (stat->mode & S_IFMT) | table->mode;
46172diff -urNp linux-2.6.32.43/fs/proc/root.c linux-2.6.32.43/fs/proc/root.c
46173--- linux-2.6.32.43/fs/proc/root.c 2011-03-27 14:31:47.000000000 -0400
46174+++ linux-2.6.32.43/fs/proc/root.c 2011-04-17 15:56:46.000000000 -0400
46175@@ -134,7 +134,15 @@ void __init proc_root_init(void)
46176 #ifdef CONFIG_PROC_DEVICETREE
46177 proc_device_tree_init();
46178 #endif
46179+#ifdef CONFIG_GRKERNSEC_PROC_ADD
46180+#ifdef CONFIG_GRKERNSEC_PROC_USER
46181+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
46182+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
46183+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
46184+#endif
46185+#else
46186 proc_mkdir("bus", NULL);
46187+#endif
46188 proc_sys_init();
46189 }
46190
46191diff -urNp linux-2.6.32.43/fs/proc/task_mmu.c linux-2.6.32.43/fs/proc/task_mmu.c
46192--- linux-2.6.32.43/fs/proc/task_mmu.c 2011-03-27 14:31:47.000000000 -0400
46193+++ linux-2.6.32.43/fs/proc/task_mmu.c 2011-04-23 13:38:09.000000000 -0400
46194@@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct
46195 "VmStk:\t%8lu kB\n"
46196 "VmExe:\t%8lu kB\n"
46197 "VmLib:\t%8lu kB\n"
46198- "VmPTE:\t%8lu kB\n",
46199- hiwater_vm << (PAGE_SHIFT-10),
46200+ "VmPTE:\t%8lu kB\n"
46201+
46202+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46203+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
46204+#endif
46205+
46206+ ,hiwater_vm << (PAGE_SHIFT-10),
46207 (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10),
46208 mm->locked_vm << (PAGE_SHIFT-10),
46209 hiwater_rss << (PAGE_SHIFT-10),
46210 total_rss << (PAGE_SHIFT-10),
46211 data << (PAGE_SHIFT-10),
46212 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
46213- (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10);
46214+ (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10
46215+
46216+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
46217+ , mm->context.user_cs_base, mm->context.user_cs_limit
46218+#endif
46219+
46220+ );
46221 }
46222
46223 unsigned long task_vsize(struct mm_struct *mm)
46224@@ -175,7 +186,8 @@ static void m_stop(struct seq_file *m, v
46225 struct proc_maps_private *priv = m->private;
46226 struct vm_area_struct *vma = v;
46227
46228- vma_stop(priv, vma);
46229+ if (!IS_ERR(vma))
46230+ vma_stop(priv, vma);
46231 if (priv->task)
46232 put_task_struct(priv->task);
46233 }
46234@@ -199,6 +211,12 @@ static int do_maps_open(struct inode *in
46235 return ret;
46236 }
46237
46238+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46239+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
46240+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
46241+ _mm->pax_flags & MF_PAX_SEGMEXEC))
46242+#endif
46243+
46244 static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma)
46245 {
46246 struct mm_struct *mm = vma->vm_mm;
46247@@ -206,7 +224,6 @@ static void show_map_vma(struct seq_file
46248 int flags = vma->vm_flags;
46249 unsigned long ino = 0;
46250 unsigned long long pgoff = 0;
46251- unsigned long start;
46252 dev_t dev = 0;
46253 int len;
46254
46255@@ -217,20 +234,23 @@ static void show_map_vma(struct seq_file
46256 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
46257 }
46258
46259- /* We don't show the stack guard page in /proc/maps */
46260- start = vma->vm_start;
46261- if (vma->vm_flags & VM_GROWSDOWN)
46262- if (!vma_stack_continue(vma->vm_prev, vma->vm_start))
46263- start += PAGE_SIZE;
46264-
46265 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
46266- start,
46267+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46268+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start,
46269+ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end,
46270+#else
46271+ vma->vm_start,
46272 vma->vm_end,
46273+#endif
46274 flags & VM_READ ? 'r' : '-',
46275 flags & VM_WRITE ? 'w' : '-',
46276 flags & VM_EXEC ? 'x' : '-',
46277 flags & VM_MAYSHARE ? 's' : 'p',
46278+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46279+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
46280+#else
46281 pgoff,
46282+#endif
46283 MAJOR(dev), MINOR(dev), ino, &len);
46284
46285 /*
46286@@ -239,7 +259,7 @@ static void show_map_vma(struct seq_file
46287 */
46288 if (file) {
46289 pad_len_spaces(m, len);
46290- seq_path(m, &file->f_path, "\n");
46291+ seq_path(m, &file->f_path, "\n\\");
46292 } else {
46293 const char *name = arch_vma_name(vma);
46294 if (!name) {
46295@@ -247,8 +267,9 @@ static void show_map_vma(struct seq_file
46296 if (vma->vm_start <= mm->brk &&
46297 vma->vm_end >= mm->start_brk) {
46298 name = "[heap]";
46299- } else if (vma->vm_start <= mm->start_stack &&
46300- vma->vm_end >= mm->start_stack) {
46301+ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
46302+ (vma->vm_start <= mm->start_stack &&
46303+ vma->vm_end >= mm->start_stack)) {
46304 name = "[stack]";
46305 }
46306 } else {
46307@@ -391,9 +412,16 @@ static int show_smap(struct seq_file *m,
46308 };
46309
46310 memset(&mss, 0, sizeof mss);
46311- mss.vma = vma;
46312- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46313- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46314+
46315+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46316+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
46317+#endif
46318+ mss.vma = vma;
46319+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
46320+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
46321+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46322+ }
46323+#endif
46324
46325 show_map_vma(m, vma);
46326
46327@@ -409,7 +437,11 @@ static int show_smap(struct seq_file *m,
46328 "Swap: %8lu kB\n"
46329 "KernelPageSize: %8lu kB\n"
46330 "MMUPageSize: %8lu kB\n",
46331+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
46332+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
46333+#else
46334 (vma->vm_end - vma->vm_start) >> 10,
46335+#endif
46336 mss.resident >> 10,
46337 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
46338 mss.shared_clean >> 10,
46339diff -urNp linux-2.6.32.43/fs/proc/task_nommu.c linux-2.6.32.43/fs/proc/task_nommu.c
46340--- linux-2.6.32.43/fs/proc/task_nommu.c 2011-03-27 14:31:47.000000000 -0400
46341+++ linux-2.6.32.43/fs/proc/task_nommu.c 2011-04-17 15:56:46.000000000 -0400
46342@@ -50,7 +50,7 @@ void task_mem(struct seq_file *m, struct
46343 else
46344 bytes += kobjsize(mm);
46345
46346- if (current->fs && current->fs->users > 1)
46347+ if (current->fs && atomic_read(&current->fs->users) > 1)
46348 sbytes += kobjsize(current->fs);
46349 else
46350 bytes += kobjsize(current->fs);
46351@@ -154,7 +154,7 @@ static int nommu_vma_show(struct seq_fil
46352 if (len < 1)
46353 len = 1;
46354 seq_printf(m, "%*c", len, ' ');
46355- seq_path(m, &file->f_path, "");
46356+ seq_path(m, &file->f_path, "\n\\");
46357 }
46358
46359 seq_putc(m, '\n');
46360diff -urNp linux-2.6.32.43/fs/readdir.c linux-2.6.32.43/fs/readdir.c
46361--- linux-2.6.32.43/fs/readdir.c 2011-03-27 14:31:47.000000000 -0400
46362+++ linux-2.6.32.43/fs/readdir.c 2011-04-17 15:56:46.000000000 -0400
46363@@ -16,6 +16,7 @@
46364 #include <linux/security.h>
46365 #include <linux/syscalls.h>
46366 #include <linux/unistd.h>
46367+#include <linux/namei.h>
46368
46369 #include <asm/uaccess.h>
46370
46371@@ -67,6 +68,7 @@ struct old_linux_dirent {
46372
46373 struct readdir_callback {
46374 struct old_linux_dirent __user * dirent;
46375+ struct file * file;
46376 int result;
46377 };
46378
46379@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons
46380 buf->result = -EOVERFLOW;
46381 return -EOVERFLOW;
46382 }
46383+
46384+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46385+ return 0;
46386+
46387 buf->result++;
46388 dirent = buf->dirent;
46389 if (!access_ok(VERIFY_WRITE, dirent,
46390@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in
46391
46392 buf.result = 0;
46393 buf.dirent = dirent;
46394+ buf.file = file;
46395
46396 error = vfs_readdir(file, fillonedir, &buf);
46397 if (buf.result)
46398@@ -142,6 +149,7 @@ struct linux_dirent {
46399 struct getdents_callback {
46400 struct linux_dirent __user * current_dir;
46401 struct linux_dirent __user * previous;
46402+ struct file * file;
46403 int count;
46404 int error;
46405 };
46406@@ -162,6 +170,10 @@ static int filldir(void * __buf, const c
46407 buf->error = -EOVERFLOW;
46408 return -EOVERFLOW;
46409 }
46410+
46411+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46412+ return 0;
46413+
46414 dirent = buf->previous;
46415 if (dirent) {
46416 if (__put_user(offset, &dirent->d_off))
46417@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int,
46418 buf.previous = NULL;
46419 buf.count = count;
46420 buf.error = 0;
46421+ buf.file = file;
46422
46423 error = vfs_readdir(file, filldir, &buf);
46424 if (error >= 0)
46425@@ -228,6 +241,7 @@ out:
46426 struct getdents_callback64 {
46427 struct linux_dirent64 __user * current_dir;
46428 struct linux_dirent64 __user * previous;
46429+ struct file *file;
46430 int count;
46431 int error;
46432 };
46433@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const
46434 buf->error = -EINVAL; /* only used if we fail.. */
46435 if (reclen > buf->count)
46436 return -EINVAL;
46437+
46438+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
46439+ return 0;
46440+
46441 dirent = buf->previous;
46442 if (dirent) {
46443 if (__put_user(offset, &dirent->d_off))
46444@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int
46445
46446 buf.current_dir = dirent;
46447 buf.previous = NULL;
46448+ buf.file = file;
46449 buf.count = count;
46450 buf.error = 0;
46451
46452diff -urNp linux-2.6.32.43/fs/reiserfs/dir.c linux-2.6.32.43/fs/reiserfs/dir.c
46453--- linux-2.6.32.43/fs/reiserfs/dir.c 2011-03-27 14:31:47.000000000 -0400
46454+++ linux-2.6.32.43/fs/reiserfs/dir.c 2011-05-16 21:46:57.000000000 -0400
46455@@ -66,6 +66,8 @@ int reiserfs_readdir_dentry(struct dentr
46456 struct reiserfs_dir_entry de;
46457 int ret = 0;
46458
46459+ pax_track_stack();
46460+
46461 reiserfs_write_lock(inode->i_sb);
46462
46463 reiserfs_check_lock_depth(inode->i_sb, "readdir");
46464diff -urNp linux-2.6.32.43/fs/reiserfs/do_balan.c linux-2.6.32.43/fs/reiserfs/do_balan.c
46465--- linux-2.6.32.43/fs/reiserfs/do_balan.c 2011-03-27 14:31:47.000000000 -0400
46466+++ linux-2.6.32.43/fs/reiserfs/do_balan.c 2011-04-17 15:56:46.000000000 -0400
46467@@ -2058,7 +2058,7 @@ void do_balance(struct tree_balance *tb,
46468 return;
46469 }
46470
46471- atomic_inc(&(fs_generation(tb->tb_sb)));
46472+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
46473 do_balance_starts(tb);
46474
46475 /* balance leaf returns 0 except if combining L R and S into
46476diff -urNp linux-2.6.32.43/fs/reiserfs/item_ops.c linux-2.6.32.43/fs/reiserfs/item_ops.c
46477--- linux-2.6.32.43/fs/reiserfs/item_ops.c 2011-03-27 14:31:47.000000000 -0400
46478+++ linux-2.6.32.43/fs/reiserfs/item_ops.c 2011-04-17 15:56:46.000000000 -0400
46479@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i
46480 vi->vi_index, vi->vi_type, vi->vi_ih);
46481 }
46482
46483-static struct item_operations stat_data_ops = {
46484+static const struct item_operations stat_data_ops = {
46485 .bytes_number = sd_bytes_number,
46486 .decrement_key = sd_decrement_key,
46487 .is_left_mergeable = sd_is_left_mergeable,
46488@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu
46489 vi->vi_index, vi->vi_type, vi->vi_ih);
46490 }
46491
46492-static struct item_operations direct_ops = {
46493+static const struct item_operations direct_ops = {
46494 .bytes_number = direct_bytes_number,
46495 .decrement_key = direct_decrement_key,
46496 .is_left_mergeable = direct_is_left_mergeable,
46497@@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir
46498 vi->vi_index, vi->vi_type, vi->vi_ih);
46499 }
46500
46501-static struct item_operations indirect_ops = {
46502+static const struct item_operations indirect_ops = {
46503 .bytes_number = indirect_bytes_number,
46504 .decrement_key = indirect_decrement_key,
46505 .is_left_mergeable = indirect_is_left_mergeable,
46506@@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir
46507 printk("\n");
46508 }
46509
46510-static struct item_operations direntry_ops = {
46511+static const struct item_operations direntry_ops = {
46512 .bytes_number = direntry_bytes_number,
46513 .decrement_key = direntry_decrement_key,
46514 .is_left_mergeable = direntry_is_left_mergeable,
46515@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir
46516 "Invalid item type observed, run fsck ASAP");
46517 }
46518
46519-static struct item_operations errcatch_ops = {
46520+static const struct item_operations errcatch_ops = {
46521 errcatch_bytes_number,
46522 errcatch_decrement_key,
46523 errcatch_is_left_mergeable,
46524@@ -746,7 +746,7 @@ static struct item_operations errcatch_o
46525 #error Item types must use disk-format assigned values.
46526 #endif
46527
46528-struct item_operations *item_ops[TYPE_ANY + 1] = {
46529+const struct item_operations * const item_ops[TYPE_ANY + 1] = {
46530 &stat_data_ops,
46531 &indirect_ops,
46532 &direct_ops,
46533diff -urNp linux-2.6.32.43/fs/reiserfs/journal.c linux-2.6.32.43/fs/reiserfs/journal.c
46534--- linux-2.6.32.43/fs/reiserfs/journal.c 2011-03-27 14:31:47.000000000 -0400
46535+++ linux-2.6.32.43/fs/reiserfs/journal.c 2011-05-16 21:46:57.000000000 -0400
46536@@ -2329,6 +2329,8 @@ static struct buffer_head *reiserfs_brea
46537 struct buffer_head *bh;
46538 int i, j;
46539
46540+ pax_track_stack();
46541+
46542 bh = __getblk(dev, block, bufsize);
46543 if (buffer_uptodate(bh))
46544 return (bh);
46545diff -urNp linux-2.6.32.43/fs/reiserfs/namei.c linux-2.6.32.43/fs/reiserfs/namei.c
46546--- linux-2.6.32.43/fs/reiserfs/namei.c 2011-03-27 14:31:47.000000000 -0400
46547+++ linux-2.6.32.43/fs/reiserfs/namei.c 2011-05-16 21:46:57.000000000 -0400
46548@@ -1214,6 +1214,8 @@ static int reiserfs_rename(struct inode
46549 unsigned long savelink = 1;
46550 struct timespec ctime;
46551
46552+ pax_track_stack();
46553+
46554 /* three balancings: (1) old name removal, (2) new name insertion
46555 and (3) maybe "save" link insertion
46556 stat data updates: (1) old directory,
46557diff -urNp linux-2.6.32.43/fs/reiserfs/procfs.c linux-2.6.32.43/fs/reiserfs/procfs.c
46558--- linux-2.6.32.43/fs/reiserfs/procfs.c 2011-03-27 14:31:47.000000000 -0400
46559+++ linux-2.6.32.43/fs/reiserfs/procfs.c 2011-05-16 21:46:57.000000000 -0400
46560@@ -123,7 +123,7 @@ static int show_super(struct seq_file *m
46561 "SMALL_TAILS " : "NO_TAILS ",
46562 replay_only(sb) ? "REPLAY_ONLY " : "",
46563 convert_reiserfs(sb) ? "CONV " : "",
46564- atomic_read(&r->s_generation_counter),
46565+ atomic_read_unchecked(&r->s_generation_counter),
46566 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
46567 SF(s_do_balance), SF(s_unneeded_left_neighbor),
46568 SF(s_good_search_by_key_reada), SF(s_bmaps),
46569@@ -309,6 +309,8 @@ static int show_journal(struct seq_file
46570 struct journal_params *jp = &rs->s_v1.s_journal;
46571 char b[BDEVNAME_SIZE];
46572
46573+ pax_track_stack();
46574+
46575 seq_printf(m, /* on-disk fields */
46576 "jp_journal_1st_block: \t%i\n"
46577 "jp_journal_dev: \t%s[%x]\n"
46578diff -urNp linux-2.6.32.43/fs/reiserfs/stree.c linux-2.6.32.43/fs/reiserfs/stree.c
46579--- linux-2.6.32.43/fs/reiserfs/stree.c 2011-03-27 14:31:47.000000000 -0400
46580+++ linux-2.6.32.43/fs/reiserfs/stree.c 2011-05-16 21:46:57.000000000 -0400
46581@@ -1159,6 +1159,8 @@ int reiserfs_delete_item(struct reiserfs
46582 int iter = 0;
46583 #endif
46584
46585+ pax_track_stack();
46586+
46587 BUG_ON(!th->t_trans_id);
46588
46589 init_tb_struct(th, &s_del_balance, sb, path,
46590@@ -1296,6 +1298,8 @@ void reiserfs_delete_solid_item(struct r
46591 int retval;
46592 int quota_cut_bytes = 0;
46593
46594+ pax_track_stack();
46595+
46596 BUG_ON(!th->t_trans_id);
46597
46598 le_key2cpu_key(&cpu_key, key);
46599@@ -1525,6 +1529,8 @@ int reiserfs_cut_from_item(struct reiser
46600 int quota_cut_bytes;
46601 loff_t tail_pos = 0;
46602
46603+ pax_track_stack();
46604+
46605 BUG_ON(!th->t_trans_id);
46606
46607 init_tb_struct(th, &s_cut_balance, inode->i_sb, path,
46608@@ -1920,6 +1926,8 @@ int reiserfs_paste_into_item(struct reis
46609 int retval;
46610 int fs_gen;
46611
46612+ pax_track_stack();
46613+
46614 BUG_ON(!th->t_trans_id);
46615
46616 fs_gen = get_generation(inode->i_sb);
46617@@ -2007,6 +2015,8 @@ int reiserfs_insert_item(struct reiserfs
46618 int fs_gen = 0;
46619 int quota_bytes = 0;
46620
46621+ pax_track_stack();
46622+
46623 BUG_ON(!th->t_trans_id);
46624
46625 if (inode) { /* Do we count quotas for item? */
46626diff -urNp linux-2.6.32.43/fs/reiserfs/super.c linux-2.6.32.43/fs/reiserfs/super.c
46627--- linux-2.6.32.43/fs/reiserfs/super.c 2011-03-27 14:31:47.000000000 -0400
46628+++ linux-2.6.32.43/fs/reiserfs/super.c 2011-05-16 21:46:57.000000000 -0400
46629@@ -912,6 +912,8 @@ static int reiserfs_parse_options(struct
46630 {.option_name = NULL}
46631 };
46632
46633+ pax_track_stack();
46634+
46635 *blocks = 0;
46636 if (!options || !*options)
46637 /* use default configuration: create tails, journaling on, no
46638diff -urNp linux-2.6.32.43/fs/select.c linux-2.6.32.43/fs/select.c
46639--- linux-2.6.32.43/fs/select.c 2011-03-27 14:31:47.000000000 -0400
46640+++ linux-2.6.32.43/fs/select.c 2011-05-16 21:46:57.000000000 -0400
46641@@ -20,6 +20,7 @@
46642 #include <linux/module.h>
46643 #include <linux/slab.h>
46644 #include <linux/poll.h>
46645+#include <linux/security.h>
46646 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
46647 #include <linux/file.h>
46648 #include <linux/fdtable.h>
46649@@ -401,6 +402,8 @@ int do_select(int n, fd_set_bits *fds, s
46650 int retval, i, timed_out = 0;
46651 unsigned long slack = 0;
46652
46653+ pax_track_stack();
46654+
46655 rcu_read_lock();
46656 retval = max_select_fd(n, fds);
46657 rcu_read_unlock();
46658@@ -529,6 +532,8 @@ int core_sys_select(int n, fd_set __user
46659 /* Allocate small arguments on the stack to save memory and be faster */
46660 long stack_fds[SELECT_STACK_ALLOC/sizeof(long)];
46661
46662+ pax_track_stack();
46663+
46664 ret = -EINVAL;
46665 if (n < 0)
46666 goto out_nofds;
46667@@ -821,6 +826,9 @@ int do_sys_poll(struct pollfd __user *uf
46668 struct poll_list *walk = head;
46669 unsigned long todo = nfds;
46670
46671+ pax_track_stack();
46672+
46673+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
46674 if (nfds > current->signal->rlim[RLIMIT_NOFILE].rlim_cur)
46675 return -EINVAL;
46676
46677diff -urNp linux-2.6.32.43/fs/seq_file.c linux-2.6.32.43/fs/seq_file.c
46678--- linux-2.6.32.43/fs/seq_file.c 2011-03-27 14:31:47.000000000 -0400
46679+++ linux-2.6.32.43/fs/seq_file.c 2011-08-05 20:33:55.000000000 -0400
46680@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m,
46681 return 0;
46682 }
46683 if (!m->buf) {
46684- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46685+ m->size = PAGE_SIZE;
46686+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46687 if (!m->buf)
46688 return -ENOMEM;
46689 }
46690@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m,
46691 Eoverflow:
46692 m->op->stop(m, p);
46693 kfree(m->buf);
46694- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46695+ m->size <<= 1;
46696+ m->buf = kmalloc(m->size, GFP_KERNEL);
46697 return !m->buf ? -ENOMEM : -EAGAIN;
46698 }
46699
46700@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char
46701 m->version = file->f_version;
46702 /* grab buffer if we didn't have one */
46703 if (!m->buf) {
46704- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
46705+ m->size = PAGE_SIZE;
46706+ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
46707 if (!m->buf)
46708 goto Enomem;
46709 }
46710@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char
46711 goto Fill;
46712 m->op->stop(m, p);
46713 kfree(m->buf);
46714- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
46715+ m->size <<= 1;
46716+ m->buf = kmalloc(m->size, GFP_KERNEL);
46717 if (!m->buf)
46718 goto Enomem;
46719 m->count = 0;
46720@@ -555,10 +559,10 @@ int single_open(struct file *file, int (
46721 int res = -ENOMEM;
46722
46723 if (op) {
46724- op->start = single_start;
46725- op->next = single_next;
46726- op->stop = single_stop;
46727- op->show = show;
46728+ *(void **)&op->start = single_start;
46729+ *(void **)&op->next = single_next;
46730+ *(void **)&op->stop = single_stop;
46731+ *(void **)&op->show = show;
46732 res = seq_open(file, op);
46733 if (!res)
46734 ((struct seq_file *)file->private_data)->private = data;
46735diff -urNp linux-2.6.32.43/fs/smbfs/proc.c linux-2.6.32.43/fs/smbfs/proc.c
46736--- linux-2.6.32.43/fs/smbfs/proc.c 2011-03-27 14:31:47.000000000 -0400
46737+++ linux-2.6.32.43/fs/smbfs/proc.c 2011-08-05 20:33:55.000000000 -0400
46738@@ -266,9 +266,9 @@ int smb_setcodepage(struct smb_sb_info *
46739
46740 out:
46741 if (server->local_nls != NULL && server->remote_nls != NULL)
46742- server->ops->convert = convert_cp;
46743+ *(void **)&server->ops->convert = convert_cp;
46744 else
46745- server->ops->convert = convert_memcpy;
46746+ *(void **)&server->ops->convert = convert_memcpy;
46747
46748 smb_unlock_server(server);
46749 return n;
46750@@ -933,9 +933,9 @@ smb_newconn(struct smb_sb_info *server,
46751
46752 /* FIXME: the win9x code wants to modify these ... (seek/trunc bug) */
46753 if (server->mnt->flags & SMB_MOUNT_OLDATTR) {
46754- server->ops->getattr = smb_proc_getattr_core;
46755+ *(void **)&server->ops->getattr = smb_proc_getattr_core;
46756 } else if (server->mnt->flags & SMB_MOUNT_DIRATTR) {
46757- server->ops->getattr = smb_proc_getattr_ff;
46758+ *(void **)&server->ops->getattr = smb_proc_getattr_ff;
46759 }
46760
46761 /* Decode server capabilities */
46762@@ -3439,7 +3439,7 @@ out:
46763 static void
46764 install_ops(struct smb_ops *dst, struct smb_ops *src)
46765 {
46766- memcpy(dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
46767+ memcpy((void *)dst, src, sizeof(void *) * SMB_OPS_NUM_STATIC);
46768 }
46769
46770 /* < LANMAN2 */
46771diff -urNp linux-2.6.32.43/fs/smbfs/symlink.c linux-2.6.32.43/fs/smbfs/symlink.c
46772--- linux-2.6.32.43/fs/smbfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
46773+++ linux-2.6.32.43/fs/smbfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
46774@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent
46775
46776 static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
46777 {
46778- char *s = nd_get_link(nd);
46779+ const char *s = nd_get_link(nd);
46780 if (!IS_ERR(s))
46781 __putname(s);
46782 }
46783diff -urNp linux-2.6.32.43/fs/splice.c linux-2.6.32.43/fs/splice.c
46784--- linux-2.6.32.43/fs/splice.c 2011-03-27 14:31:47.000000000 -0400
46785+++ linux-2.6.32.43/fs/splice.c 2011-05-16 21:46:57.000000000 -0400
46786@@ -185,7 +185,7 @@ ssize_t splice_to_pipe(struct pipe_inode
46787 pipe_lock(pipe);
46788
46789 for (;;) {
46790- if (!pipe->readers) {
46791+ if (!atomic_read(&pipe->readers)) {
46792 send_sig(SIGPIPE, current, 0);
46793 if (!ret)
46794 ret = -EPIPE;
46795@@ -239,9 +239,9 @@ ssize_t splice_to_pipe(struct pipe_inode
46796 do_wakeup = 0;
46797 }
46798
46799- pipe->waiting_writers++;
46800+ atomic_inc(&pipe->waiting_writers);
46801 pipe_wait(pipe);
46802- pipe->waiting_writers--;
46803+ atomic_dec(&pipe->waiting_writers);
46804 }
46805
46806 pipe_unlock(pipe);
46807@@ -285,6 +285,8 @@ __generic_file_splice_read(struct file *
46808 .spd_release = spd_release_page,
46809 };
46810
46811+ pax_track_stack();
46812+
46813 index = *ppos >> PAGE_CACHE_SHIFT;
46814 loff = *ppos & ~PAGE_CACHE_MASK;
46815 req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
46816@@ -521,7 +523,7 @@ static ssize_t kernel_readv(struct file
46817 old_fs = get_fs();
46818 set_fs(get_ds());
46819 /* The cast to a user pointer is valid due to the set_fs() */
46820- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
46821+ res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos);
46822 set_fs(old_fs);
46823
46824 return res;
46825@@ -536,7 +538,7 @@ static ssize_t kernel_write(struct file
46826 old_fs = get_fs();
46827 set_fs(get_ds());
46828 /* The cast to a user pointer is valid due to the set_fs() */
46829- res = vfs_write(file, (const char __user *)buf, count, &pos);
46830+ res = vfs_write(file, (__force const char __user *)buf, count, &pos);
46831 set_fs(old_fs);
46832
46833 return res;
46834@@ -565,6 +567,8 @@ ssize_t default_file_splice_read(struct
46835 .spd_release = spd_release_page,
46836 };
46837
46838+ pax_track_stack();
46839+
46840 index = *ppos >> PAGE_CACHE_SHIFT;
46841 offset = *ppos & ~PAGE_CACHE_MASK;
46842 nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
46843@@ -578,7 +582,7 @@ ssize_t default_file_splice_read(struct
46844 goto err;
46845
46846 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
46847- vec[i].iov_base = (void __user *) page_address(page);
46848+ vec[i].iov_base = (__force void __user *) page_address(page);
46849 vec[i].iov_len = this_len;
46850 pages[i] = page;
46851 spd.nr_pages++;
46852@@ -800,10 +804,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
46853 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
46854 {
46855 while (!pipe->nrbufs) {
46856- if (!pipe->writers)
46857+ if (!atomic_read(&pipe->writers))
46858 return 0;
46859
46860- if (!pipe->waiting_writers && sd->num_spliced)
46861+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
46862 return 0;
46863
46864 if (sd->flags & SPLICE_F_NONBLOCK)
46865@@ -1140,7 +1144,7 @@ ssize_t splice_direct_to_actor(struct fi
46866 * out of the pipe right after the splice_to_pipe(). So set
46867 * PIPE_READERS appropriately.
46868 */
46869- pipe->readers = 1;
46870+ atomic_set(&pipe->readers, 1);
46871
46872 current->splice_pipe = pipe;
46873 }
46874@@ -1592,6 +1596,8 @@ static long vmsplice_to_pipe(struct file
46875 .spd_release = spd_release_page,
46876 };
46877
46878+ pax_track_stack();
46879+
46880 pipe = pipe_info(file->f_path.dentry->d_inode);
46881 if (!pipe)
46882 return -EBADF;
46883@@ -1700,9 +1706,9 @@ static int ipipe_prep(struct pipe_inode_
46884 ret = -ERESTARTSYS;
46885 break;
46886 }
46887- if (!pipe->writers)
46888+ if (!atomic_read(&pipe->writers))
46889 break;
46890- if (!pipe->waiting_writers) {
46891+ if (!atomic_read(&pipe->waiting_writers)) {
46892 if (flags & SPLICE_F_NONBLOCK) {
46893 ret = -EAGAIN;
46894 break;
46895@@ -1734,7 +1740,7 @@ static int opipe_prep(struct pipe_inode_
46896 pipe_lock(pipe);
46897
46898 while (pipe->nrbufs >= PIPE_BUFFERS) {
46899- if (!pipe->readers) {
46900+ if (!atomic_read(&pipe->readers)) {
46901 send_sig(SIGPIPE, current, 0);
46902 ret = -EPIPE;
46903 break;
46904@@ -1747,9 +1753,9 @@ static int opipe_prep(struct pipe_inode_
46905 ret = -ERESTARTSYS;
46906 break;
46907 }
46908- pipe->waiting_writers++;
46909+ atomic_inc(&pipe->waiting_writers);
46910 pipe_wait(pipe);
46911- pipe->waiting_writers--;
46912+ atomic_dec(&pipe->waiting_writers);
46913 }
46914
46915 pipe_unlock(pipe);
46916@@ -1785,14 +1791,14 @@ retry:
46917 pipe_double_lock(ipipe, opipe);
46918
46919 do {
46920- if (!opipe->readers) {
46921+ if (!atomic_read(&opipe->readers)) {
46922 send_sig(SIGPIPE, current, 0);
46923 if (!ret)
46924 ret = -EPIPE;
46925 break;
46926 }
46927
46928- if (!ipipe->nrbufs && !ipipe->writers)
46929+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
46930 break;
46931
46932 /*
46933@@ -1892,7 +1898,7 @@ static int link_pipe(struct pipe_inode_i
46934 pipe_double_lock(ipipe, opipe);
46935
46936 do {
46937- if (!opipe->readers) {
46938+ if (!atomic_read(&opipe->readers)) {
46939 send_sig(SIGPIPE, current, 0);
46940 if (!ret)
46941 ret = -EPIPE;
46942@@ -1937,7 +1943,7 @@ static int link_pipe(struct pipe_inode_i
46943 * return EAGAIN if we have the potential of some data in the
46944 * future, otherwise just return 0
46945 */
46946- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
46947+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
46948 ret = -EAGAIN;
46949
46950 pipe_unlock(ipipe);
46951diff -urNp linux-2.6.32.43/fs/sysfs/file.c linux-2.6.32.43/fs/sysfs/file.c
46952--- linux-2.6.32.43/fs/sysfs/file.c 2011-03-27 14:31:47.000000000 -0400
46953+++ linux-2.6.32.43/fs/sysfs/file.c 2011-05-04 17:56:20.000000000 -0400
46954@@ -44,7 +44,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent
46955
46956 struct sysfs_open_dirent {
46957 atomic_t refcnt;
46958- atomic_t event;
46959+ atomic_unchecked_t event;
46960 wait_queue_head_t poll;
46961 struct list_head buffers; /* goes through sysfs_buffer.list */
46962 };
46963@@ -53,7 +53,7 @@ struct sysfs_buffer {
46964 size_t count;
46965 loff_t pos;
46966 char * page;
46967- struct sysfs_ops * ops;
46968+ const struct sysfs_ops * ops;
46969 struct mutex mutex;
46970 int needs_read_fill;
46971 int event;
46972@@ -75,7 +75,7 @@ static int fill_read_buffer(struct dentr
46973 {
46974 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
46975 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46976- struct sysfs_ops * ops = buffer->ops;
46977+ const struct sysfs_ops * ops = buffer->ops;
46978 int ret = 0;
46979 ssize_t count;
46980
46981@@ -88,7 +88,7 @@ static int fill_read_buffer(struct dentr
46982 if (!sysfs_get_active_two(attr_sd))
46983 return -ENODEV;
46984
46985- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
46986+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
46987 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
46988
46989 sysfs_put_active_two(attr_sd);
46990@@ -199,7 +199,7 @@ flush_write_buffer(struct dentry * dentr
46991 {
46992 struct sysfs_dirent *attr_sd = dentry->d_fsdata;
46993 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
46994- struct sysfs_ops * ops = buffer->ops;
46995+ const struct sysfs_ops * ops = buffer->ops;
46996 int rc;
46997
46998 /* need attr_sd for attr and ops, its parent for kobj */
46999@@ -294,7 +294,7 @@ static int sysfs_get_open_dirent(struct
47000 return -ENOMEM;
47001
47002 atomic_set(&new_od->refcnt, 0);
47003- atomic_set(&new_od->event, 1);
47004+ atomic_set_unchecked(&new_od->event, 1);
47005 init_waitqueue_head(&new_od->poll);
47006 INIT_LIST_HEAD(&new_od->buffers);
47007 goto retry;
47008@@ -335,7 +335,7 @@ static int sysfs_open_file(struct inode
47009 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
47010 struct kobject *kobj = attr_sd->s_parent->s_dir.kobj;
47011 struct sysfs_buffer *buffer;
47012- struct sysfs_ops *ops;
47013+ const struct sysfs_ops *ops;
47014 int error = -EACCES;
47015 char *p;
47016
47017@@ -444,7 +444,7 @@ static unsigned int sysfs_poll(struct fi
47018
47019 sysfs_put_active_two(attr_sd);
47020
47021- if (buffer->event != atomic_read(&od->event))
47022+ if (buffer->event != atomic_read_unchecked(&od->event))
47023 goto trigger;
47024
47025 return DEFAULT_POLLMASK;
47026@@ -463,7 +463,7 @@ void sysfs_notify_dirent(struct sysfs_di
47027
47028 od = sd->s_attr.open;
47029 if (od) {
47030- atomic_inc(&od->event);
47031+ atomic_inc_unchecked(&od->event);
47032 wake_up_interruptible(&od->poll);
47033 }
47034
47035diff -urNp linux-2.6.32.43/fs/sysfs/mount.c linux-2.6.32.43/fs/sysfs/mount.c
47036--- linux-2.6.32.43/fs/sysfs/mount.c 2011-03-27 14:31:47.000000000 -0400
47037+++ linux-2.6.32.43/fs/sysfs/mount.c 2011-04-17 15:56:46.000000000 -0400
47038@@ -36,7 +36,11 @@ struct sysfs_dirent sysfs_root = {
47039 .s_name = "",
47040 .s_count = ATOMIC_INIT(1),
47041 .s_flags = SYSFS_DIR,
47042+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
47043+ .s_mode = S_IFDIR | S_IRWXU,
47044+#else
47045 .s_mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
47046+#endif
47047 .s_ino = 1,
47048 };
47049
47050diff -urNp linux-2.6.32.43/fs/sysfs/symlink.c linux-2.6.32.43/fs/sysfs/symlink.c
47051--- linux-2.6.32.43/fs/sysfs/symlink.c 2011-03-27 14:31:47.000000000 -0400
47052+++ linux-2.6.32.43/fs/sysfs/symlink.c 2011-04-17 15:56:46.000000000 -0400
47053@@ -204,7 +204,7 @@ static void *sysfs_follow_link(struct de
47054
47055 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
47056 {
47057- char *page = nd_get_link(nd);
47058+ const char *page = nd_get_link(nd);
47059 if (!IS_ERR(page))
47060 free_page((unsigned long)page);
47061 }
47062diff -urNp linux-2.6.32.43/fs/udf/balloc.c linux-2.6.32.43/fs/udf/balloc.c
47063--- linux-2.6.32.43/fs/udf/balloc.c 2011-03-27 14:31:47.000000000 -0400
47064+++ linux-2.6.32.43/fs/udf/balloc.c 2011-04-17 15:56:46.000000000 -0400
47065@@ -172,9 +172,7 @@ static void udf_bitmap_free_blocks(struc
47066
47067 mutex_lock(&sbi->s_alloc_mutex);
47068 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
47069- if (bloc->logicalBlockNum < 0 ||
47070- (bloc->logicalBlockNum + count) >
47071- partmap->s_partition_len) {
47072+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
47073 udf_debug("%d < %d || %d + %d > %d\n",
47074 bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
47075 count, partmap->s_partition_len);
47076@@ -436,9 +434,7 @@ static void udf_table_free_blocks(struct
47077
47078 mutex_lock(&sbi->s_alloc_mutex);
47079 partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
47080- if (bloc->logicalBlockNum < 0 ||
47081- (bloc->logicalBlockNum + count) >
47082- partmap->s_partition_len) {
47083+ if ((bloc->logicalBlockNum + count) > partmap->s_partition_len) {
47084 udf_debug("%d < %d || %d + %d > %d\n",
47085 bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
47086 partmap->s_partition_len);
47087diff -urNp linux-2.6.32.43/fs/udf/inode.c linux-2.6.32.43/fs/udf/inode.c
47088--- linux-2.6.32.43/fs/udf/inode.c 2011-03-27 14:31:47.000000000 -0400
47089+++ linux-2.6.32.43/fs/udf/inode.c 2011-05-16 21:46:57.000000000 -0400
47090@@ -484,6 +484,8 @@ static struct buffer_head *inode_getblk(
47091 int goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
47092 int lastblock = 0;
47093
47094+ pax_track_stack();
47095+
47096 prev_epos.offset = udf_file_entry_alloc_offset(inode);
47097 prev_epos.block = iinfo->i_location;
47098 prev_epos.bh = NULL;
47099diff -urNp linux-2.6.32.43/fs/udf/misc.c linux-2.6.32.43/fs/udf/misc.c
47100--- linux-2.6.32.43/fs/udf/misc.c 2011-03-27 14:31:47.000000000 -0400
47101+++ linux-2.6.32.43/fs/udf/misc.c 2011-04-23 12:56:11.000000000 -0400
47102@@ -286,7 +286,7 @@ void udf_new_tag(char *data, uint16_t id
47103
47104 u8 udf_tag_checksum(const struct tag *t)
47105 {
47106- u8 *data = (u8 *)t;
47107+ const u8 *data = (const u8 *)t;
47108 u8 checksum = 0;
47109 int i;
47110 for (i = 0; i < sizeof(struct tag); ++i)
47111diff -urNp linux-2.6.32.43/fs/utimes.c linux-2.6.32.43/fs/utimes.c
47112--- linux-2.6.32.43/fs/utimes.c 2011-03-27 14:31:47.000000000 -0400
47113+++ linux-2.6.32.43/fs/utimes.c 2011-04-17 15:56:46.000000000 -0400
47114@@ -1,6 +1,7 @@
47115 #include <linux/compiler.h>
47116 #include <linux/file.h>
47117 #include <linux/fs.h>
47118+#include <linux/security.h>
47119 #include <linux/linkage.h>
47120 #include <linux/mount.h>
47121 #include <linux/namei.h>
47122@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa
47123 goto mnt_drop_write_and_out;
47124 }
47125 }
47126+
47127+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
47128+ error = -EACCES;
47129+ goto mnt_drop_write_and_out;
47130+ }
47131+
47132 mutex_lock(&inode->i_mutex);
47133 error = notify_change(path->dentry, &newattrs);
47134 mutex_unlock(&inode->i_mutex);
47135diff -urNp linux-2.6.32.43/fs/xattr_acl.c linux-2.6.32.43/fs/xattr_acl.c
47136--- linux-2.6.32.43/fs/xattr_acl.c 2011-03-27 14:31:47.000000000 -0400
47137+++ linux-2.6.32.43/fs/xattr_acl.c 2011-04-17 15:56:46.000000000 -0400
47138@@ -17,8 +17,8 @@
47139 struct posix_acl *
47140 posix_acl_from_xattr(const void *value, size_t size)
47141 {
47142- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
47143- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
47144+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
47145+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
47146 int count;
47147 struct posix_acl *acl;
47148 struct posix_acl_entry *acl_e;
47149diff -urNp linux-2.6.32.43/fs/xattr.c linux-2.6.32.43/fs/xattr.c
47150--- linux-2.6.32.43/fs/xattr.c 2011-03-27 14:31:47.000000000 -0400
47151+++ linux-2.6.32.43/fs/xattr.c 2011-04-17 15:56:46.000000000 -0400
47152@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
47153 * Extended attribute SET operations
47154 */
47155 static long
47156-setxattr(struct dentry *d, const char __user *name, const void __user *value,
47157+setxattr(struct path *path, const char __user *name, const void __user *value,
47158 size_t size, int flags)
47159 {
47160 int error;
47161@@ -271,7 +271,13 @@ setxattr(struct dentry *d, const char __
47162 return PTR_ERR(kvalue);
47163 }
47164
47165- error = vfs_setxattr(d, kname, kvalue, size, flags);
47166+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
47167+ error = -EACCES;
47168+ goto out;
47169+ }
47170+
47171+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
47172+out:
47173 kfree(kvalue);
47174 return error;
47175 }
47176@@ -288,7 +294,7 @@ SYSCALL_DEFINE5(setxattr, const char __u
47177 return error;
47178 error = mnt_want_write(path.mnt);
47179 if (!error) {
47180- error = setxattr(path.dentry, name, value, size, flags);
47181+ error = setxattr(&path, name, value, size, flags);
47182 mnt_drop_write(path.mnt);
47183 }
47184 path_put(&path);
47185@@ -307,7 +313,7 @@ SYSCALL_DEFINE5(lsetxattr, const char __
47186 return error;
47187 error = mnt_want_write(path.mnt);
47188 if (!error) {
47189- error = setxattr(path.dentry, name, value, size, flags);
47190+ error = setxattr(&path, name, value, size, flags);
47191 mnt_drop_write(path.mnt);
47192 }
47193 path_put(&path);
47194@@ -318,17 +324,15 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, cons
47195 const void __user *,value, size_t, size, int, flags)
47196 {
47197 struct file *f;
47198- struct dentry *dentry;
47199 int error = -EBADF;
47200
47201 f = fget(fd);
47202 if (!f)
47203 return error;
47204- dentry = f->f_path.dentry;
47205- audit_inode(NULL, dentry);
47206+ audit_inode(NULL, f->f_path.dentry);
47207 error = mnt_want_write_file(f);
47208 if (!error) {
47209- error = setxattr(dentry, name, value, size, flags);
47210+ error = setxattr(&f->f_path, name, value, size, flags);
47211 mnt_drop_write(f->f_path.mnt);
47212 }
47213 fput(f);
47214diff -urNp linux-2.6.32.43/fs/xfs/linux-2.6/xfs_ioctl32.c linux-2.6.32.43/fs/xfs/linux-2.6/xfs_ioctl32.c
47215--- linux-2.6.32.43/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-03-27 14:31:47.000000000 -0400
47216+++ linux-2.6.32.43/fs/xfs/linux-2.6/xfs_ioctl32.c 2011-04-17 15:56:46.000000000 -0400
47217@@ -75,6 +75,7 @@ xfs_compat_ioc_fsgeometry_v1(
47218 xfs_fsop_geom_t fsgeo;
47219 int error;
47220
47221+ memset(&fsgeo, 0, sizeof(fsgeo));
47222 error = xfs_fs_geometry(mp, &fsgeo, 3);
47223 if (error)
47224 return -error;
47225diff -urNp linux-2.6.32.43/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.32.43/fs/xfs/linux-2.6/xfs_ioctl.c
47226--- linux-2.6.32.43/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 17:00:52.000000000 -0400
47227+++ linux-2.6.32.43/fs/xfs/linux-2.6/xfs_ioctl.c 2011-04-17 20:07:09.000000000 -0400
47228@@ -134,7 +134,7 @@ xfs_find_handle(
47229 }
47230
47231 error = -EFAULT;
47232- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
47233+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
47234 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
47235 goto out_put;
47236
47237@@ -423,7 +423,7 @@ xfs_attrlist_by_handle(
47238 if (IS_ERR(dentry))
47239 return PTR_ERR(dentry);
47240
47241- kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
47242+ kbuf = kzalloc(al_hreq.buflen, GFP_KERNEL);
47243 if (!kbuf)
47244 goto out_dput;
47245
47246@@ -697,7 +697,7 @@ xfs_ioc_fsgeometry_v1(
47247 xfs_mount_t *mp,
47248 void __user *arg)
47249 {
47250- xfs_fsop_geom_t fsgeo;
47251+ xfs_fsop_geom_t fsgeo;
47252 int error;
47253
47254 error = xfs_fs_geometry(mp, &fsgeo, 3);
47255diff -urNp linux-2.6.32.43/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.32.43/fs/xfs/linux-2.6/xfs_iops.c
47256--- linux-2.6.32.43/fs/xfs/linux-2.6/xfs_iops.c 2011-03-27 14:31:47.000000000 -0400
47257+++ linux-2.6.32.43/fs/xfs/linux-2.6/xfs_iops.c 2011-04-17 15:56:46.000000000 -0400
47258@@ -468,7 +468,7 @@ xfs_vn_put_link(
47259 struct nameidata *nd,
47260 void *p)
47261 {
47262- char *s = nd_get_link(nd);
47263+ const char *s = nd_get_link(nd);
47264
47265 if (!IS_ERR(s))
47266 kfree(s);
47267diff -urNp linux-2.6.32.43/fs/xfs/xfs_bmap.c linux-2.6.32.43/fs/xfs/xfs_bmap.c
47268--- linux-2.6.32.43/fs/xfs/xfs_bmap.c 2011-03-27 14:31:47.000000000 -0400
47269+++ linux-2.6.32.43/fs/xfs/xfs_bmap.c 2011-04-17 15:56:46.000000000 -0400
47270@@ -360,7 +360,7 @@ xfs_bmap_validate_ret(
47271 int nmap,
47272 int ret_nmap);
47273 #else
47274-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
47275+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0)
47276 #endif /* DEBUG */
47277
47278 #if defined(XFS_RW_TRACE)
47279diff -urNp linux-2.6.32.43/fs/xfs/xfs_dir2_sf.c linux-2.6.32.43/fs/xfs/xfs_dir2_sf.c
47280--- linux-2.6.32.43/fs/xfs/xfs_dir2_sf.c 2011-03-27 14:31:47.000000000 -0400
47281+++ linux-2.6.32.43/fs/xfs/xfs_dir2_sf.c 2011-04-18 22:07:30.000000000 -0400
47282@@ -779,7 +779,15 @@ xfs_dir2_sf_getdents(
47283 }
47284
47285 ino = xfs_dir2_sf_get_inumber(sfp, xfs_dir2_sf_inumberp(sfep));
47286- if (filldir(dirent, sfep->name, sfep->namelen,
47287+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
47288+ char name[sfep->namelen];
47289+ memcpy(name, sfep->name, sfep->namelen);
47290+ if (filldir(dirent, name, sfep->namelen,
47291+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
47292+ *offset = off & 0x7fffffff;
47293+ return 0;
47294+ }
47295+ } else if (filldir(dirent, sfep->name, sfep->namelen,
47296 off & 0x7fffffff, ino, DT_UNKNOWN)) {
47297 *offset = off & 0x7fffffff;
47298 return 0;
47299diff -urNp linux-2.6.32.43/grsecurity/gracl_alloc.c linux-2.6.32.43/grsecurity/gracl_alloc.c
47300--- linux-2.6.32.43/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
47301+++ linux-2.6.32.43/grsecurity/gracl_alloc.c 2011-04-17 15:56:46.000000000 -0400
47302@@ -0,0 +1,105 @@
47303+#include <linux/kernel.h>
47304+#include <linux/mm.h>
47305+#include <linux/slab.h>
47306+#include <linux/vmalloc.h>
47307+#include <linux/gracl.h>
47308+#include <linux/grsecurity.h>
47309+
47310+static unsigned long alloc_stack_next = 1;
47311+static unsigned long alloc_stack_size = 1;
47312+static void **alloc_stack;
47313+
47314+static __inline__ int
47315+alloc_pop(void)
47316+{
47317+ if (alloc_stack_next == 1)
47318+ return 0;
47319+
47320+ kfree(alloc_stack[alloc_stack_next - 2]);
47321+
47322+ alloc_stack_next--;
47323+
47324+ return 1;
47325+}
47326+
47327+static __inline__ int
47328+alloc_push(void *buf)
47329+{
47330+ if (alloc_stack_next >= alloc_stack_size)
47331+ return 1;
47332+
47333+ alloc_stack[alloc_stack_next - 1] = buf;
47334+
47335+ alloc_stack_next++;
47336+
47337+ return 0;
47338+}
47339+
47340+void *
47341+acl_alloc(unsigned long len)
47342+{
47343+ void *ret = NULL;
47344+
47345+ if (!len || len > PAGE_SIZE)
47346+ goto out;
47347+
47348+ ret = kmalloc(len, GFP_KERNEL);
47349+
47350+ if (ret) {
47351+ if (alloc_push(ret)) {
47352+ kfree(ret);
47353+ ret = NULL;
47354+ }
47355+ }
47356+
47357+out:
47358+ return ret;
47359+}
47360+
47361+void *
47362+acl_alloc_num(unsigned long num, unsigned long len)
47363+{
47364+ if (!len || (num > (PAGE_SIZE / len)))
47365+ return NULL;
47366+
47367+ return acl_alloc(num * len);
47368+}
47369+
47370+void
47371+acl_free_all(void)
47372+{
47373+ if (gr_acl_is_enabled() || !alloc_stack)
47374+ return;
47375+
47376+ while (alloc_pop()) ;
47377+
47378+ if (alloc_stack) {
47379+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
47380+ kfree(alloc_stack);
47381+ else
47382+ vfree(alloc_stack);
47383+ }
47384+
47385+ alloc_stack = NULL;
47386+ alloc_stack_size = 1;
47387+ alloc_stack_next = 1;
47388+
47389+ return;
47390+}
47391+
47392+int
47393+acl_alloc_stack_init(unsigned long size)
47394+{
47395+ if ((size * sizeof (void *)) <= PAGE_SIZE)
47396+ alloc_stack =
47397+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
47398+ else
47399+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
47400+
47401+ alloc_stack_size = size;
47402+
47403+ if (!alloc_stack)
47404+ return 0;
47405+ else
47406+ return 1;
47407+}
47408diff -urNp linux-2.6.32.43/grsecurity/gracl.c linux-2.6.32.43/grsecurity/gracl.c
47409--- linux-2.6.32.43/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
47410+++ linux-2.6.32.43/grsecurity/gracl.c 2011-07-14 20:02:48.000000000 -0400
47411@@ -0,0 +1,4082 @@
47412+#include <linux/kernel.h>
47413+#include <linux/module.h>
47414+#include <linux/sched.h>
47415+#include <linux/mm.h>
47416+#include <linux/file.h>
47417+#include <linux/fs.h>
47418+#include <linux/namei.h>
47419+#include <linux/mount.h>
47420+#include <linux/tty.h>
47421+#include <linux/proc_fs.h>
47422+#include <linux/smp_lock.h>
47423+#include <linux/slab.h>
47424+#include <linux/vmalloc.h>
47425+#include <linux/types.h>
47426+#include <linux/sysctl.h>
47427+#include <linux/netdevice.h>
47428+#include <linux/ptrace.h>
47429+#include <linux/gracl.h>
47430+#include <linux/gralloc.h>
47431+#include <linux/grsecurity.h>
47432+#include <linux/grinternal.h>
47433+#include <linux/pid_namespace.h>
47434+#include <linux/fdtable.h>
47435+#include <linux/percpu.h>
47436+
47437+#include <asm/uaccess.h>
47438+#include <asm/errno.h>
47439+#include <asm/mman.h>
47440+
47441+static struct acl_role_db acl_role_set;
47442+static struct name_db name_set;
47443+static struct inodev_db inodev_set;
47444+
47445+/* for keeping track of userspace pointers used for subjects, so we
47446+ can share references in the kernel as well
47447+*/
47448+
47449+static struct dentry *real_root;
47450+static struct vfsmount *real_root_mnt;
47451+
47452+static struct acl_subj_map_db subj_map_set;
47453+
47454+static struct acl_role_label *default_role;
47455+
47456+static struct acl_role_label *role_list;
47457+
47458+static u16 acl_sp_role_value;
47459+
47460+extern char *gr_shared_page[4];
47461+static DEFINE_MUTEX(gr_dev_mutex);
47462+DEFINE_RWLOCK(gr_inode_lock);
47463+
47464+struct gr_arg *gr_usermode;
47465+
47466+static unsigned int gr_status __read_only = GR_STATUS_INIT;
47467+
47468+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
47469+extern void gr_clear_learn_entries(void);
47470+
47471+#ifdef CONFIG_GRKERNSEC_RESLOG
47472+extern void gr_log_resource(const struct task_struct *task,
47473+ const int res, const unsigned long wanted, const int gt);
47474+#endif
47475+
47476+unsigned char *gr_system_salt;
47477+unsigned char *gr_system_sum;
47478+
47479+static struct sprole_pw **acl_special_roles = NULL;
47480+static __u16 num_sprole_pws = 0;
47481+
47482+static struct acl_role_label *kernel_role = NULL;
47483+
47484+static unsigned int gr_auth_attempts = 0;
47485+static unsigned long gr_auth_expires = 0UL;
47486+
47487+#ifdef CONFIG_NET
47488+extern struct vfsmount *sock_mnt;
47489+#endif
47490+extern struct vfsmount *pipe_mnt;
47491+extern struct vfsmount *shm_mnt;
47492+#ifdef CONFIG_HUGETLBFS
47493+extern struct vfsmount *hugetlbfs_vfsmount;
47494+#endif
47495+
47496+static struct acl_object_label *fakefs_obj_rw;
47497+static struct acl_object_label *fakefs_obj_rwx;
47498+
47499+extern int gr_init_uidset(void);
47500+extern void gr_free_uidset(void);
47501+extern void gr_remove_uid(uid_t uid);
47502+extern int gr_find_uid(uid_t uid);
47503+
47504+__inline__ int
47505+gr_acl_is_enabled(void)
47506+{
47507+ return (gr_status & GR_READY);
47508+}
47509+
47510+#ifdef CONFIG_BTRFS_FS
47511+extern dev_t get_btrfs_dev_from_inode(struct inode *inode);
47512+extern int btrfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat);
47513+#endif
47514+
47515+static inline dev_t __get_dev(const struct dentry *dentry)
47516+{
47517+#ifdef CONFIG_BTRFS_FS
47518+ if (dentry->d_inode->i_op && dentry->d_inode->i_op->getattr == &btrfs_getattr)
47519+ return get_btrfs_dev_from_inode(dentry->d_inode);
47520+ else
47521+#endif
47522+ return dentry->d_inode->i_sb->s_dev;
47523+}
47524+
47525+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
47526+{
47527+ return __get_dev(dentry);
47528+}
47529+
47530+static char gr_task_roletype_to_char(struct task_struct *task)
47531+{
47532+ switch (task->role->roletype &
47533+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
47534+ GR_ROLE_SPECIAL)) {
47535+ case GR_ROLE_DEFAULT:
47536+ return 'D';
47537+ case GR_ROLE_USER:
47538+ return 'U';
47539+ case GR_ROLE_GROUP:
47540+ return 'G';
47541+ case GR_ROLE_SPECIAL:
47542+ return 'S';
47543+ }
47544+
47545+ return 'X';
47546+}
47547+
47548+char gr_roletype_to_char(void)
47549+{
47550+ return gr_task_roletype_to_char(current);
47551+}
47552+
47553+__inline__ int
47554+gr_acl_tpe_check(void)
47555+{
47556+ if (unlikely(!(gr_status & GR_READY)))
47557+ return 0;
47558+ if (current->role->roletype & GR_ROLE_TPE)
47559+ return 1;
47560+ else
47561+ return 0;
47562+}
47563+
47564+int
47565+gr_handle_rawio(const struct inode *inode)
47566+{
47567+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
47568+ if (inode && S_ISBLK(inode->i_mode) &&
47569+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
47570+ !capable(CAP_SYS_RAWIO))
47571+ return 1;
47572+#endif
47573+ return 0;
47574+}
47575+
47576+static int
47577+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
47578+{
47579+ if (likely(lena != lenb))
47580+ return 0;
47581+
47582+ return !memcmp(a, b, lena);
47583+}
47584+
47585+/* this must be called with vfsmount_lock and dcache_lock held */
47586+
47587+static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
47588+ struct dentry *root, struct vfsmount *rootmnt,
47589+ char *buffer, int buflen)
47590+{
47591+ char * end = buffer+buflen;
47592+ char * retval;
47593+ int namelen;
47594+
47595+ *--end = '\0';
47596+ buflen--;
47597+
47598+ if (buflen < 1)
47599+ goto Elong;
47600+ /* Get '/' right */
47601+ retval = end-1;
47602+ *retval = '/';
47603+
47604+ for (;;) {
47605+ struct dentry * parent;
47606+
47607+ if (dentry == root && vfsmnt == rootmnt)
47608+ break;
47609+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
47610+ /* Global root? */
47611+ if (vfsmnt->mnt_parent == vfsmnt)
47612+ goto global_root;
47613+ dentry = vfsmnt->mnt_mountpoint;
47614+ vfsmnt = vfsmnt->mnt_parent;
47615+ continue;
47616+ }
47617+ parent = dentry->d_parent;
47618+ prefetch(parent);
47619+ namelen = dentry->d_name.len;
47620+ buflen -= namelen + 1;
47621+ if (buflen < 0)
47622+ goto Elong;
47623+ end -= namelen;
47624+ memcpy(end, dentry->d_name.name, namelen);
47625+ *--end = '/';
47626+ retval = end;
47627+ dentry = parent;
47628+ }
47629+
47630+out:
47631+ return retval;
47632+
47633+global_root:
47634+ namelen = dentry->d_name.len;
47635+ buflen -= namelen;
47636+ if (buflen < 0)
47637+ goto Elong;
47638+ retval -= namelen-1; /* hit the slash */
47639+ memcpy(retval, dentry->d_name.name, namelen);
47640+ goto out;
47641+Elong:
47642+ retval = ERR_PTR(-ENAMETOOLONG);
47643+ goto out;
47644+}
47645+
47646+static char *
47647+gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt,
47648+ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen)
47649+{
47650+ char *retval;
47651+
47652+ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen);
47653+ if (unlikely(IS_ERR(retval)))
47654+ retval = strcpy(buf, "<path too long>");
47655+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
47656+ retval[1] = '\0';
47657+
47658+ return retval;
47659+}
47660+
47661+static char *
47662+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47663+ char *buf, int buflen)
47664+{
47665+ char *res;
47666+
47667+ /* we can use real_root, real_root_mnt, because this is only called
47668+ by the RBAC system */
47669+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen);
47670+
47671+ return res;
47672+}
47673+
47674+static char *
47675+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
47676+ char *buf, int buflen)
47677+{
47678+ char *res;
47679+ struct dentry *root;
47680+ struct vfsmount *rootmnt;
47681+ struct task_struct *reaper = &init_task;
47682+
47683+ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
47684+ read_lock(&reaper->fs->lock);
47685+ root = dget(reaper->fs->root.dentry);
47686+ rootmnt = mntget(reaper->fs->root.mnt);
47687+ read_unlock(&reaper->fs->lock);
47688+
47689+ spin_lock(&dcache_lock);
47690+ spin_lock(&vfsmount_lock);
47691+ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
47692+ spin_unlock(&vfsmount_lock);
47693+ spin_unlock(&dcache_lock);
47694+
47695+ dput(root);
47696+ mntput(rootmnt);
47697+ return res;
47698+}
47699+
47700+static char *
47701+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
47702+{
47703+ char *ret;
47704+ spin_lock(&dcache_lock);
47705+ spin_lock(&vfsmount_lock);
47706+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47707+ PAGE_SIZE);
47708+ spin_unlock(&vfsmount_lock);
47709+ spin_unlock(&dcache_lock);
47710+ return ret;
47711+}
47712+
47713+char *
47714+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
47715+{
47716+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
47717+ PAGE_SIZE);
47718+}
47719+
47720+char *
47721+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
47722+{
47723+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
47724+ PAGE_SIZE);
47725+}
47726+
47727+char *
47728+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
47729+{
47730+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
47731+ PAGE_SIZE);
47732+}
47733+
47734+char *
47735+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
47736+{
47737+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
47738+ PAGE_SIZE);
47739+}
47740+
47741+char *
47742+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
47743+{
47744+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
47745+ PAGE_SIZE);
47746+}
47747+
47748+__inline__ __u32
47749+to_gr_audit(const __u32 reqmode)
47750+{
47751+ /* masks off auditable permission flags, then shifts them to create
47752+ auditing flags, and adds the special case of append auditing if
47753+ we're requesting write */
47754+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
47755+}
47756+
47757+struct acl_subject_label *
47758+lookup_subject_map(const struct acl_subject_label *userp)
47759+{
47760+ unsigned int index = shash(userp, subj_map_set.s_size);
47761+ struct subject_map *match;
47762+
47763+ match = subj_map_set.s_hash[index];
47764+
47765+ while (match && match->user != userp)
47766+ match = match->next;
47767+
47768+ if (match != NULL)
47769+ return match->kernel;
47770+ else
47771+ return NULL;
47772+}
47773+
47774+static void
47775+insert_subj_map_entry(struct subject_map *subjmap)
47776+{
47777+ unsigned int index = shash(subjmap->user, subj_map_set.s_size);
47778+ struct subject_map **curr;
47779+
47780+ subjmap->prev = NULL;
47781+
47782+ curr = &subj_map_set.s_hash[index];
47783+ if (*curr != NULL)
47784+ (*curr)->prev = subjmap;
47785+
47786+ subjmap->next = *curr;
47787+ *curr = subjmap;
47788+
47789+ return;
47790+}
47791+
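+/* added descriptive comment (not in the original patch): map a uid/gid to its
+   role: try user (and user-domain) roles first, then group roles, then the
+   default role, honouring any per-role allowed-IP restrictions */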
47792+static struct acl_role_label *
47793+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
47794+ const gid_t gid)
47795+{
47796+ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
47797+ struct acl_role_label *match;
47798+ struct role_allowed_ip *ipp;
47799+ unsigned int x;
47800+ u32 curr_ip = task->signal->curr_ip;
47801+
47802+ task->signal->saved_ip = curr_ip;
47803+
47804+ match = acl_role_set.r_hash[index];
47805+
47806+ while (match) {
47807+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
47808+ for (x = 0; x < match->domain_child_num; x++) {
47809+ if (match->domain_children[x] == uid)
47810+ goto found;
47811+ }
47812+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
47813+ break;
47814+ match = match->next;
47815+ }
47816+found:
47817+ if (match == NULL) {
47818+ try_group:
47819+ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
47820+ match = acl_role_set.r_hash[index];
47821+
47822+ while (match) {
47823+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
47824+ for (x = 0; x < match->domain_child_num; x++) {
47825+ if (match->domain_children[x] == gid)
47826+ goto found2;
47827+ }
47828+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
47829+ break;
47830+ match = match->next;
47831+ }
47832+found2:
47833+ if (match == NULL)
47834+ match = default_role;
47835+ if (match->allowed_ips == NULL)
47836+ return match;
47837+ else {
47838+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47839+ if (likely
47840+ ((ntohl(curr_ip) & ipp->netmask) ==
47841+ (ntohl(ipp->addr) & ipp->netmask)))
47842+ return match;
47843+ }
47844+ match = default_role;
47845+ }
47846+ } else if (match->allowed_ips == NULL) {
47847+ return match;
47848+ } else {
47849+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
47850+ if (likely
47851+ ((ntohl(curr_ip) & ipp->netmask) ==
47852+ (ntohl(ipp->addr) & ipp->netmask)))
47853+ return match;
47854+ }
47855+ goto try_group;
47856+ }
47857+
47858+ return match;
47859+}
47860+
47861+struct acl_subject_label *
47862+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
47863+ const struct acl_role_label *role)
47864+{
47865+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
47866+ struct acl_subject_label *match;
47867+
47868+ match = role->subj_hash[index];
47869+
47870+ while (match && (match->inode != ino || match->device != dev ||
47871+ (match->mode & GR_DELETED))) {
47872+ match = match->next;
47873+ }
47874+
47875+ if (match && !(match->mode & GR_DELETED))
47876+ return match;
47877+ else
47878+ return NULL;
47879+}
47880+
47881+struct acl_subject_label *
47882+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
47883+ const struct acl_role_label *role)
47884+{
47885+ unsigned int index = fhash(ino, dev, role->subj_hash_size);
47886+ struct acl_subject_label *match;
47887+
47888+ match = role->subj_hash[index];
47889+
47890+ while (match && (match->inode != ino || match->device != dev ||
47891+ !(match->mode & GR_DELETED))) {
47892+ match = match->next;
47893+ }
47894+
47895+ if (match && (match->mode & GR_DELETED))
47896+ return match;
47897+ else
47898+ return NULL;
47899+}
47900+
47901+static struct acl_object_label *
47902+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
47903+ const struct acl_subject_label *subj)
47904+{
47905+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47906+ struct acl_object_label *match;
47907+
47908+ match = subj->obj_hash[index];
47909+
47910+ while (match && (match->inode != ino || match->device != dev ||
47911+ (match->mode & GR_DELETED))) {
47912+ match = match->next;
47913+ }
47914+
47915+ if (match && !(match->mode & GR_DELETED))
47916+ return match;
47917+ else
47918+ return NULL;
47919+}
47920+
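+/* added descriptive comment (not in the original patch): create-time lookup:
+   prefer an object label marked GR_DELETED for this inode/dev, otherwise fall
+   back to a live (non-deleted) label */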
47921+static struct acl_object_label *
47922+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
47923+ const struct acl_subject_label *subj)
47924+{
47925+ unsigned int index = fhash(ino, dev, subj->obj_hash_size);
47926+ struct acl_object_label *match;
47927+
47928+ match = subj->obj_hash[index];
47929+
47930+ while (match && (match->inode != ino || match->device != dev ||
47931+ !(match->mode & GR_DELETED))) {
47932+ match = match->next;
47933+ }
47934+
47935+ if (match && (match->mode & GR_DELETED))
47936+ return match;
47937+
47938+ match = subj->obj_hash[index];
47939+
47940+ while (match && (match->inode != ino || match->device != dev ||
47941+ (match->mode & GR_DELETED))) {
47942+ match = match->next;
47943+ }
47944+
47945+ if (match && !(match->mode & GR_DELETED))
47946+ return match;
47947+ else
47948+ return NULL;
47949+}
47950+
47951+static struct name_entry *
47952+lookup_name_entry(const char *name)
47953+{
47954+ unsigned int len = strlen(name);
47955+ unsigned int key = full_name_hash(name, len);
47956+ unsigned int index = key % name_set.n_size;
47957+ struct name_entry *match;
47958+
47959+ match = name_set.n_hash[index];
47960+
47961+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
47962+ match = match->next;
47963+
47964+ return match;
47965+}
47966+
47967+static struct name_entry *
47968+lookup_name_entry_create(const char *name)
47969+{
47970+ unsigned int len = strlen(name);
47971+ unsigned int key = full_name_hash(name, len);
47972+ unsigned int index = key % name_set.n_size;
47973+ struct name_entry *match;
47974+
47975+ match = name_set.n_hash[index];
47976+
47977+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47978+ !match->deleted))
47979+ match = match->next;
47980+
47981+ if (match && match->deleted)
47982+ return match;
47983+
47984+ match = name_set.n_hash[index];
47985+
47986+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
47987+ match->deleted))
47988+ match = match->next;
47989+
47990+ if (match && !match->deleted)
47991+ return match;
47992+ else
47993+ return NULL;
47994+}
47995+
47996+static struct inodev_entry *
47997+lookup_inodev_entry(const ino_t ino, const dev_t dev)
47998+{
47999+ unsigned int index = fhash(ino, dev, inodev_set.i_size);
48000+ struct inodev_entry *match;
48001+
48002+ match = inodev_set.i_hash[index];
48003+
48004+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
48005+ match = match->next;
48006+
48007+ return match;
48008+}
48009+
48010+static void
48011+insert_inodev_entry(struct inodev_entry *entry)
48012+{
48013+ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
48014+ inodev_set.i_size);
48015+ struct inodev_entry **curr;
48016+
48017+ entry->prev = NULL;
48018+
48019+ curr = &inodev_set.i_hash[index];
48020+ if (*curr != NULL)
48021+ (*curr)->prev = entry;
48022+
48023+ entry->next = *curr;
48024+ *curr = entry;
48025+
48026+ return;
48027+}
48028+
48029+static void
48030+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
48031+{
48032+ unsigned int index =
48033+ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
48034+ struct acl_role_label **curr;
48035+ struct acl_role_label *tmp;
48036+
48037+ curr = &acl_role_set.r_hash[index];
48038+
48039+	/* if the role was already inserted due to domains and already has
48040+	   a chain of roles attached to it, we need to splice this
48041+	   bucket's existing chain onto the end of that chain
48042+	*/
48043+ if (role->next) {
48044+ tmp = role->next;
48045+ while (tmp->next)
48046+ tmp = tmp->next;
48047+ tmp->next = *curr;
48048+ } else
48049+ role->next = *curr;
48050+ *curr = role;
48051+
48052+ return;
48053+}
48054+
48055+static void
48056+insert_acl_role_label(struct acl_role_label *role)
48057+{
48058+ int i;
48059+
48060+ if (role_list == NULL) {
48061+ role_list = role;
48062+ role->prev = NULL;
48063+ } else {
48064+ role->prev = role_list;
48065+ role_list = role;
48066+ }
48067+
48068+ /* used for hash chains */
48069+ role->next = NULL;
48070+
48071+ if (role->roletype & GR_ROLE_DOMAIN) {
48072+ for (i = 0; i < role->domain_child_num; i++)
48073+ __insert_acl_role_label(role, role->domain_children[i]);
48074+ } else
48075+ __insert_acl_role_label(role, role->uidgid);
48076+}
48077+
48078+static int
48079+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
48080+{
48081+ struct name_entry **curr, *nentry;
48082+ struct inodev_entry *ientry;
48083+ unsigned int len = strlen(name);
48084+ unsigned int key = full_name_hash(name, len);
48085+ unsigned int index = key % name_set.n_size;
48086+
48087+ curr = &name_set.n_hash[index];
48088+
48089+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
48090+ curr = &((*curr)->next);
48091+
48092+ if (*curr != NULL)
48093+ return 1;
48094+
48095+ nentry = acl_alloc(sizeof (struct name_entry));
48096+ if (nentry == NULL)
48097+ return 0;
48098+ ientry = acl_alloc(sizeof (struct inodev_entry));
48099+ if (ientry == NULL)
48100+ return 0;
48101+ ientry->nentry = nentry;
48102+
48103+ nentry->key = key;
48104+ nentry->name = name;
48105+ nentry->inode = inode;
48106+ nentry->device = device;
48107+ nentry->len = len;
48108+ nentry->deleted = deleted;
48109+
48110+ nentry->prev = NULL;
48111+ curr = &name_set.n_hash[index];
48112+ if (*curr != NULL)
48113+ (*curr)->prev = nentry;
48114+ nentry->next = *curr;
48115+ *curr = nentry;
48116+
48117+ /* insert us into the table searchable by inode/dev */
48118+ insert_inodev_entry(ientry);
48119+
48120+ return 1;
48121+}
48122+
48123+static void
48124+insert_acl_obj_label(struct acl_object_label *obj,
48125+ struct acl_subject_label *subj)
48126+{
48127+ unsigned int index =
48128+ fhash(obj->inode, obj->device, subj->obj_hash_size);
48129+ struct acl_object_label **curr;
48130+
48131+
48132+ obj->prev = NULL;
48133+
48134+ curr = &subj->obj_hash[index];
48135+ if (*curr != NULL)
48136+ (*curr)->prev = obj;
48137+
48138+ obj->next = *curr;
48139+ *curr = obj;
48140+
48141+ return;
48142+}
48143+
48144+static void
48145+insert_acl_subj_label(struct acl_subject_label *obj,
48146+ struct acl_role_label *role)
48147+{
48148+ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
48149+ struct acl_subject_label **curr;
48150+
48151+ obj->prev = NULL;
48152+
48153+ curr = &role->subj_hash[index];
48154+ if (*curr != NULL)
48155+ (*curr)->prev = obj;
48156+
48157+ obj->next = *curr;
48158+ *curr = obj;
48159+
48160+ return;
48161+}
48162+
48163+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
48164+
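+/* added descriptive comment (not in the original patch): pick the smallest prime
+   in table_sizes larger than the requested length; allocate with kmalloc when the
+   table fits in a page, vmalloc otherwise */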
48165+static void *
48166+create_table(__u32 * len, int elementsize)
48167+{
48168+ unsigned int table_sizes[] = {
48169+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
48170+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
48171+ 4194301, 8388593, 16777213, 33554393, 67108859
48172+ };
48173+ void *newtable = NULL;
48174+ unsigned int pwr = 0;
48175+
48176+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
48177+ table_sizes[pwr] <= *len)
48178+ pwr++;
48179+
48180+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
48181+ return newtable;
48182+
48183+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
48184+ newtable =
48185+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
48186+ else
48187+ newtable = vmalloc(table_sizes[pwr] * elementsize);
48188+
48189+ *len = table_sizes[pwr];
48190+
48191+ return newtable;
48192+}
48193+
48194+static int
48195+init_variables(const struct gr_arg *arg)
48196+{
48197+ struct task_struct *reaper = &init_task;
48198+ unsigned int stacksize;
48199+
48200+ subj_map_set.s_size = arg->role_db.num_subjects;
48201+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
48202+ name_set.n_size = arg->role_db.num_objects;
48203+ inodev_set.i_size = arg->role_db.num_objects;
48204+
48205+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
48206+ !name_set.n_size || !inodev_set.i_size)
48207+ return 1;
48208+
48209+ if (!gr_init_uidset())
48210+ return 1;
48211+
48212+ /* set up the stack that holds allocation info */
48213+
48214+ stacksize = arg->role_db.num_pointers + 5;
48215+
48216+ if (!acl_alloc_stack_init(stacksize))
48217+ return 1;
48218+
48219+ /* grab reference for the real root dentry and vfsmount */
48220+ read_lock(&reaper->fs->lock);
48221+ real_root = dget(reaper->fs->root.dentry);
48222+ real_root_mnt = mntget(reaper->fs->root.mnt);
48223+ read_unlock(&reaper->fs->lock);
48224+
48225+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48226+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root), real_root->d_inode->i_ino);
48227+#endif
48228+
48229+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
48230+ if (fakefs_obj_rw == NULL)
48231+ return 1;
48232+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
48233+
48234+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
48235+ if (fakefs_obj_rwx == NULL)
48236+ return 1;
48237+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
48238+
48239+ subj_map_set.s_hash =
48240+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
48241+ acl_role_set.r_hash =
48242+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
48243+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
48244+ inodev_set.i_hash =
48245+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
48246+
48247+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
48248+ !name_set.n_hash || !inodev_set.i_hash)
48249+ return 1;
48250+
48251+ memset(subj_map_set.s_hash, 0,
48252+ sizeof(struct subject_map *) * subj_map_set.s_size);
48253+ memset(acl_role_set.r_hash, 0,
48254+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
48255+ memset(name_set.n_hash, 0,
48256+ sizeof (struct name_entry *) * name_set.n_size);
48257+ memset(inodev_set.i_hash, 0,
48258+ sizeof (struct inodev_entry *) * inodev_set.i_size);
48259+
48260+ return 0;
48261+}
48262+
48263+/* free information not needed after startup
48264+ currently contains user->kernel pointer mappings for subjects
48265+*/
48266+
48267+static void
48268+free_init_variables(void)
48269+{
48270+ __u32 i;
48271+
48272+ if (subj_map_set.s_hash) {
48273+ for (i = 0; i < subj_map_set.s_size; i++) {
48274+ if (subj_map_set.s_hash[i]) {
48275+ kfree(subj_map_set.s_hash[i]);
48276+ subj_map_set.s_hash[i] = NULL;
48277+ }
48278+ }
48279+
48280+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
48281+ PAGE_SIZE)
48282+ kfree(subj_map_set.s_hash);
48283+ else
48284+ vfree(subj_map_set.s_hash);
48285+ }
48286+
48287+ return;
48288+}
48289+
48290+static void
48291+free_variables(void)
48292+{
48293+ struct acl_subject_label *s;
48294+ struct acl_role_label *r;
48295+ struct task_struct *task, *task2;
48296+ unsigned int x;
48297+
48298+ gr_clear_learn_entries();
48299+
48300+ read_lock(&tasklist_lock);
48301+ do_each_thread(task2, task) {
48302+ task->acl_sp_role = 0;
48303+ task->acl_role_id = 0;
48304+ task->acl = NULL;
48305+ task->role = NULL;
48306+ } while_each_thread(task2, task);
48307+ read_unlock(&tasklist_lock);
48308+
48309+ /* release the reference to the real root dentry and vfsmount */
48310+ if (real_root)
48311+ dput(real_root);
48312+ real_root = NULL;
48313+ if (real_root_mnt)
48314+ mntput(real_root_mnt);
48315+ real_root_mnt = NULL;
48316+
48317+ /* free all object hash tables */
48318+
48319+ FOR_EACH_ROLE_START(r)
48320+ if (r->subj_hash == NULL)
48321+ goto next_role;
48322+ FOR_EACH_SUBJECT_START(r, s, x)
48323+ if (s->obj_hash == NULL)
48324+ break;
48325+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
48326+ kfree(s->obj_hash);
48327+ else
48328+ vfree(s->obj_hash);
48329+ FOR_EACH_SUBJECT_END(s, x)
48330+ FOR_EACH_NESTED_SUBJECT_START(r, s)
48331+ if (s->obj_hash == NULL)
48332+ break;
48333+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
48334+ kfree(s->obj_hash);
48335+ else
48336+ vfree(s->obj_hash);
48337+ FOR_EACH_NESTED_SUBJECT_END(s)
48338+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
48339+ kfree(r->subj_hash);
48340+ else
48341+ vfree(r->subj_hash);
48342+ r->subj_hash = NULL;
48343+next_role:
48344+ FOR_EACH_ROLE_END(r)
48345+
48346+ acl_free_all();
48347+
48348+ if (acl_role_set.r_hash) {
48349+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
48350+ PAGE_SIZE)
48351+ kfree(acl_role_set.r_hash);
48352+ else
48353+ vfree(acl_role_set.r_hash);
48354+ }
48355+ if (name_set.n_hash) {
48356+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
48357+ PAGE_SIZE)
48358+ kfree(name_set.n_hash);
48359+ else
48360+ vfree(name_set.n_hash);
48361+ }
48362+
48363+ if (inodev_set.i_hash) {
48364+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
48365+ PAGE_SIZE)
48366+ kfree(inodev_set.i_hash);
48367+ else
48368+ vfree(inodev_set.i_hash);
48369+ }
48370+
48371+ gr_free_uidset();
48372+
48373+ memset(&name_set, 0, sizeof (struct name_db));
48374+ memset(&inodev_set, 0, sizeof (struct inodev_db));
48375+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
48376+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
48377+
48378+ default_role = NULL;
48379+ role_list = NULL;
48380+
48381+ return;
48382+}
48383+
48384+static __u32
48385+count_user_objs(struct acl_object_label *userp)
48386+{
48387+ struct acl_object_label o_tmp;
48388+ __u32 num = 0;
48389+
48390+ while (userp) {
48391+ if (copy_from_user(&o_tmp, userp,
48392+ sizeof (struct acl_object_label)))
48393+ break;
48394+
48395+ userp = o_tmp.prev;
48396+ num++;
48397+ }
48398+
48399+ return num;
48400+}
48401+
48402+static struct acl_subject_label *
48403+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
48404+
48405+static int
48406+copy_user_glob(struct acl_object_label *obj)
48407+{
48408+ struct acl_object_label *g_tmp, **guser;
48409+ unsigned int len;
48410+ char *tmp;
48411+
48412+ if (obj->globbed == NULL)
48413+ return 0;
48414+
48415+ guser = &obj->globbed;
48416+ while (*guser) {
48417+ g_tmp = (struct acl_object_label *)
48418+ acl_alloc(sizeof (struct acl_object_label));
48419+ if (g_tmp == NULL)
48420+ return -ENOMEM;
48421+
48422+ if (copy_from_user(g_tmp, *guser,
48423+ sizeof (struct acl_object_label)))
48424+ return -EFAULT;
48425+
48426+ len = strnlen_user(g_tmp->filename, PATH_MAX);
48427+
48428+ if (!len || len >= PATH_MAX)
48429+ return -EINVAL;
48430+
48431+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48432+ return -ENOMEM;
48433+
48434+ if (copy_from_user(tmp, g_tmp->filename, len))
48435+ return -EFAULT;
48436+ tmp[len-1] = '\0';
48437+ g_tmp->filename = tmp;
48438+
48439+ *guser = g_tmp;
48440+ guser = &(g_tmp->next);
48441+ }
48442+
48443+ return 0;
48444+}
48445+
48446+static int
48447+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
48448+ struct acl_role_label *role)
48449+{
48450+ struct acl_object_label *o_tmp;
48451+ unsigned int len;
48452+ int ret;
48453+ char *tmp;
48454+
48455+ while (userp) {
48456+ if ((o_tmp = (struct acl_object_label *)
48457+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
48458+ return -ENOMEM;
48459+
48460+ if (copy_from_user(o_tmp, userp,
48461+ sizeof (struct acl_object_label)))
48462+ return -EFAULT;
48463+
48464+ userp = o_tmp->prev;
48465+
48466+ len = strnlen_user(o_tmp->filename, PATH_MAX);
48467+
48468+ if (!len || len >= PATH_MAX)
48469+ return -EINVAL;
48470+
48471+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48472+ return -ENOMEM;
48473+
48474+ if (copy_from_user(tmp, o_tmp->filename, len))
48475+ return -EFAULT;
48476+ tmp[len-1] = '\0';
48477+ o_tmp->filename = tmp;
48478+
48479+ insert_acl_obj_label(o_tmp, subj);
48480+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
48481+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
48482+ return -ENOMEM;
48483+
48484+ ret = copy_user_glob(o_tmp);
48485+ if (ret)
48486+ return ret;
48487+
48488+ if (o_tmp->nested) {
48489+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
48490+ if (IS_ERR(o_tmp->nested))
48491+ return PTR_ERR(o_tmp->nested);
48492+
48493+ /* insert into nested subject list */
48494+ o_tmp->nested->next = role->hash->first;
48495+ role->hash->first = o_tmp->nested;
48496+ }
48497+ }
48498+
48499+ return 0;
48500+}
48501+
48502+static __u32
48503+count_user_subjs(struct acl_subject_label *userp)
48504+{
48505+ struct acl_subject_label s_tmp;
48506+ __u32 num = 0;
48507+
48508+ while (userp) {
48509+ if (copy_from_user(&s_tmp, userp,
48510+ sizeof (struct acl_subject_label)))
48511+ break;
48512+
48513+ userp = s_tmp.prev;
48514+ /* do not count nested subjects against this count, since
48515+ they are not included in the hash table, but are
48516+ attached to objects. We have already counted
48517+ the subjects in userspace for the allocation
48518+ stack
48519+ */
48520+ if (!(s_tmp.mode & GR_NESTED))
48521+ num++;
48522+ }
48523+
48524+ return num;
48525+}
48526+
48527+static int
48528+copy_user_allowedips(struct acl_role_label *rolep)
48529+{
48530+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
48531+
48532+ ruserip = rolep->allowed_ips;
48533+
48534+ while (ruserip) {
48535+ rlast = rtmp;
48536+
48537+ if ((rtmp = (struct role_allowed_ip *)
48538+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
48539+ return -ENOMEM;
48540+
48541+ if (copy_from_user(rtmp, ruserip,
48542+ sizeof (struct role_allowed_ip)))
48543+ return -EFAULT;
48544+
48545+ ruserip = rtmp->prev;
48546+
48547+ if (!rlast) {
48548+ rtmp->prev = NULL;
48549+ rolep->allowed_ips = rtmp;
48550+ } else {
48551+ rlast->next = rtmp;
48552+ rtmp->prev = rlast;
48553+ }
48554+
48555+ if (!ruserip)
48556+ rtmp->next = NULL;
48557+ }
48558+
48559+ return 0;
48560+}
48561+
48562+static int
48563+copy_user_transitions(struct acl_role_label *rolep)
48564+{
48565+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
48566+
48567+ unsigned int len;
48568+ char *tmp;
48569+
48570+ rusertp = rolep->transitions;
48571+
48572+ while (rusertp) {
48573+ rlast = rtmp;
48574+
48575+ if ((rtmp = (struct role_transition *)
48576+ acl_alloc(sizeof (struct role_transition))) == NULL)
48577+ return -ENOMEM;
48578+
48579+ if (copy_from_user(rtmp, rusertp,
48580+ sizeof (struct role_transition)))
48581+ return -EFAULT;
48582+
48583+ rusertp = rtmp->prev;
48584+
48585+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
48586+
48587+ if (!len || len >= GR_SPROLE_LEN)
48588+ return -EINVAL;
48589+
48590+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48591+ return -ENOMEM;
48592+
48593+ if (copy_from_user(tmp, rtmp->rolename, len))
48594+ return -EFAULT;
48595+ tmp[len-1] = '\0';
48596+ rtmp->rolename = tmp;
48597+
48598+ if (!rlast) {
48599+ rtmp->prev = NULL;
48600+ rolep->transitions = rtmp;
48601+ } else {
48602+ rlast->next = rtmp;
48603+ rtmp->prev = rlast;
48604+ }
48605+
48606+ if (!rusertp)
48607+ rtmp->next = NULL;
48608+ }
48609+
48610+ return 0;
48611+}
48612+
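+/* added descriptive comment (not in the original patch): deep-copy one subject
+   from userspace, memoized via the subject map so a subject referenced more than
+   once is only copied into the kernel once */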
48613+static struct acl_subject_label *
48614+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
48615+{
48616+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
48617+ unsigned int len;
48618+ char *tmp;
48619+ __u32 num_objs;
48620+ struct acl_ip_label **i_tmp, *i_utmp2;
48621+ struct gr_hash_struct ghash;
48622+ struct subject_map *subjmap;
48623+ unsigned int i_num;
48624+ int err;
48625+
48626+ s_tmp = lookup_subject_map(userp);
48627+
48628+ /* we've already copied this subject into the kernel, just return
48629+ the reference to it, and don't copy it over again
48630+ */
48631+ if (s_tmp)
48632+ return(s_tmp);
48633+
48634+ if ((s_tmp = (struct acl_subject_label *)
48635+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
48636+ return ERR_PTR(-ENOMEM);
48637+
48638+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
48639+ if (subjmap == NULL)
48640+ return ERR_PTR(-ENOMEM);
48641+
48642+ subjmap->user = userp;
48643+ subjmap->kernel = s_tmp;
48644+ insert_subj_map_entry(subjmap);
48645+
48646+ if (copy_from_user(s_tmp, userp,
48647+ sizeof (struct acl_subject_label)))
48648+ return ERR_PTR(-EFAULT);
48649+
48650+ len = strnlen_user(s_tmp->filename, PATH_MAX);
48651+
48652+ if (!len || len >= PATH_MAX)
48653+ return ERR_PTR(-EINVAL);
48654+
48655+ if ((tmp = (char *) acl_alloc(len)) == NULL)
48656+ return ERR_PTR(-ENOMEM);
48657+
48658+ if (copy_from_user(tmp, s_tmp->filename, len))
48659+ return ERR_PTR(-EFAULT);
48660+ tmp[len-1] = '\0';
48661+ s_tmp->filename = tmp;
48662+
48663+ if (!strcmp(s_tmp->filename, "/"))
48664+ role->root_label = s_tmp;
48665+
48666+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
48667+ return ERR_PTR(-EFAULT);
48668+
48669+ /* copy user and group transition tables */
48670+
48671+ if (s_tmp->user_trans_num) {
48672+ uid_t *uidlist;
48673+
48674+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
48675+ if (uidlist == NULL)
48676+ return ERR_PTR(-ENOMEM);
48677+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
48678+ return ERR_PTR(-EFAULT);
48679+
48680+ s_tmp->user_transitions = uidlist;
48681+ }
48682+
48683+ if (s_tmp->group_trans_num) {
48684+ gid_t *gidlist;
48685+
48686+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
48687+ if (gidlist == NULL)
48688+ return ERR_PTR(-ENOMEM);
48689+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
48690+ return ERR_PTR(-EFAULT);
48691+
48692+ s_tmp->group_transitions = gidlist;
48693+ }
48694+
48695+ /* set up object hash table */
48696+ num_objs = count_user_objs(ghash.first);
48697+
48698+ s_tmp->obj_hash_size = num_objs;
48699+ s_tmp->obj_hash =
48700+ (struct acl_object_label **)
48701+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
48702+
48703+ if (!s_tmp->obj_hash)
48704+ return ERR_PTR(-ENOMEM);
48705+
48706+ memset(s_tmp->obj_hash, 0,
48707+ s_tmp->obj_hash_size *
48708+ sizeof (struct acl_object_label *));
48709+
48710+ /* add in objects */
48711+ err = copy_user_objs(ghash.first, s_tmp, role);
48712+
48713+ if (err)
48714+ return ERR_PTR(err);
48715+
48716+ /* set pointer for parent subject */
48717+ if (s_tmp->parent_subject) {
48718+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
48719+
48720+ if (IS_ERR(s_tmp2))
48721+ return s_tmp2;
48722+
48723+ s_tmp->parent_subject = s_tmp2;
48724+ }
48725+
48726+ /* add in ip acls */
48727+
48728+ if (!s_tmp->ip_num) {
48729+ s_tmp->ips = NULL;
48730+ goto insert;
48731+ }
48732+
48733+ i_tmp =
48734+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
48735+ sizeof (struct acl_ip_label *));
48736+
48737+ if (!i_tmp)
48738+ return ERR_PTR(-ENOMEM);
48739+
48740+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
48741+ *(i_tmp + i_num) =
48742+ (struct acl_ip_label *)
48743+ acl_alloc(sizeof (struct acl_ip_label));
48744+ if (!*(i_tmp + i_num))
48745+ return ERR_PTR(-ENOMEM);
48746+
48747+ if (copy_from_user
48748+ (&i_utmp2, s_tmp->ips + i_num,
48749+ sizeof (struct acl_ip_label *)))
48750+ return ERR_PTR(-EFAULT);
48751+
48752+ if (copy_from_user
48753+ (*(i_tmp + i_num), i_utmp2,
48754+ sizeof (struct acl_ip_label)))
48755+ return ERR_PTR(-EFAULT);
48756+
48757+ if ((*(i_tmp + i_num))->iface == NULL)
48758+ continue;
48759+
48760+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
48761+ if (!len || len >= IFNAMSIZ)
48762+ return ERR_PTR(-EINVAL);
48763+ tmp = acl_alloc(len);
48764+ if (tmp == NULL)
48765+ return ERR_PTR(-ENOMEM);
48766+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
48767+ return ERR_PTR(-EFAULT);
48768+ (*(i_tmp + i_num))->iface = tmp;
48769+ }
48770+
48771+ s_tmp->ips = i_tmp;
48772+
48773+insert:
48774+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
48775+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
48776+ return ERR_PTR(-ENOMEM);
48777+
48778+ return s_tmp;
48779+}
48780+
48781+static int
48782+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
48783+{
48784+ struct acl_subject_label s_pre;
48785+ struct acl_subject_label * ret;
48786+ int err;
48787+
48788+ while (userp) {
48789+ if (copy_from_user(&s_pre, userp,
48790+ sizeof (struct acl_subject_label)))
48791+ return -EFAULT;
48792+
48793+ /* do not add nested subjects here, add
48794+ while parsing objects
48795+ */
48796+
48797+ if (s_pre.mode & GR_NESTED) {
48798+ userp = s_pre.prev;
48799+ continue;
48800+ }
48801+
48802+ ret = do_copy_user_subj(userp, role);
48803+
48804+ err = PTR_ERR(ret);
48805+ if (IS_ERR(ret))
48806+ return err;
48807+
48808+ insert_acl_subj_label(ret, role);
48809+
48810+ userp = s_pre.prev;
48811+ }
48812+
48813+ return 0;
48814+}
48815+
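+/* added descriptive comment (not in the original patch): pull the compiled policy
+   in from the userspace loader: special role passwords, then every role with its
+   allowed IPs, transitions, domain children and subjects */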
48816+static int
48817+copy_user_acl(struct gr_arg *arg)
48818+{
48819+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
48820+ struct sprole_pw *sptmp;
48821+ struct gr_hash_struct *ghash;
48822+ uid_t *domainlist;
48823+ unsigned int r_num;
48824+ unsigned int len;
48825+ char *tmp;
48826+ int err = 0;
48827+ __u16 i;
48828+ __u32 num_subjs;
48829+
48830+ /* we need a default and kernel role */
48831+ if (arg->role_db.num_roles < 2)
48832+ return -EINVAL;
48833+
48834+ /* copy special role authentication info from userspace */
48835+
48836+ num_sprole_pws = arg->num_sprole_pws;
48837+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
48838+
48839+ if (!acl_special_roles) {
48840+ err = -ENOMEM;
48841+ goto cleanup;
48842+ }
48843+
48844+ for (i = 0; i < num_sprole_pws; i++) {
48845+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
48846+ if (!sptmp) {
48847+ err = -ENOMEM;
48848+ goto cleanup;
48849+ }
48850+ if (copy_from_user(sptmp, arg->sprole_pws + i,
48851+ sizeof (struct sprole_pw))) {
48852+ err = -EFAULT;
48853+ goto cleanup;
48854+ }
48855+
48856+ len =
48857+ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
48858+
48859+ if (!len || len >= GR_SPROLE_LEN) {
48860+ err = -EINVAL;
48861+ goto cleanup;
48862+ }
48863+
48864+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
48865+ err = -ENOMEM;
48866+ goto cleanup;
48867+ }
48868+
48869+ if (copy_from_user(tmp, sptmp->rolename, len)) {
48870+ err = -EFAULT;
48871+ goto cleanup;
48872+ }
48873+ tmp[len-1] = '\0';
48874+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
48875+ printk(KERN_ALERT "Copying special role %s\n", tmp);
48876+#endif
48877+ sptmp->rolename = tmp;
48878+ acl_special_roles[i] = sptmp;
48879+ }
48880+
48881+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
48882+
48883+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
48884+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
48885+
48886+ if (!r_tmp) {
48887+ err = -ENOMEM;
48888+ goto cleanup;
48889+ }
48890+
48891+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
48892+ sizeof (struct acl_role_label *))) {
48893+ err = -EFAULT;
48894+ goto cleanup;
48895+ }
48896+
48897+ if (copy_from_user(r_tmp, r_utmp2,
48898+ sizeof (struct acl_role_label))) {
48899+ err = -EFAULT;
48900+ goto cleanup;
48901+ }
48902+
48903+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
48904+
48905+		if (!len || len >= GR_SPROLE_LEN) {
48906+ err = -EINVAL;
48907+ goto cleanup;
48908+ }
48909+
48910+ if ((tmp = (char *) acl_alloc(len)) == NULL) {
48911+ err = -ENOMEM;
48912+ goto cleanup;
48913+ }
48914+ if (copy_from_user(tmp, r_tmp->rolename, len)) {
48915+ err = -EFAULT;
48916+ goto cleanup;
48917+ }
48918+ tmp[len-1] = '\0';
48919+ r_tmp->rolename = tmp;
48920+
48921+ if (!strcmp(r_tmp->rolename, "default")
48922+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
48923+ default_role = r_tmp;
48924+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
48925+ kernel_role = r_tmp;
48926+ }
48927+
48928+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
48929+ err = -ENOMEM;
48930+ goto cleanup;
48931+ }
48932+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
48933+ err = -EFAULT;
48934+ goto cleanup;
48935+ }
48936+
48937+ r_tmp->hash = ghash;
48938+
48939+ num_subjs = count_user_subjs(r_tmp->hash->first);
48940+
48941+ r_tmp->subj_hash_size = num_subjs;
48942+ r_tmp->subj_hash =
48943+ (struct acl_subject_label **)
48944+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
48945+
48946+ if (!r_tmp->subj_hash) {
48947+ err = -ENOMEM;
48948+ goto cleanup;
48949+ }
48950+
48951+ err = copy_user_allowedips(r_tmp);
48952+ if (err)
48953+ goto cleanup;
48954+
48955+ /* copy domain info */
48956+ if (r_tmp->domain_children != NULL) {
48957+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
48958+ if (domainlist == NULL) {
48959+ err = -ENOMEM;
48960+ goto cleanup;
48961+ }
48962+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
48963+ err = -EFAULT;
48964+ goto cleanup;
48965+ }
48966+ r_tmp->domain_children = domainlist;
48967+ }
48968+
48969+ err = copy_user_transitions(r_tmp);
48970+ if (err)
48971+ goto cleanup;
48972+
48973+ memset(r_tmp->subj_hash, 0,
48974+ r_tmp->subj_hash_size *
48975+ sizeof (struct acl_subject_label *));
48976+
48977+ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
48978+
48979+ if (err)
48980+ goto cleanup;
48981+
48982+ /* set nested subject list to null */
48983+ r_tmp->hash->first = NULL;
48984+
48985+ insert_acl_role_label(r_tmp);
48986+ }
48987+
48988+ goto return_err;
48989+ cleanup:
48990+ free_variables();
48991+ return_err:
48992+ return err;
48993+
48994+}
48995+
48996+static int
48997+gracl_init(struct gr_arg *args)
48998+{
48999+ int error = 0;
49000+
49001+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
49002+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
49003+
49004+ if (init_variables(args)) {
49005+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
49006+ error = -ENOMEM;
49007+ free_variables();
49008+ goto out;
49009+ }
49010+
49011+ error = copy_user_acl(args);
49012+ free_init_variables();
49013+ if (error) {
49014+ free_variables();
49015+ goto out;
49016+ }
49017+
49018+ if ((error = gr_set_acls(0))) {
49019+ free_variables();
49020+ goto out;
49021+ }
49022+
49023+ pax_open_kernel();
49024+ gr_status |= GR_READY;
49025+ pax_close_kernel();
49026+
49027+ out:
49028+ return error;
49029+}
49030+
49031+/* derived from glibc fnmatch(); 0: match, 1: no match */
49032+
49033+static int
49034+glob_match(const char *p, const char *n)
49035+{
49036+ char c;
49037+
49038+ while ((c = *p++) != '\0') {
49039+ switch (c) {
49040+ case '?':
49041+ if (*n == '\0')
49042+ return 1;
49043+ else if (*n == '/')
49044+ return 1;
49045+ break;
49046+ case '\\':
49047+ if (*n != c)
49048+ return 1;
49049+ break;
49050+ case '*':
49051+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
49052+ if (*n == '/')
49053+ return 1;
49054+ else if (c == '?') {
49055+ if (*n == '\0')
49056+ return 1;
49057+ else
49058+ ++n;
49059+ }
49060+ }
49061+ if (c == '\0') {
49062+ return 0;
49063+ } else {
49064+ const char *endp;
49065+
49066+ if ((endp = strchr(n, '/')) == NULL)
49067+ endp = n + strlen(n);
49068+
49069+ if (c == '[') {
49070+ for (--p; n < endp; ++n)
49071+ if (!glob_match(p, n))
49072+ return 0;
49073+ } else if (c == '/') {
49074+ while (*n != '\0' && *n != '/')
49075+ ++n;
49076+ if (*n == '/' && !glob_match(p, n + 1))
49077+ return 0;
49078+ } else {
49079+ for (--p; n < endp; ++n)
49080+ if (*n == c && !glob_match(p, n))
49081+ return 0;
49082+ }
49083+
49084+ return 1;
49085+ }
49086+ case '[':
49087+ {
49088+ int not;
49089+ char cold;
49090+
49091+ if (*n == '\0' || *n == '/')
49092+ return 1;
49093+
49094+ not = (*p == '!' || *p == '^');
49095+ if (not)
49096+ ++p;
49097+
49098+ c = *p++;
49099+ for (;;) {
49100+ unsigned char fn = (unsigned char)*n;
49101+
49102+ if (c == '\0')
49103+ return 1;
49104+ else {
49105+ if (c == fn)
49106+ goto matched;
49107+ cold = c;
49108+ c = *p++;
49109+
49110+ if (c == '-' && *p != ']') {
49111+ unsigned char cend = *p++;
49112+
49113+ if (cend == '\0')
49114+ return 1;
49115+
49116+ if (cold <= fn && fn <= cend)
49117+ goto matched;
49118+
49119+ c = *p++;
49120+ }
49121+ }
49122+
49123+ if (c == ']')
49124+ break;
49125+ }
49126+ if (!not)
49127+ return 1;
49128+ break;
49129+ matched:
49130+ while (c != ']') {
49131+ if (c == '\0')
49132+ return 1;
49133+
49134+ c = *p++;
49135+ }
49136+ if (not)
49137+ return 1;
49138+ }
49139+ break;
49140+ default:
49141+ if (c != *n)
49142+ return 1;
49143+ }
49144+
49145+ ++n;
49146+ }
49147+
49148+ if (*n == '\0')
49149+ return 0;
49150+
49151+ if (*n == '/')
49152+ return 0;
49153+
49154+ return 1;
49155+}
49156+
49157+static struct acl_object_label *
49158+chk_glob_label(struct acl_object_label *globbed,
49159+ struct dentry *dentry, struct vfsmount *mnt, char **path)
49160+{
49161+ struct acl_object_label *tmp;
49162+
49163+ if (*path == NULL)
49164+ *path = gr_to_filename_nolock(dentry, mnt);
49165+
49166+ tmp = globbed;
49167+
49168+ while (tmp) {
49169+ if (!glob_match(tmp->filename, *path))
49170+ return tmp;
49171+ tmp = tmp->next;
49172+ }
49173+
49174+ return NULL;
49175+}
49176+
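+/* added descriptive comment (not in the original patch): search the subject and
+   each of its parent subjects for an object label matching this inode/dev,
+   applying glob rules against the full path when requested */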
49177+static struct acl_object_label *
49178+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
49179+ const ino_t curr_ino, const dev_t curr_dev,
49180+ const struct acl_subject_label *subj, char **path, const int checkglob)
49181+{
49182+ struct acl_subject_label *tmpsubj;
49183+ struct acl_object_label *retval;
49184+ struct acl_object_label *retval2;
49185+
49186+ tmpsubj = (struct acl_subject_label *) subj;
49187+ read_lock(&gr_inode_lock);
49188+ do {
49189+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
49190+ if (retval) {
49191+ if (checkglob && retval->globbed) {
49192+ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
49193+ (struct vfsmount *)orig_mnt, path);
49194+ if (retval2)
49195+ retval = retval2;
49196+ }
49197+ break;
49198+ }
49199+ } while ((tmpsubj = tmpsubj->parent_subject));
49200+ read_unlock(&gr_inode_lock);
49201+
49202+ return retval;
49203+}
49204+
49205+static __inline__ struct acl_object_label *
49206+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
49207+ const struct dentry *curr_dentry,
49208+ const struct acl_subject_label *subj, char **path, const int checkglob)
49209+{
49210+ int newglob = checkglob;
49211+
49212+	/* if we aren't checking a subdirectory of the original path yet, don't do glob checking,
49213+	   as we don't want a / * rule to match instead of the / object.
49214+	   Don't do this for create lookups that call this function, though, since they're looking
49215+	   up on the parent and thus need globbing checks on all paths
49216+	*/
49217+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
49218+ newglob = GR_NO_GLOB;
49219+
49220+ return __full_lookup(orig_dentry, orig_mnt,
49221+ curr_dentry->d_inode->i_ino,
49222+ __get_dev(curr_dentry), subj, path, newglob);
49223+}
49224+
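+/* added descriptive comment (not in the original patch): resolve the most specific
+   object label for a dentry by walking up toward the policy root; sockets, pipes
+   and unlinked shm/hugetlb inodes get a fake rw (or rwx) label */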
49225+static struct acl_object_label *
49226+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49227+ const struct acl_subject_label *subj, char *path, const int checkglob)
49228+{
49229+ struct dentry *dentry = (struct dentry *) l_dentry;
49230+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
49231+ struct acl_object_label *retval;
49232+
49233+ spin_lock(&dcache_lock);
49234+ spin_lock(&vfsmount_lock);
49235+
49236+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
49237+#ifdef CONFIG_NET
49238+ mnt == sock_mnt ||
49239+#endif
49240+#ifdef CONFIG_HUGETLBFS
49241+ (mnt == hugetlbfs_vfsmount && dentry->d_inode->i_nlink == 0) ||
49242+#endif
49243+ /* ignore Eric Biederman */
49244+ IS_PRIVATE(l_dentry->d_inode))) {
49245+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
49246+ goto out;
49247+ }
49248+
49249+ for (;;) {
49250+ if (dentry == real_root && mnt == real_root_mnt)
49251+ break;
49252+
49253+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
49254+ if (mnt->mnt_parent == mnt)
49255+ break;
49256+
49257+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49258+ if (retval != NULL)
49259+ goto out;
49260+
49261+ dentry = mnt->mnt_mountpoint;
49262+ mnt = mnt->mnt_parent;
49263+ continue;
49264+ }
49265+
49266+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49267+ if (retval != NULL)
49268+ goto out;
49269+
49270+ dentry = dentry->d_parent;
49271+ }
49272+
49273+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
49274+
49275+ if (retval == NULL)
49276+ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob);
49277+out:
49278+ spin_unlock(&vfsmount_lock);
49279+ spin_unlock(&dcache_lock);
49280+
49281+ BUG_ON(retval == NULL);
49282+
49283+ return retval;
49284+}
49285+
49286+static __inline__ struct acl_object_label *
49287+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49288+ const struct acl_subject_label *subj)
49289+{
49290+ char *path = NULL;
49291+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
49292+}
49293+
49294+static __inline__ struct acl_object_label *
49295+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49296+ const struct acl_subject_label *subj)
49297+{
49298+ char *path = NULL;
49299+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
49300+}
49301+
49302+static __inline__ struct acl_object_label *
49303+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49304+ const struct acl_subject_label *subj, char *path)
49305+{
49306+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
49307+}
49308+
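+/* added descriptive comment (not in the original patch): find the subject label
+   for a dentry under the given role by walking up toward the policy root, falling
+   back to the subject attached to the real root */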
49309+static struct acl_subject_label *
49310+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
49311+ const struct acl_role_label *role)
49312+{
49313+ struct dentry *dentry = (struct dentry *) l_dentry;
49314+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
49315+ struct acl_subject_label *retval;
49316+
49317+ spin_lock(&dcache_lock);
49318+ spin_lock(&vfsmount_lock);
49319+
49320+ for (;;) {
49321+ if (dentry == real_root && mnt == real_root_mnt)
49322+ break;
49323+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
49324+ if (mnt->mnt_parent == mnt)
49325+ break;
49326+
49327+ read_lock(&gr_inode_lock);
49328+ retval =
49329+ lookup_acl_subj_label(dentry->d_inode->i_ino,
49330+ __get_dev(dentry), role);
49331+ read_unlock(&gr_inode_lock);
49332+ if (retval != NULL)
49333+ goto out;
49334+
49335+ dentry = mnt->mnt_mountpoint;
49336+ mnt = mnt->mnt_parent;
49337+ continue;
49338+ }
49339+
49340+ read_lock(&gr_inode_lock);
49341+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
49342+ __get_dev(dentry), role);
49343+ read_unlock(&gr_inode_lock);
49344+ if (retval != NULL)
49345+ goto out;
49346+
49347+ dentry = dentry->d_parent;
49348+ }
49349+
49350+ read_lock(&gr_inode_lock);
49351+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
49352+ __get_dev(dentry), role);
49353+ read_unlock(&gr_inode_lock);
49354+
49355+ if (unlikely(retval == NULL)) {
49356+ read_lock(&gr_inode_lock);
49357+ retval = lookup_acl_subj_label(real_root->d_inode->i_ino,
49358+ __get_dev(real_root), role);
49359+ read_unlock(&gr_inode_lock);
49360+ }
49361+out:
49362+ spin_unlock(&vfsmount_lock);
49363+ spin_unlock(&dcache_lock);
49364+
49365+ BUG_ON(retval == NULL);
49366+
49367+ return retval;
49368+}
49369+
49370+static void
49371+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
49372+{
49373+ struct task_struct *task = current;
49374+ const struct cred *cred = current_cred();
49375+
49376+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
49377+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49378+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49379+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
49380+
49381+ return;
49382+}
49383+
49384+static void
49385+gr_log_learn_sysctl(const char *path, const __u32 mode)
49386+{
49387+ struct task_struct *task = current;
49388+ const struct cred *cred = current_cred();
49389+
49390+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
49391+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49392+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49393+ 1UL, 1UL, path, (unsigned long) mode, &task->signal->saved_ip);
49394+
49395+ return;
49396+}
49397+
49398+static void
49399+gr_log_learn_id_change(const char type, const unsigned int real,
49400+ const unsigned int effective, const unsigned int fs)
49401+{
49402+ struct task_struct *task = current;
49403+ const struct cred *cred = current_cred();
49404+
49405+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
49406+ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
49407+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
49408+ type, real, effective, fs, &task->signal->saved_ip);
49409+
49410+ return;
49411+}
49412+
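+/* added descriptive comment (not in the original patch): hard-link check: only
+   allow the link if the subject's existing access to the target already covers
+   everything the new name would grant */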
49413+__u32
49414+gr_check_link(const struct dentry * new_dentry,
49415+ const struct dentry * parent_dentry,
49416+ const struct vfsmount * parent_mnt,
49417+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
49418+{
49419+ struct acl_object_label *obj;
49420+ __u32 oldmode, newmode;
49421+ __u32 needmode;
49422+
49423+ if (unlikely(!(gr_status & GR_READY)))
49424+ return (GR_CREATE | GR_LINK);
49425+
49426+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
49427+ oldmode = obj->mode;
49428+
49429+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49430+ oldmode |= (GR_CREATE | GR_LINK);
49431+
49432+ needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS;
49433+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49434+ needmode |= GR_SETID | GR_AUDIT_SETID;
49435+
49436+ newmode =
49437+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
49438+ oldmode | needmode);
49439+
49440+ needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC |
49441+ GR_SETID | GR_READ | GR_FIND | GR_DELETE |
49442+ GR_INHERIT | GR_AUDIT_INHERIT);
49443+
49444+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID))
49445+ goto bad;
49446+
49447+ if ((oldmode & needmode) != needmode)
49448+ goto bad;
49449+
49450+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
49451+ if ((newmode & needmode) != needmode)
49452+ goto bad;
49453+
49454+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
49455+ return newmode;
49456+bad:
49457+ needmode = oldmode;
49458+ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID))
49459+ needmode |= GR_SETID;
49460+
49461+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
49462+ gr_log_learn(old_dentry, old_mnt, needmode);
49463+ return (GR_CREATE | GR_LINK);
49464+ } else if (newmode & GR_SUPPRESS)
49465+ return GR_SUPPRESS;
49466+ else
49467+ return 0;
49468+}
49469+
49470+__u32
49471+gr_search_file(const struct dentry * dentry, const __u32 mode,
49472+ const struct vfsmount * mnt)
49473+{
49474+ __u32 retval = mode;
49475+ struct acl_subject_label *curracl;
49476+ struct acl_object_label *currobj;
49477+
49478+ if (unlikely(!(gr_status & GR_READY)))
49479+ return (mode & ~GR_AUDITS);
49480+
49481+ curracl = current->acl;
49482+
49483+ currobj = chk_obj_label(dentry, mnt, curracl);
49484+ retval = currobj->mode & mode;
49485+
49486+ /* if we're opening a specified transfer file for writing
49487+ (e.g. /dev/initctl), then transfer our role to init
49488+ */
49489+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
49490+ current->role->roletype & GR_ROLE_PERSIST)) {
49491+ struct task_struct *task = init_pid_ns.child_reaper;
49492+
49493+ if (task->role != current->role) {
49494+ task->acl_sp_role = 0;
49495+ task->acl_role_id = current->acl_role_id;
49496+ task->role = current->role;
49497+ rcu_read_lock();
49498+ read_lock(&grsec_exec_file_lock);
49499+ gr_apply_subject_to_task(task);
49500+ read_unlock(&grsec_exec_file_lock);
49501+ rcu_read_unlock();
49502+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
49503+ }
49504+ }
49505+
49506+ if (unlikely
49507+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
49508+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
49509+ __u32 new_mode = mode;
49510+
49511+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49512+
49513+ retval = new_mode;
49514+
49515+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
49516+ new_mode |= GR_INHERIT;
49517+
49518+ if (!(mode & GR_NOLEARN))
49519+ gr_log_learn(dentry, mnt, new_mode);
49520+ }
49521+
49522+ return retval;
49523+}
49524+
49525+__u32
49526+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
49527+ const struct vfsmount * mnt, const __u32 mode)
49528+{
49529+ struct name_entry *match;
49530+ struct acl_object_label *matchpo;
49531+ struct acl_subject_label *curracl;
49532+ char *path;
49533+ __u32 retval;
49534+
49535+ if (unlikely(!(gr_status & GR_READY)))
49536+ return (mode & ~GR_AUDITS);
49537+
49538+ preempt_disable();
49539+ path = gr_to_filename_rbac(new_dentry, mnt);
49540+ match = lookup_name_entry_create(path);
49541+
49542+ if (!match)
49543+ goto check_parent;
49544+
49545+ curracl = current->acl;
49546+
49547+ read_lock(&gr_inode_lock);
49548+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
49549+ read_unlock(&gr_inode_lock);
49550+
49551+ if (matchpo) {
49552+ if ((matchpo->mode & mode) !=
49553+ (mode & ~(GR_AUDITS | GR_SUPPRESS))
49554+ && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) {
49555+ __u32 new_mode = mode;
49556+
49557+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49558+
49559+ gr_log_learn(new_dentry, mnt, new_mode);
49560+
49561+ preempt_enable();
49562+ return new_mode;
49563+ }
49564+ preempt_enable();
49565+ return (matchpo->mode & mode);
49566+ }
49567+
49568+ check_parent:
49569+ curracl = current->acl;
49570+
49571+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
49572+ retval = matchpo->mode & mode;
49573+
49574+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
49575+ && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) {
49576+ __u32 new_mode = mode;
49577+
49578+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
49579+
49580+ gr_log_learn(new_dentry, mnt, new_mode);
49581+ preempt_enable();
49582+ return new_mode;
49583+ }
49584+
49585+ preempt_enable();
49586+ return retval;
49587+}
49588+
49589+int
49590+gr_check_hidden_task(const struct task_struct *task)
49591+{
49592+ if (unlikely(!(gr_status & GR_READY)))
49593+ return 0;
49594+
49595+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
49596+ return 1;
49597+
49598+ return 0;
49599+}
49600+
49601+int
49602+gr_check_protected_task(const struct task_struct *task)
49603+{
49604+ if (unlikely(!(gr_status & GR_READY) || !task))
49605+ return 0;
49606+
49607+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49608+ task->acl != current->acl)
49609+ return 1;
49610+
49611+ return 0;
49612+}
49613+
49614+int
49615+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
49616+{
49617+ struct task_struct *p;
49618+ int ret = 0;
49619+
49620+ if (unlikely(!(gr_status & GR_READY) || !pid))
49621+ return ret;
49622+
49623+ read_lock(&tasklist_lock);
49624+ do_each_pid_task(pid, type, p) {
49625+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
49626+ p->acl != current->acl) {
49627+ ret = 1;
49628+ goto out;
49629+ }
49630+ } while_each_pid_task(pid, type, p);
49631+out:
49632+ read_unlock(&tasklist_lock);
49633+
49634+ return ret;
49635+}
49636+
49637+void
49638+gr_copy_label(struct task_struct *tsk)
49639+{
49640+ tsk->signal->used_accept = 0;
49641+ tsk->acl_sp_role = 0;
49642+ tsk->acl_role_id = current->acl_role_id;
49643+ tsk->acl = current->acl;
49644+ tsk->role = current->role;
49645+ tsk->signal->curr_ip = current->signal->curr_ip;
49646+ tsk->signal->saved_ip = current->signal->saved_ip;
49647+ if (current->exec_file)
49648+ get_file(current->exec_file);
49649+ tsk->exec_file = current->exec_file;
49650+ tsk->is_writable = current->is_writable;
49651+ if (unlikely(current->signal->used_accept)) {
49652+ current->signal->curr_ip = 0;
49653+ current->signal->saved_ip = 0;
49654+ }
49655+
49656+ return;
49657+}
49658+
49659+static void
49660+gr_set_proc_res(struct task_struct *task)
49661+{
49662+ struct acl_subject_label *proc;
49663+ unsigned short i;
49664+
49665+ proc = task->acl;
49666+
49667+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
49668+ return;
49669+
49670+ for (i = 0; i < RLIM_NLIMITS; i++) {
49671+ if (!(proc->resmask & (1 << i)))
49672+ continue;
49673+
49674+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
49675+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
49676+ }
49677+
49678+ return;
49679+}
49680+
49681+extern int __gr_process_user_ban(struct user_struct *user);
49682+
49683+int
49684+gr_check_user_change(int real, int effective, int fs)
49685+{
49686+ unsigned int i;
49687+ __u16 num;
49688+ uid_t *uidlist;
49689+ int curuid;
49690+ int realok = 0;
49691+ int effectiveok = 0;
49692+ int fsok = 0;
49693+
49694+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
49695+ struct user_struct *user;
49696+
49697+ if (real == -1)
49698+ goto skipit;
49699+
49700+ user = find_user(real);
49701+ if (user == NULL)
49702+ goto skipit;
49703+
49704+ if (__gr_process_user_ban(user)) {
49705+ /* for find_user */
49706+ free_uid(user);
49707+ return 1;
49708+ }
49709+
49710+ /* for find_user */
49711+ free_uid(user);
49712+
49713+skipit:
49714+#endif
49715+
49716+ if (unlikely(!(gr_status & GR_READY)))
49717+ return 0;
49718+
49719+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49720+ gr_log_learn_id_change('u', real, effective, fs);
49721+
49722+ num = current->acl->user_trans_num;
49723+ uidlist = current->acl->user_transitions;
49724+
49725+ if (uidlist == NULL)
49726+ return 0;
49727+
49728+ if (real == -1)
49729+ realok = 1;
49730+ if (effective == -1)
49731+ effectiveok = 1;
49732+ if (fs == -1)
49733+ fsok = 1;
49734+
49735+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
49736+ for (i = 0; i < num; i++) {
49737+ curuid = (int)uidlist[i];
49738+ if (real == curuid)
49739+ realok = 1;
49740+ if (effective == curuid)
49741+ effectiveok = 1;
49742+ if (fs == curuid)
49743+ fsok = 1;
49744+ }
49745+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
49746+ for (i = 0; i < num; i++) {
49747+ curuid = (int)uidlist[i];
49748+ if (real == curuid)
49749+ break;
49750+ if (effective == curuid)
49751+ break;
49752+ if (fs == curuid)
49753+ break;
49754+ }
49755+ /* not in deny list */
49756+ if (i == num) {
49757+ realok = 1;
49758+ effectiveok = 1;
49759+ fsok = 1;
49760+ }
49761+ }
49762+
49763+ if (realok && effectiveok && fsok)
49764+ return 0;
49765+ else {
49766+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49767+ return 1;
49768+ }
49769+}
49770+
49771+int
49772+gr_check_group_change(int real, int effective, int fs)
49773+{
49774+ unsigned int i;
49775+ __u16 num;
49776+ gid_t *gidlist;
49777+ int curgid;
49778+ int realok = 0;
49779+ int effectiveok = 0;
49780+ int fsok = 0;
49781+
49782+ if (unlikely(!(gr_status & GR_READY)))
49783+ return 0;
49784+
49785+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
49786+ gr_log_learn_id_change('g', real, effective, fs);
49787+
49788+ num = current->acl->group_trans_num;
49789+ gidlist = current->acl->group_transitions;
49790+
49791+ if (gidlist == NULL)
49792+ return 0;
49793+
49794+ if (real == -1)
49795+ realok = 1;
49796+ if (effective == -1)
49797+ effectiveok = 1;
49798+ if (fs == -1)
49799+ fsok = 1;
49800+
49801+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
49802+ for (i = 0; i < num; i++) {
49803+ curgid = (int)gidlist[i];
49804+ if (real == curgid)
49805+ realok = 1;
49806+ if (effective == curgid)
49807+ effectiveok = 1;
49808+ if (fs == curgid)
49809+ fsok = 1;
49810+ }
49811+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
49812+ for (i = 0; i < num; i++) {
49813+ curgid = (int)gidlist[i];
49814+ if (real == curgid)
49815+ break;
49816+ if (effective == curgid)
49817+ break;
49818+ if (fs == curgid)
49819+ break;
49820+ }
49821+ /* not in deny list */
49822+ if (i == num) {
49823+ realok = 1;
49824+ effectiveok = 1;
49825+ fsok = 1;
49826+ }
49827+ }
49828+
49829+ if (realok && effectiveok && fsok)
49830+ return 0;
49831+ else {
49832+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real);
49833+ return 1;
49834+ }
49835+}
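
Both gr_check_user_change() and gr_check_group_change() above enforce the same rule: the subject carries either an allow-list or a deny-list of ids, every id that is actually being changed must pass it, and a value of -1 (id unchanged) always passes. A minimal user-space sketch of that per-id decision follows; the flag values, the sample list and the main() driver are illustrative assumptions, and only the GR_ID_ALLOW/GR_ID_DENY names are taken from the code above.

#include <stdio.h>

/* placeholder flag values; the real GR_ID_ALLOW/GR_ID_DENY are defined
 * elsewhere in this patch */
#define GR_ID_ALLOW 0x1
#define GR_ID_DENY  0x2

/* is one requested id acceptable under the subject's transition list? */
static int id_ok(int requested, int type, const int *list, unsigned int num)
{
	unsigned int i;

	if (requested == -1)		/* -1 means "this id is not changing" */
		return 1;

	for (i = 0; i < num; i++)
		if (list[i] == requested)
			/* on the list: fine for an allow-list, fatal for a deny-list */
			return (type & GR_ID_ALLOW) != 0;

	/* not on the list: fine for a deny-list, fatal for an allow-list */
	return (type & GR_ID_DENY) != 0;
}

int main(void)
{
	const int allowed[] = { 0, 33, 1000 };
	int real = 33, effective = 33, fs = -1;
	int ok;

	ok = id_ok(real, GR_ID_ALLOW, allowed, 3) &&
	     id_ok(effective, GR_ID_ALLOW, allowed, 3) &&
	     id_ok(fs, GR_ID_ALLOW, allowed, 3);

	printf("uid transition %s\n", ok ? "allowed" : "denied");
	return 0;
}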
49836+
49837+void
49838+gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
49839+{
49840+ struct acl_role_label *role = task->role;
49841+ struct acl_subject_label *subj = NULL;
49842+ struct acl_object_label *obj;
49843+ struct file *filp;
49844+
49845+ if (unlikely(!(gr_status & GR_READY)))
49846+ return;
49847+
49848+ filp = task->exec_file;
49849+
49850+ /* kernel process, we'll give them the kernel role */
49851+ if (unlikely(!filp)) {
49852+ task->role = kernel_role;
49853+ task->acl = kernel_role->root_label;
49854+ return;
49855+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
49856+ role = lookup_acl_role_label(task, uid, gid);
49857+
49858+ /* perform subject lookup in possibly new role
49859+ we can use this result below in the case where role == task->role
49860+ */
49861+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
49862+
49863+	/* if we changed uid/gid but ended up with the same role
49864+	   and are using inheritance, don't lose the inherited subject:
49865+	   if the current subject is not what a normal lookup would
49866+	   return, we arrived at it via inheritance, so don't
49867+	   lose that subject
49868+	 */
49869+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
49870+ (subj == task->acl)))
49871+ task->acl = subj;
49872+
49873+ task->role = role;
49874+
49875+ task->is_writable = 0;
49876+
49877+ /* ignore additional mmap checks for processes that are writable
49878+ by the default ACL */
49879+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
49880+ if (unlikely(obj->mode & GR_WRITE))
49881+ task->is_writable = 1;
49882+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
49883+ if (unlikely(obj->mode & GR_WRITE))
49884+ task->is_writable = 1;
49885+
49886+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49887+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49888+#endif
49889+
49890+ gr_set_proc_res(task);
49891+
49892+ return;
49893+}
49894+
49895+int
49896+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
49897+ const int unsafe_share)
49898+{
49899+ struct task_struct *task = current;
49900+ struct acl_subject_label *newacl;
49901+ struct acl_object_label *obj;
49902+ __u32 retmode;
49903+
49904+ if (unlikely(!(gr_status & GR_READY)))
49905+ return 0;
49906+
49907+ newacl = chk_subj_label(dentry, mnt, task->role);
49908+
49909+ task_lock(task);
49910+ if ((((task->ptrace & PT_PTRACED) || unsafe_share) &&
49911+ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
49912+ !(task->role->roletype & GR_ROLE_GOD) &&
49913+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
49914+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) {
49915+ task_unlock(task);
49916+ if (unsafe_share)
49917+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
49918+ else
49919+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
49920+ return -EACCES;
49921+ }
49922+ task_unlock(task);
49923+
49924+ obj = chk_obj_label(dentry, mnt, task->acl);
49925+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
49926+
49927+ if (!(task->acl->mode & GR_INHERITLEARN) &&
49928+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
49929+ if (obj->nested)
49930+ task->acl = obj->nested;
49931+ else
49932+ task->acl = newacl;
49933+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
49934+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
49935+
49936+ task->is_writable = 0;
49937+
49938+ /* ignore additional mmap checks for processes that are writable
49939+ by the default ACL */
49940+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
49941+ if (unlikely(obj->mode & GR_WRITE))
49942+ task->is_writable = 1;
49943+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
49944+ if (unlikely(obj->mode & GR_WRITE))
49945+ task->is_writable = 1;
49946+
49947+ gr_set_proc_res(task);
49948+
49949+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
49950+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
49951+#endif
49952+ return 0;
49953+}
49954+
49955+/* always called with valid inodev ptr */
49956+static void
49957+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
49958+{
49959+ struct acl_object_label *matchpo;
49960+ struct acl_subject_label *matchps;
49961+ struct acl_subject_label *subj;
49962+ struct acl_role_label *role;
49963+ unsigned int x;
49964+
49965+ FOR_EACH_ROLE_START(role)
49966+ FOR_EACH_SUBJECT_START(role, subj, x)
49967+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
49968+ matchpo->mode |= GR_DELETED;
49969+ FOR_EACH_SUBJECT_END(subj,x)
49970+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
49971+ if (subj->inode == ino && subj->device == dev)
49972+ subj->mode |= GR_DELETED;
49973+ FOR_EACH_NESTED_SUBJECT_END(subj)
49974+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
49975+ matchps->mode |= GR_DELETED;
49976+ FOR_EACH_ROLE_END(role)
49977+
49978+ inodev->nentry->deleted = 1;
49979+
49980+ return;
49981+}
49982+
49983+void
49984+gr_handle_delete(const ino_t ino, const dev_t dev)
49985+{
49986+ struct inodev_entry *inodev;
49987+
49988+ if (unlikely(!(gr_status & GR_READY)))
49989+ return;
49990+
49991+ write_lock(&gr_inode_lock);
49992+ inodev = lookup_inodev_entry(ino, dev);
49993+ if (inodev != NULL)
49994+ do_handle_delete(inodev, ino, dev);
49995+ write_unlock(&gr_inode_lock);
49996+
49997+ return;
49998+}
49999+
50000+static void
50001+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
50002+ const ino_t newinode, const dev_t newdevice,
50003+ struct acl_subject_label *subj)
50004+{
50005+ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
50006+ struct acl_object_label *match;
50007+
50008+ match = subj->obj_hash[index];
50009+
50010+ while (match && (match->inode != oldinode ||
50011+ match->device != olddevice ||
50012+ !(match->mode & GR_DELETED)))
50013+ match = match->next;
50014+
50015+ if (match && (match->inode == oldinode)
50016+ && (match->device == olddevice)
50017+ && (match->mode & GR_DELETED)) {
50018+ if (match->prev == NULL) {
50019+ subj->obj_hash[index] = match->next;
50020+ if (match->next != NULL)
50021+ match->next->prev = NULL;
50022+ } else {
50023+ match->prev->next = match->next;
50024+ if (match->next != NULL)
50025+ match->next->prev = match->prev;
50026+ }
50027+ match->prev = NULL;
50028+ match->next = NULL;
50029+ match->inode = newinode;
50030+ match->device = newdevice;
50031+ match->mode &= ~GR_DELETED;
50032+
50033+ insert_acl_obj_label(match, subj);
50034+ }
50035+
50036+ return;
50037+}
50038+
50039+static void
50040+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
50041+ const ino_t newinode, const dev_t newdevice,
50042+ struct acl_role_label *role)
50043+{
50044+ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
50045+ struct acl_subject_label *match;
50046+
50047+ match = role->subj_hash[index];
50048+
50049+ while (match && (match->inode != oldinode ||
50050+ match->device != olddevice ||
50051+ !(match->mode & GR_DELETED)))
50052+ match = match->next;
50053+
50054+ if (match && (match->inode == oldinode)
50055+ && (match->device == olddevice)
50056+ && (match->mode & GR_DELETED)) {
50057+ if (match->prev == NULL) {
50058+ role->subj_hash[index] = match->next;
50059+ if (match->next != NULL)
50060+ match->next->prev = NULL;
50061+ } else {
50062+ match->prev->next = match->next;
50063+ if (match->next != NULL)
50064+ match->next->prev = match->prev;
50065+ }
50066+ match->prev = NULL;
50067+ match->next = NULL;
50068+ match->inode = newinode;
50069+ match->device = newdevice;
50070+ match->mode &= ~GR_DELETED;
50071+
50072+ insert_acl_subj_label(match, role);
50073+ }
50074+
50075+ return;
50076+}
50077+
50078+static void
50079+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
50080+ const ino_t newinode, const dev_t newdevice)
50081+{
50082+ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
50083+ struct inodev_entry *match;
50084+
50085+ match = inodev_set.i_hash[index];
50086+
50087+ while (match && (match->nentry->inode != oldinode ||
50088+ match->nentry->device != olddevice || !match->nentry->deleted))
50089+ match = match->next;
50090+
50091+ if (match && (match->nentry->inode == oldinode)
50092+ && (match->nentry->device == olddevice) &&
50093+ match->nentry->deleted) {
50094+ if (match->prev == NULL) {
50095+ inodev_set.i_hash[index] = match->next;
50096+ if (match->next != NULL)
50097+ match->next->prev = NULL;
50098+ } else {
50099+ match->prev->next = match->next;
50100+ if (match->next != NULL)
50101+ match->next->prev = match->prev;
50102+ }
50103+ match->prev = NULL;
50104+ match->next = NULL;
50105+ match->nentry->inode = newinode;
50106+ match->nentry->device = newdevice;
50107+ match->nentry->deleted = 0;
50108+
50109+ insert_inodev_entry(match);
50110+ }
50111+
50112+ return;
50113+}
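
The three helpers above (update_acl_obj_label, update_acl_subj_label, update_inodev_entry) all perform the same operation on different hash tables: locate the entry marked deleted under its old (inode, device) key, unlink it from its doubly linked bucket, rewrite the key, clear the deleted flag and reinsert it under the new key. The following stand-alone sketch shows only that unlink/rekey/reinsert step; the node layout, the table size and fhash() here are stand-ins rather than the kernel's definitions.

#include <stdio.h>
#include <stdlib.h>

#define TABLE_SIZE 64u

struct node {
	unsigned long inode;
	unsigned int dev;
	int deleted;
	struct node *prev, *next;
};

static struct node *table[TABLE_SIZE];

/* toy hash, standing in for the kernel's fhash() */
static unsigned int fhash(unsigned long ino, unsigned int dev)
{
	return (unsigned int)(ino ^ dev) % TABLE_SIZE;
}

static void insert(struct node *n)
{
	unsigned int idx = fhash(n->inode, n->dev);

	n->prev = NULL;
	n->next = table[idx];
	if (table[idx])
		table[idx]->prev = n;
	table[idx] = n;
}

/* move a deleted entry from its old key to a new one, as done when a
   tracked pathname reappears under a new inode/device pair */
static void rekey(unsigned long oldino, unsigned int olddev,
		  unsigned long newino, unsigned int newdev)
{
	unsigned int idx = fhash(oldino, olddev);
	struct node *m = table[idx];

	while (m && (m->inode != oldino || m->dev != olddev || !m->deleted))
		m = m->next;
	if (!m)
		return;

	/* unlink from the old bucket */
	if (m->prev)
		m->prev->next = m->next;
	else
		table[idx] = m->next;
	if (m->next)
		m->next->prev = m->prev;

	/* rewrite the key and reinsert under the new bucket */
	m->inode = newino;
	m->dev = newdev;
	m->deleted = 0;
	insert(m);
}

int main(void)
{
	struct node *n = calloc(1, sizeof(*n));

	n->inode = 12345; n->dev = 8; n->deleted = 1;
	insert(n);
	rekey(12345, 8, 67890, 8);
	printf("entry now keyed by inode %lu\n", n->inode);
	free(n);
	return 0;
}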
50114+
50115+static void
50116+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
50117+ const struct vfsmount *mnt)
50118+{
50119+ struct acl_subject_label *subj;
50120+ struct acl_role_label *role;
50121+ unsigned int x;
50122+ ino_t inode = dentry->d_inode->i_ino;
50123+ dev_t dev = __get_dev(dentry);
50124+
50125+ FOR_EACH_ROLE_START(role)
50126+ update_acl_subj_label(matchn->inode, matchn->device,
50127+ inode, dev, role);
50128+
50129+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
50130+			if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
50131+ subj->inode = inode;
50132+ subj->device = dev;
50133+ }
50134+ FOR_EACH_NESTED_SUBJECT_END(subj)
50135+ FOR_EACH_SUBJECT_START(role, subj, x)
50136+ update_acl_obj_label(matchn->inode, matchn->device,
50137+ inode, dev, subj);
50138+ FOR_EACH_SUBJECT_END(subj,x)
50139+ FOR_EACH_ROLE_END(role)
50140+
50141+ update_inodev_entry(matchn->inode, matchn->device, inode, dev);
50142+
50143+ return;
50144+}
50145+
50146+void
50147+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
50148+{
50149+ struct name_entry *matchn;
50150+
50151+ if (unlikely(!(gr_status & GR_READY)))
50152+ return;
50153+
50154+ preempt_disable();
50155+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
50156+
50157+ if (unlikely((unsigned long)matchn)) {
50158+ write_lock(&gr_inode_lock);
50159+ do_handle_create(matchn, dentry, mnt);
50160+ write_unlock(&gr_inode_lock);
50161+ }
50162+ preempt_enable();
50163+
50164+ return;
50165+}
50166+
50167+void
50168+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
50169+ struct dentry *old_dentry,
50170+ struct dentry *new_dentry,
50171+ struct vfsmount *mnt, const __u8 replace)
50172+{
50173+ struct name_entry *matchn;
50174+ struct inodev_entry *inodev;
50175+ ino_t oldinode = old_dentry->d_inode->i_ino;
50176+ dev_t olddev = __get_dev(old_dentry);
50177+
50178+ /* vfs_rename swaps the name and parent link for old_dentry and
50179+ new_dentry
50180+ at this point, old_dentry has the new name, parent link, and inode
50181+ for the renamed file
50182+ if a file is being replaced by a rename, new_dentry has the inode
50183+ and name for the replaced file
50184+ */
50185+
50186+ if (unlikely(!(gr_status & GR_READY)))
50187+ return;
50188+
50189+ preempt_disable();
50190+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
50191+
50192+ /* we wouldn't have to check d_inode if it weren't for
50193+ NFS silly-renaming
50194+ */
50195+
50196+ write_lock(&gr_inode_lock);
50197+ if (unlikely(replace && new_dentry->d_inode)) {
50198+ ino_t newinode = new_dentry->d_inode->i_ino;
50199+ dev_t newdev = __get_dev(new_dentry);
50200+ inodev = lookup_inodev_entry(newinode, newdev);
50201+ if (inodev != NULL && (new_dentry->d_inode->i_nlink <= 1))
50202+ do_handle_delete(inodev, newinode, newdev);
50203+ }
50204+
50205+ inodev = lookup_inodev_entry(oldinode, olddev);
50206+ if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1))
50207+ do_handle_delete(inodev, oldinode, olddev);
50208+
50209+ if (unlikely((unsigned long)matchn))
50210+ do_handle_create(matchn, old_dentry, mnt);
50211+
50212+ write_unlock(&gr_inode_lock);
50213+ preempt_enable();
50214+
50215+ return;
50216+}
50217+
50218+static int
50219+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
50220+ unsigned char **sum)
50221+{
50222+ struct acl_role_label *r;
50223+ struct role_allowed_ip *ipp;
50224+ struct role_transition *trans;
50225+ unsigned int i;
50226+ int found = 0;
50227+ u32 curr_ip = current->signal->curr_ip;
50228+
50229+ current->signal->saved_ip = curr_ip;
50230+
50231+ /* check transition table */
50232+
50233+ for (trans = current->role->transitions; trans; trans = trans->next) {
50234+ if (!strcmp(rolename, trans->rolename)) {
50235+ found = 1;
50236+ break;
50237+ }
50238+ }
50239+
50240+ if (!found)
50241+ return 0;
50242+
50243+ /* handle special roles that do not require authentication
50244+ and check ip */
50245+
50246+ FOR_EACH_ROLE_START(r)
50247+ if (!strcmp(rolename, r->rolename) &&
50248+ (r->roletype & GR_ROLE_SPECIAL)) {
50249+ found = 0;
50250+ if (r->allowed_ips != NULL) {
50251+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
50252+ if ((ntohl(curr_ip) & ipp->netmask) ==
50253+ (ntohl(ipp->addr) & ipp->netmask))
50254+ found = 1;
50255+ }
50256+ } else
50257+ found = 2;
50258+ if (!found)
50259+ return 0;
50260+
50261+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
50262+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
50263+ *salt = NULL;
50264+ *sum = NULL;
50265+ return 1;
50266+ }
50267+ }
50268+ FOR_EACH_ROLE_END(r)
50269+
50270+ for (i = 0; i < num_sprole_pws; i++) {
50271+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
50272+ *salt = acl_special_roles[i]->salt;
50273+ *sum = acl_special_roles[i]->sum;
50274+ return 1;
50275+ }
50276+ }
50277+
50278+ return 0;
50279+}
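
The allowed-IP test in lookup_special_role_auth() is a plain masked comparison: the requesting address and the configured address must agree on every bit covered by the netmask. A small stand-alone illustration of the same check (the sample subnet is made up):

#include <stdio.h>
#include <arpa/inet.h>

/* does addr fall inside net/mask?  all values in host byte order */
static int ip_allowed(unsigned int addr, unsigned int net, unsigned int mask)
{
	return (addr & mask) == (net & mask);
}

int main(void)
{
	unsigned int addr = ntohl(inet_addr("192.168.1.77"));
	unsigned int net  = ntohl(inet_addr("192.168.1.0"));
	unsigned int mask = 0xffffff00u;	/* a /24 netmask */

	printf("%s\n", ip_allowed(addr, net, mask) ? "allowed" : "denied");
	return 0;
}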
50280+
50281+static void
50282+assign_special_role(char *rolename)
50283+{
50284+ struct acl_object_label *obj;
50285+ struct acl_role_label *r;
50286+ struct acl_role_label *assigned = NULL;
50287+ struct task_struct *tsk;
50288+ struct file *filp;
50289+
50290+ FOR_EACH_ROLE_START(r)
50291+ if (!strcmp(rolename, r->rolename) &&
50292+ (r->roletype & GR_ROLE_SPECIAL)) {
50293+ assigned = r;
50294+ break;
50295+ }
50296+ FOR_EACH_ROLE_END(r)
50297+
50298+ if (!assigned)
50299+ return;
50300+
50301+ read_lock(&tasklist_lock);
50302+ read_lock(&grsec_exec_file_lock);
50303+
50304+ tsk = current->real_parent;
50305+ if (tsk == NULL)
50306+ goto out_unlock;
50307+
50308+ filp = tsk->exec_file;
50309+ if (filp == NULL)
50310+ goto out_unlock;
50311+
50312+ tsk->is_writable = 0;
50313+
50314+ tsk->acl_sp_role = 1;
50315+ tsk->acl_role_id = ++acl_sp_role_value;
50316+ tsk->role = assigned;
50317+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
50318+
50319+ /* ignore additional mmap checks for processes that are writable
50320+ by the default ACL */
50321+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50322+ if (unlikely(obj->mode & GR_WRITE))
50323+ tsk->is_writable = 1;
50324+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
50325+ if (unlikely(obj->mode & GR_WRITE))
50326+ tsk->is_writable = 1;
50327+
50328+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50329+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
50330+#endif
50331+
50332+out_unlock:
50333+ read_unlock(&grsec_exec_file_lock);
50334+ read_unlock(&tasklist_lock);
50335+ return;
50336+}
50337+
50338+int gr_check_secure_terminal(struct task_struct *task)
50339+{
50340+ struct task_struct *p, *p2, *p3;
50341+ struct files_struct *files;
50342+ struct fdtable *fdt;
50343+ struct file *our_file = NULL, *file;
50344+ int i;
50345+
50346+ if (task->signal->tty == NULL)
50347+ return 1;
50348+
50349+ files = get_files_struct(task);
50350+ if (files != NULL) {
50351+ rcu_read_lock();
50352+ fdt = files_fdtable(files);
50353+ for (i=0; i < fdt->max_fds; i++) {
50354+ file = fcheck_files(files, i);
50355+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
50356+ get_file(file);
50357+ our_file = file;
50358+ }
50359+ }
50360+ rcu_read_unlock();
50361+ put_files_struct(files);
50362+ }
50363+
50364+ if (our_file == NULL)
50365+ return 1;
50366+
50367+ read_lock(&tasklist_lock);
50368+ do_each_thread(p2, p) {
50369+ files = get_files_struct(p);
50370+ if (files == NULL ||
50371+ (p->signal && p->signal->tty == task->signal->tty)) {
50372+ if (files != NULL)
50373+ put_files_struct(files);
50374+ continue;
50375+ }
50376+ rcu_read_lock();
50377+ fdt = files_fdtable(files);
50378+ for (i=0; i < fdt->max_fds; i++) {
50379+ file = fcheck_files(files, i);
50380+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
50381+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
50382+ p3 = task;
50383+ while (p3->pid > 0) {
50384+ if (p3 == p)
50385+ break;
50386+ p3 = p3->real_parent;
50387+ }
50388+ if (p3 == p)
50389+ break;
50390+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
50391+ gr_handle_alertkill(p);
50392+ rcu_read_unlock();
50393+ put_files_struct(files);
50394+ read_unlock(&tasklist_lock);
50395+ fput(our_file);
50396+ return 0;
50397+ }
50398+ }
50399+ rcu_read_unlock();
50400+ put_files_struct(files);
50401+ } while_each_thread(p2, p);
50402+ read_unlock(&tasklist_lock);
50403+
50404+ fput(our_file);
50405+ return 1;
50406+}
50407+
50408+ssize_t
50409+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
50410+{
50411+ struct gr_arg_wrapper uwrap;
50412+ unsigned char *sprole_salt = NULL;
50413+ unsigned char *sprole_sum = NULL;
50414+ int error = sizeof (struct gr_arg_wrapper);
50415+ int error2 = 0;
50416+
50417+ mutex_lock(&gr_dev_mutex);
50418+
50419+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
50420+ error = -EPERM;
50421+ goto out;
50422+ }
50423+
50424+ if (count != sizeof (struct gr_arg_wrapper)) {
50425+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
50426+ error = -EINVAL;
50427+ goto out;
50428+ }
50429+
50430+
50431+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
50432+ gr_auth_expires = 0;
50433+ gr_auth_attempts = 0;
50434+ }
50435+
50436+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
50437+ error = -EFAULT;
50438+ goto out;
50439+ }
50440+
50441+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
50442+ error = -EINVAL;
50443+ goto out;
50444+ }
50445+
50446+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
50447+ error = -EFAULT;
50448+ goto out;
50449+ }
50450+
50451+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
50452+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50453+ time_after(gr_auth_expires, get_seconds())) {
50454+ error = -EBUSY;
50455+ goto out;
50456+ }
50457+
50458+	/* if a non-root user is trying to do anything other than use a
50459+	   special role, do not attempt authentication and do not count
50460+	   it towards the authentication lockout
50461+	 */
50462+
50463+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
50464+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
50465+ current_uid()) {
50466+ error = -EPERM;
50467+ goto out;
50468+ }
50469+
50470+ /* ensure pw and special role name are null terminated */
50471+
50472+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
50473+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
50474+
50475+ /* Okay.
50476+	 * We have enough of the argument structure (we have yet
50477+	 * to copy_from_user the tables themselves).  Copy the tables
50478+ * only if we need them, i.e. for loading operations. */
50479+
50480+ switch (gr_usermode->mode) {
50481+ case GR_STATUS:
50482+ if (gr_status & GR_READY) {
50483+ error = 1;
50484+ if (!gr_check_secure_terminal(current))
50485+ error = 3;
50486+ } else
50487+ error = 2;
50488+ goto out;
50489+ case GR_SHUTDOWN:
50490+ if ((gr_status & GR_READY)
50491+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50492+ pax_open_kernel();
50493+ gr_status &= ~GR_READY;
50494+ pax_close_kernel();
50495+
50496+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
50497+ free_variables();
50498+ memset(gr_usermode, 0, sizeof (struct gr_arg));
50499+ memset(gr_system_salt, 0, GR_SALT_LEN);
50500+ memset(gr_system_sum, 0, GR_SHA_LEN);
50501+ } else if (gr_status & GR_READY) {
50502+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
50503+ error = -EPERM;
50504+ } else {
50505+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
50506+ error = -EAGAIN;
50507+ }
50508+ break;
50509+ case GR_ENABLE:
50510+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
50511+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
50512+ else {
50513+ if (gr_status & GR_READY)
50514+ error = -EAGAIN;
50515+ else
50516+ error = error2;
50517+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
50518+ }
50519+ break;
50520+ case GR_RELOAD:
50521+ if (!(gr_status & GR_READY)) {
50522+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
50523+ error = -EAGAIN;
50524+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50525+ lock_kernel();
50526+
50527+ pax_open_kernel();
50528+ gr_status &= ~GR_READY;
50529+ pax_close_kernel();
50530+
50531+ free_variables();
50532+ if (!(error2 = gracl_init(gr_usermode))) {
50533+ unlock_kernel();
50534+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
50535+ } else {
50536+ unlock_kernel();
50537+ error = error2;
50538+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50539+ }
50540+ } else {
50541+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
50542+ error = -EPERM;
50543+ }
50544+ break;
50545+ case GR_SEGVMOD:
50546+ if (unlikely(!(gr_status & GR_READY))) {
50547+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
50548+ error = -EAGAIN;
50549+ break;
50550+ }
50551+
50552+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
50553+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
50554+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
50555+ struct acl_subject_label *segvacl;
50556+ segvacl =
50557+ lookup_acl_subj_label(gr_usermode->segv_inode,
50558+ gr_usermode->segv_device,
50559+ current->role);
50560+ if (segvacl) {
50561+ segvacl->crashes = 0;
50562+ segvacl->expires = 0;
50563+ }
50564+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
50565+ gr_remove_uid(gr_usermode->segv_uid);
50566+ }
50567+ } else {
50568+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
50569+ error = -EPERM;
50570+ }
50571+ break;
50572+ case GR_SPROLE:
50573+ case GR_SPROLEPAM:
50574+ if (unlikely(!(gr_status & GR_READY))) {
50575+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
50576+ error = -EAGAIN;
50577+ break;
50578+ }
50579+
50580+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
50581+ current->role->expires = 0;
50582+ current->role->auth_attempts = 0;
50583+ }
50584+
50585+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
50586+ time_after(current->role->expires, get_seconds())) {
50587+ error = -EBUSY;
50588+ goto out;
50589+ }
50590+
50591+ if (lookup_special_role_auth
50592+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
50593+ && ((!sprole_salt && !sprole_sum)
50594+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
50595+ char *p = "";
50596+ assign_special_role(gr_usermode->sp_role);
50597+ read_lock(&tasklist_lock);
50598+ if (current->real_parent)
50599+ p = current->real_parent->role->rolename;
50600+ read_unlock(&tasklist_lock);
50601+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
50602+ p, acl_sp_role_value);
50603+ } else {
50604+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
50605+ error = -EPERM;
50606+ if(!(current->role->auth_attempts++))
50607+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50608+
50609+ goto out;
50610+ }
50611+ break;
50612+ case GR_UNSPROLE:
50613+ if (unlikely(!(gr_status & GR_READY))) {
50614+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
50615+ error = -EAGAIN;
50616+ break;
50617+ }
50618+
50619+ if (current->role->roletype & GR_ROLE_SPECIAL) {
50620+ char *p = "";
50621+ int i = 0;
50622+
50623+ read_lock(&tasklist_lock);
50624+ if (current->real_parent) {
50625+ p = current->real_parent->role->rolename;
50626+ i = current->real_parent->acl_role_id;
50627+ }
50628+ read_unlock(&tasklist_lock);
50629+
50630+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
50631+ gr_set_acls(1);
50632+ } else {
50633+ error = -EPERM;
50634+ goto out;
50635+ }
50636+ break;
50637+ default:
50638+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
50639+ error = -EINVAL;
50640+ break;
50641+ }
50642+
50643+ if (error != -EPERM)
50644+ goto out;
50645+
50646+ if(!(gr_auth_attempts++))
50647+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
50648+
50649+ out:
50650+ mutex_unlock(&gr_dev_mutex);
50651+ return error;
50652+}
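
write_grsec_handler() expects a single write of exactly sizeof(struct gr_arg_wrapper) bytes whose version and size fields match the kernel's, with arg pointing at a struct gr_arg in the caller's address space; gradm is the normal client. The sketch below shows only the shape of that user-space call. The structure layouts, the constants and the /dev/grsec path are assumptions made for illustration, not the real definitions from the grsecurity headers.

/* hypothetical user-space caller (normally this is gradm's job) */
#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>

struct gr_arg {			/* placeholder layout, not the real gr_arg */
	unsigned char mode;
	/* ... password, role name, segvmod fields, table pointers ... */
};

struct gr_arg_wrapper {		/* mirrors the fields the handler checks */
	struct gr_arg *arg;
	unsigned int version;	/* must equal GRSECURITY_VERSION */
	unsigned int size;	/* must equal sizeof(struct gr_arg) */
};

int main(void)
{
	struct gr_arg arg;
	struct gr_arg_wrapper wrap;
	ssize_t ret;
	int fd;

	memset(&arg, 0, sizeof(arg));
	arg.mode = 0;		/* would be GR_STATUS, GR_ENABLE, ...;
				   real numeric values not shown here */

	wrap.arg = &arg;
	wrap.version = 0;	/* would be GRSECURITY_VERSION */
	wrap.size = sizeof(struct gr_arg);

	fd = open("/dev/grsec", O_WRONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* the handler rejects any write whose length is not exactly
	   sizeof(struct gr_arg_wrapper) */
	ret = write(fd, &wrap, sizeof(wrap));
	printf("write returned %zd\n", ret);
	close(fd);
	return 0;
}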
50653+
50654+/* must be called with
50655+ rcu_read_lock();
50656+ read_lock(&tasklist_lock);
50657+ read_lock(&grsec_exec_file_lock);
50658+*/
50659+int gr_apply_subject_to_task(struct task_struct *task)
50660+{
50661+ struct acl_object_label *obj;
50662+ char *tmpname;
50663+ struct acl_subject_label *tmpsubj;
50664+ struct file *filp;
50665+ struct name_entry *nmatch;
50666+
50667+ filp = task->exec_file;
50668+ if (filp == NULL)
50669+ return 0;
50670+
50671+	/* the following applies the correct subject to binaries
50672+	   that were already running when the RBAC system was
50673+	   enabled and that have been replaced or deleted since
50674+	   they were executed
50675+	   -----
50676+	   when the RBAC system starts, the inode/dev taken
50677+	   from exec_file will be one the RBAC system is
50678+	   unaware of; it only knows the inode/dev of the
50679+	   file currently on disk, or the absence
50680+	   of it.
50681+	*/
50682+ preempt_disable();
50683+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
50684+
50685+ nmatch = lookup_name_entry(tmpname);
50686+ preempt_enable();
50687+ tmpsubj = NULL;
50688+ if (nmatch) {
50689+ if (nmatch->deleted)
50690+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
50691+ else
50692+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
50693+ if (tmpsubj != NULL)
50694+ task->acl = tmpsubj;
50695+ }
50696+ if (tmpsubj == NULL)
50697+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
50698+ task->role);
50699+ if (task->acl) {
50700+ task->is_writable = 0;
50701+ /* ignore additional mmap checks for processes that are writable
50702+ by the default ACL */
50703+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
50704+ if (unlikely(obj->mode & GR_WRITE))
50705+ task->is_writable = 1;
50706+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
50707+ if (unlikely(obj->mode & GR_WRITE))
50708+ task->is_writable = 1;
50709+
50710+ gr_set_proc_res(task);
50711+
50712+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
50713+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
50714+#endif
50715+ } else {
50716+ return 1;
50717+ }
50718+
50719+ return 0;
50720+}
50721+
50722+int
50723+gr_set_acls(const int type)
50724+{
50725+ struct task_struct *task, *task2;
50726+ struct acl_role_label *role = current->role;
50727+ __u16 acl_role_id = current->acl_role_id;
50728+ const struct cred *cred;
50729+ int ret;
50730+
50731+ rcu_read_lock();
50732+ read_lock(&tasklist_lock);
50733+ read_lock(&grsec_exec_file_lock);
50734+ do_each_thread(task2, task) {
50735+ /* check to see if we're called from the exit handler,
50736+ if so, only replace ACLs that have inherited the admin
50737+ ACL */
50738+
50739+ if (type && (task->role != role ||
50740+ task->acl_role_id != acl_role_id))
50741+ continue;
50742+
50743+ task->acl_role_id = 0;
50744+ task->acl_sp_role = 0;
50745+
50746+ if (task->exec_file) {
50747+ cred = __task_cred(task);
50748+ task->role = lookup_acl_role_label(task, cred->uid, cred->gid);
50749+
50750+ ret = gr_apply_subject_to_task(task);
50751+ if (ret) {
50752+ read_unlock(&grsec_exec_file_lock);
50753+ read_unlock(&tasklist_lock);
50754+ rcu_read_unlock();
50755+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid);
50756+ return ret;
50757+ }
50758+ } else {
50759+ // it's a kernel process
50760+ task->role = kernel_role;
50761+ task->acl = kernel_role->root_label;
50762+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
50763+ task->acl->mode &= ~GR_PROCFIND;
50764+#endif
50765+ }
50766+ } while_each_thread(task2, task);
50767+ read_unlock(&grsec_exec_file_lock);
50768+ read_unlock(&tasklist_lock);
50769+ rcu_read_unlock();
50770+
50771+ return 0;
50772+}
50773+
50774+void
50775+gr_learn_resource(const struct task_struct *task,
50776+ const int res, const unsigned long wanted, const int gt)
50777+{
50778+ struct acl_subject_label *acl;
50779+ const struct cred *cred;
50780+
50781+ if (unlikely((gr_status & GR_READY) &&
50782+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
50783+ goto skip_reslog;
50784+
50785+#ifdef CONFIG_GRKERNSEC_RESLOG
50786+ gr_log_resource(task, res, wanted, gt);
50787+#endif
50788+ skip_reslog:
50789+
50790+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
50791+ return;
50792+
50793+ acl = task->acl;
50794+
50795+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
50796+ !(acl->resmask & (1 << (unsigned short) res))))
50797+ return;
50798+
50799+ if (wanted >= acl->res[res].rlim_cur) {
50800+ unsigned long res_add;
50801+
50802+ res_add = wanted;
50803+ switch (res) {
50804+ case RLIMIT_CPU:
50805+ res_add += GR_RLIM_CPU_BUMP;
50806+ break;
50807+ case RLIMIT_FSIZE:
50808+ res_add += GR_RLIM_FSIZE_BUMP;
50809+ break;
50810+ case RLIMIT_DATA:
50811+ res_add += GR_RLIM_DATA_BUMP;
50812+ break;
50813+ case RLIMIT_STACK:
50814+ res_add += GR_RLIM_STACK_BUMP;
50815+ break;
50816+ case RLIMIT_CORE:
50817+ res_add += GR_RLIM_CORE_BUMP;
50818+ break;
50819+ case RLIMIT_RSS:
50820+ res_add += GR_RLIM_RSS_BUMP;
50821+ break;
50822+ case RLIMIT_NPROC:
50823+ res_add += GR_RLIM_NPROC_BUMP;
50824+ break;
50825+ case RLIMIT_NOFILE:
50826+ res_add += GR_RLIM_NOFILE_BUMP;
50827+ break;
50828+ case RLIMIT_MEMLOCK:
50829+ res_add += GR_RLIM_MEMLOCK_BUMP;
50830+ break;
50831+ case RLIMIT_AS:
50832+ res_add += GR_RLIM_AS_BUMP;
50833+ break;
50834+ case RLIMIT_LOCKS:
50835+ res_add += GR_RLIM_LOCKS_BUMP;
50836+ break;
50837+ case RLIMIT_SIGPENDING:
50838+ res_add += GR_RLIM_SIGPENDING_BUMP;
50839+ break;
50840+ case RLIMIT_MSGQUEUE:
50841+ res_add += GR_RLIM_MSGQUEUE_BUMP;
50842+ break;
50843+ case RLIMIT_NICE:
50844+ res_add += GR_RLIM_NICE_BUMP;
50845+ break;
50846+ case RLIMIT_RTPRIO:
50847+ res_add += GR_RLIM_RTPRIO_BUMP;
50848+ break;
50849+ case RLIMIT_RTTIME:
50850+ res_add += GR_RLIM_RTTIME_BUMP;
50851+ break;
50852+ }
50853+
50854+ acl->res[res].rlim_cur = res_add;
50855+
50856+ if (wanted > acl->res[res].rlim_max)
50857+ acl->res[res].rlim_max = res_add;
50858+
50859+ /* only log the subject filename, since resource logging is supported for
50860+ single-subject learning only */
50861+ rcu_read_lock();
50862+ cred = __task_cred(task);
50863+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
50864+ task->role->roletype, cred->uid, cred->gid, acl->filename,
50865+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
50866+ "", (unsigned long) res, &task->signal->saved_ip);
50867+ rcu_read_unlock();
50868+ }
50869+
50870+ return;
50871+}
50872+
50873+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
50874+void
50875+pax_set_initial_flags(struct linux_binprm *bprm)
50876+{
50877+ struct task_struct *task = current;
50878+ struct acl_subject_label *proc;
50879+ unsigned long flags;
50880+
50881+ if (unlikely(!(gr_status & GR_READY)))
50882+ return;
50883+
50884+ flags = pax_get_flags(task);
50885+
50886+ proc = task->acl;
50887+
50888+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
50889+ flags &= ~MF_PAX_PAGEEXEC;
50890+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
50891+ flags &= ~MF_PAX_SEGMEXEC;
50892+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
50893+ flags &= ~MF_PAX_RANDMMAP;
50894+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
50895+ flags &= ~MF_PAX_EMUTRAMP;
50896+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
50897+ flags &= ~MF_PAX_MPROTECT;
50898+
50899+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
50900+ flags |= MF_PAX_PAGEEXEC;
50901+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
50902+ flags |= MF_PAX_SEGMEXEC;
50903+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
50904+ flags |= MF_PAX_RANDMMAP;
50905+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
50906+ flags |= MF_PAX_EMUTRAMP;
50907+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
50908+ flags |= MF_PAX_MPROTECT;
50909+
50910+ pax_set_flags(task, flags);
50911+
50912+ return;
50913+}
50914+#endif
50915+
50916+#ifdef CONFIG_SYSCTL
50917+/* Eric Biederman likes breaking userland ABI and every inode-based security
50918+ system to save 35kb of memory */
50919+
50920+/* we modify the passed in filename, but adjust it back before returning */
50921+static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len)
50922+{
50923+ struct name_entry *nmatch;
50924+ char *p, *lastp = NULL;
50925+ struct acl_object_label *obj = NULL, *tmp;
50926+ struct acl_subject_label *tmpsubj;
50927+ char c = '\0';
50928+
50929+ read_lock(&gr_inode_lock);
50930+
50931+ p = name + len - 1;
50932+ do {
50933+ nmatch = lookup_name_entry(name);
50934+ if (lastp != NULL)
50935+ *lastp = c;
50936+
50937+ if (nmatch == NULL)
50938+ goto next_component;
50939+ tmpsubj = current->acl;
50940+ do {
50941+ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj);
50942+ if (obj != NULL) {
50943+ tmp = obj->globbed;
50944+ while (tmp) {
50945+ if (!glob_match(tmp->filename, name)) {
50946+ obj = tmp;
50947+ goto found_obj;
50948+ }
50949+ tmp = tmp->next;
50950+ }
50951+ goto found_obj;
50952+ }
50953+ } while ((tmpsubj = tmpsubj->parent_subject));
50954+next_component:
50955+ /* end case */
50956+ if (p == name)
50957+ break;
50958+
50959+ while (*p != '/')
50960+ p--;
50961+ if (p == name)
50962+ lastp = p + 1;
50963+ else {
50964+ lastp = p;
50965+ p--;
50966+ }
50967+ c = *lastp;
50968+ *lastp = '\0';
50969+ } while (1);
50970+found_obj:
50971+ read_unlock(&gr_inode_lock);
50972+ /* obj returned will always be non-null */
50973+ return obj;
50974+}
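
gr_lookup_by_name() matches the longest configured prefix of a path by truncating the buffer in place at each '/' from right to left, looking up every prefix, and restoring the overwritten character on the next pass so the caller's buffer ends up unchanged. A user-space sketch of that walk, with the hash lookup replaced by a print and assuming a non-empty absolute path, is shown below.

#include <stdio.h>
#include <string.h>

/* visit "/usr/bin/foo", "/usr/bin", "/usr", "/" by truncating in place,
   leaving the buffer restored to its original contents afterwards */
static void walk_prefixes(char *name)
{
	char *p = name + strlen(name) - 1;
	char *lastp = NULL;
	char c = '\0';

	do {
		printf("lookup: %s\n", name);	/* stand-in for the hash lookup */
		if (lastp != NULL)
			*lastp = c;		/* undo the previous truncation */

		if (p == name)
			break;
		while (*p != '/')
			p--;
		if (p == name)
			lastp = p + 1;		/* keep the leading "/" visible */
		else {
			lastp = p;
			p--;
		}
		c = *lastp;
		*lastp = '\0';
	} while (1);
}

int main(void)
{
	char path[] = "/usr/bin/foo";

	walk_prefixes(path);
	printf("restored: %s\n", path);
	return 0;
}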
50975+
50976+/* returns 0 when allowing, non-zero on error
50977+ op of 0 is used for readdir, so we don't log the names of hidden files
50978+*/
50979+__u32
50980+gr_handle_sysctl(const struct ctl_table *table, const int op)
50981+{
50982+ ctl_table *tmp;
50983+ const char *proc_sys = "/proc/sys";
50984+ char *path;
50985+ struct acl_object_label *obj;
50986+ unsigned short len = 0, pos = 0, depth = 0, i;
50987+ __u32 err = 0;
50988+ __u32 mode = 0;
50989+
50990+ if (unlikely(!(gr_status & GR_READY)))
50991+ return 0;
50992+
50993+ /* for now, ignore operations on non-sysctl entries if it's not a
50994+	   readdir */
50995+ if (table->child != NULL && op != 0)
50996+ return 0;
50997+
50998+ mode |= GR_FIND;
50999+ /* it's only a read if it's an entry, read on dirs is for readdir */
51000+ if (op & MAY_READ)
51001+ mode |= GR_READ;
51002+ if (op & MAY_WRITE)
51003+ mode |= GR_WRITE;
51004+
51005+ preempt_disable();
51006+
51007+ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
51008+
51009+ /* it's only a read/write if it's an actual entry, not a dir
51010+ (which are opened for readdir)
51011+ */
51012+
51013+ /* convert the requested sysctl entry into a pathname */
51014+
51015+ for (tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51016+ len += strlen(tmp->procname);
51017+ len++;
51018+ depth++;
51019+ }
51020+
51021+ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) {
51022+ /* deny */
51023+ goto out;
51024+ }
51025+
51026+ memset(path, 0, PAGE_SIZE);
51027+
51028+ memcpy(path, proc_sys, strlen(proc_sys));
51029+
51030+ pos += strlen(proc_sys);
51031+
51032+ for (; depth > 0; depth--) {
51033+ path[pos] = '/';
51034+ pos++;
51035+ for (i = 1, tmp = (ctl_table *)table; tmp != NULL; tmp = tmp->parent) {
51036+ if (depth == i) {
51037+ memcpy(path + pos, tmp->procname,
51038+ strlen(tmp->procname));
51039+ pos += strlen(tmp->procname);
51040+ }
51041+ i++;
51042+ }
51043+ }
51044+
51045+ obj = gr_lookup_by_name(path, pos);
51046+ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
51047+
51048+ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) &&
51049+ ((err & mode) != mode))) {
51050+ __u32 new_mode = mode;
51051+
51052+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
51053+
51054+ err = 0;
51055+ gr_log_learn_sysctl(path, new_mode);
51056+ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) {
51057+ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path);
51058+ err = -ENOENT;
51059+ } else if (!(err & GR_FIND)) {
51060+ err = -ENOENT;
51061+ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) {
51062+ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied",
51063+ path, (mode & GR_READ) ? " reading" : "",
51064+ (mode & GR_WRITE) ? " writing" : "");
51065+ err = -EACCES;
51066+ } else if ((err & mode) != mode) {
51067+ err = -EACCES;
51068+ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) {
51069+ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful",
51070+ path, (mode & GR_READ) ? " reading" : "",
51071+ (mode & GR_WRITE) ? " writing" : "");
51072+ err = 0;
51073+ } else
51074+ err = 0;
51075+
51076+ out:
51077+ preempt_enable();
51078+
51079+ return err;
51080+}
51081+#endif
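
gr_handle_sysctl() rebuilds the full /proc/sys pathname by walking the ctl_table parent chain once to measure it and then once per depth level to emit the components root-first. The same idea in stand-alone form, using a simplified node type instead of ctl_table and omitting the length checks the kernel performs, looks like this:

#include <stdio.h>
#include <string.h>

struct tbl {				/* simplified stand-in for ctl_table */
	const char *procname;
	struct tbl *parent;
};

/* build "/proc/sys/<root>/.../<leaf>" by walking parent links; the buffer
   is assumed large enough (the kernel checks the total against PAGE_SIZE) */
static void build_path(const struct tbl *leaf, char *buf)
{
	const struct tbl *t;
	size_t depth = 0, i, pos;

	for (t = leaf; t != NULL; t = t->parent)
		depth++;

	strcpy(buf, "/proc/sys");
	pos = strlen(buf);

	/* emit components root-first: for each level, rescan from the leaf
	   until we reach the entry whose distance from the leaf matches */
	for (; depth > 0; depth--) {
		buf[pos++] = '/';
		for (i = 1, t = leaf; t != NULL; t = t->parent, i++) {
			if (i == depth) {
				size_t len = strlen(t->procname);

				memcpy(buf + pos, t->procname, len);
				pos += len;
			}
		}
	}
	buf[pos] = '\0';
}

int main(void)
{
	struct tbl kernel = { "kernel", NULL };
	struct tbl modprobe = { "modprobe", &kernel };
	char path[256];

	build_path(&modprobe, path);
	printf("%s\n", path);		/* prints /proc/sys/kernel/modprobe */
	return 0;
}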
51082+
51083+int
51084+gr_handle_proc_ptrace(struct task_struct *task)
51085+{
51086+ struct file *filp;
51087+ struct task_struct *tmp = task;
51088+ struct task_struct *curtemp = current;
51089+ __u32 retmode;
51090+
51091+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
51092+ if (unlikely(!(gr_status & GR_READY)))
51093+ return 0;
51094+#endif
51095+
51096+ read_lock(&tasklist_lock);
51097+ read_lock(&grsec_exec_file_lock);
51098+ filp = task->exec_file;
51099+
51100+ while (tmp->pid > 0) {
51101+ if (tmp == curtemp)
51102+ break;
51103+ tmp = tmp->real_parent;
51104+ }
51105+
51106+ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
51107+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
51108+ read_unlock(&grsec_exec_file_lock);
51109+ read_unlock(&tasklist_lock);
51110+ return 1;
51111+ }
51112+
51113+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51114+ if (!(gr_status & GR_READY)) {
51115+ read_unlock(&grsec_exec_file_lock);
51116+ read_unlock(&tasklist_lock);
51117+ return 0;
51118+ }
51119+#endif
51120+
51121+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
51122+ read_unlock(&grsec_exec_file_lock);
51123+ read_unlock(&tasklist_lock);
51124+
51125+ if (retmode & GR_NOPTRACE)
51126+ return 1;
51127+
51128+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
51129+ && (current->acl != task->acl || (current->acl != current->role->root_label
51130+ && current->pid != task->pid)))
51131+ return 1;
51132+
51133+ return 0;
51134+}
51135+
51136+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
51137+{
51138+ if (unlikely(!(gr_status & GR_READY)))
51139+ return;
51140+
51141+ if (!(current->role->roletype & GR_ROLE_GOD))
51142+ return;
51143+
51144+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
51145+ p->role->rolename, gr_task_roletype_to_char(p),
51146+ p->acl->filename);
51147+}
51148+
51149+int
51150+gr_handle_ptrace(struct task_struct *task, const long request)
51151+{
51152+ struct task_struct *tmp = task;
51153+ struct task_struct *curtemp = current;
51154+ __u32 retmode;
51155+
51156+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
51157+ if (unlikely(!(gr_status & GR_READY)))
51158+ return 0;
51159+#endif
51160+
51161+ read_lock(&tasklist_lock);
51162+ while (tmp->pid > 0) {
51163+ if (tmp == curtemp)
51164+ break;
51165+ tmp = tmp->real_parent;
51166+ }
51167+
51168+ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) ||
51169+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
51170+ read_unlock(&tasklist_lock);
51171+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51172+ return 1;
51173+ }
51174+ read_unlock(&tasklist_lock);
51175+
51176+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
51177+ if (!(gr_status & GR_READY))
51178+ return 0;
51179+#endif
51180+
51181+ read_lock(&grsec_exec_file_lock);
51182+ if (unlikely(!task->exec_file)) {
51183+ read_unlock(&grsec_exec_file_lock);
51184+ return 0;
51185+ }
51186+
51187+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
51188+ read_unlock(&grsec_exec_file_lock);
51189+
51190+ if (retmode & GR_NOPTRACE) {
51191+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51192+ return 1;
51193+ }
51194+
51195+ if (retmode & GR_PTRACERD) {
51196+ switch (request) {
51197+ case PTRACE_POKETEXT:
51198+ case PTRACE_POKEDATA:
51199+ case PTRACE_POKEUSR:
51200+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
51201+ case PTRACE_SETREGS:
51202+ case PTRACE_SETFPREGS:
51203+#endif
51204+#ifdef CONFIG_X86
51205+ case PTRACE_SETFPXREGS:
51206+#endif
51207+#ifdef CONFIG_ALTIVEC
51208+ case PTRACE_SETVRREGS:
51209+#endif
51210+ return 1;
51211+ default:
51212+ return 0;
51213+ }
51214+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
51215+ !(current->role->roletype & GR_ROLE_GOD) &&
51216+ (current->acl != task->acl)) {
51217+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
51218+ return 1;
51219+ }
51220+
51221+ return 0;
51222+}
51223+
51224+static int is_writable_mmap(const struct file *filp)
51225+{
51226+ struct task_struct *task = current;
51227+ struct acl_object_label *obj, *obj2;
51228+
51229+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
51230+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
51231+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
51232+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
51233+ task->role->root_label);
51234+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
51235+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
51236+ return 1;
51237+ }
51238+ }
51239+ return 0;
51240+}
51241+
51242+int
51243+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
51244+{
51245+ __u32 mode;
51246+
51247+ if (unlikely(!file || !(prot & PROT_EXEC)))
51248+ return 1;
51249+
51250+ if (is_writable_mmap(file))
51251+ return 0;
51252+
51253+ mode =
51254+ gr_search_file(file->f_path.dentry,
51255+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
51256+ file->f_path.mnt);
51257+
51258+ if (!gr_tpe_allow(file))
51259+ return 0;
51260+
51261+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
51262+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51263+ return 0;
51264+ } else if (unlikely(!(mode & GR_EXEC))) {
51265+ return 0;
51266+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
51267+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51268+ return 1;
51269+ }
51270+
51271+ return 1;
51272+}
51273+
51274+int
51275+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
51276+{
51277+ __u32 mode;
51278+
51279+ if (unlikely(!file || !(prot & PROT_EXEC)))
51280+ return 1;
51281+
51282+ if (is_writable_mmap(file))
51283+ return 0;
51284+
51285+ mode =
51286+ gr_search_file(file->f_path.dentry,
51287+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
51288+ file->f_path.mnt);
51289+
51290+ if (!gr_tpe_allow(file))
51291+ return 0;
51292+
51293+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
51294+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51295+ return 0;
51296+ } else if (unlikely(!(mode & GR_EXEC))) {
51297+ return 0;
51298+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
51299+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
51300+ return 1;
51301+ }
51302+
51303+ return 1;
51304+}
51305+
51306+void
51307+gr_acl_handle_psacct(struct task_struct *task, const long code)
51308+{
51309+ unsigned long runtime;
51310+ unsigned long cputime;
51311+ unsigned int wday, cday;
51312+ __u8 whr, chr;
51313+ __u8 wmin, cmin;
51314+ __u8 wsec, csec;
51315+ struct timespec timeval;
51316+
51317+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
51318+ !(task->acl->mode & GR_PROCACCT)))
51319+ return;
51320+
51321+ do_posix_clock_monotonic_gettime(&timeval);
51322+ runtime = timeval.tv_sec - task->start_time.tv_sec;
51323+ wday = runtime / (3600 * 24);
51324+ runtime -= wday * (3600 * 24);
51325+ whr = runtime / 3600;
51326+ runtime -= whr * 3600;
51327+ wmin = runtime / 60;
51328+ runtime -= wmin * 60;
51329+ wsec = runtime;
51330+
51331+ cputime = (task->utime + task->stime) / HZ;
51332+ cday = cputime / (3600 * 24);
51333+ cputime -= cday * (3600 * 24);
51334+ chr = cputime / 3600;
51335+ cputime -= chr * 3600;
51336+ cmin = cputime / 60;
51337+ cputime -= cmin * 60;
51338+ csec = cputime;
51339+
51340+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
51341+
51342+ return;
51343+}
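
gr_acl_handle_psacct() splits both the wall-clock runtime and the consumed CPU time into days, hours, minutes and seconds before logging them. As a worked example, 100000 seconds breaks down as 1 day, 3 hours, 46 minutes and 40 seconds; the small program below performs the same decomposition on that sample value.

#include <stdio.h>

int main(void)
{
	unsigned long runtime = 100000;		/* sample duration in seconds */
	unsigned int days = runtime / (3600 * 24);

	runtime -= days * (3600 * 24);
	printf("%ud %luh %lum %lus\n", days, runtime / 3600,
	       (runtime % 3600) / 60, runtime % 60);
	return 0;
}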
51344+
51345+void gr_set_kernel_label(struct task_struct *task)
51346+{
51347+ if (gr_status & GR_READY) {
51348+ task->role = kernel_role;
51349+ task->acl = kernel_role->root_label;
51350+ }
51351+ return;
51352+}
51353+
51354+#ifdef CONFIG_TASKSTATS
51355+int gr_is_taskstats_denied(int pid)
51356+{
51357+ struct task_struct *task;
51358+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51359+ const struct cred *cred;
51360+#endif
51361+ int ret = 0;
51362+
51363+ /* restrict taskstats viewing to un-chrooted root users
51364+ who have the 'view' subject flag if the RBAC system is enabled
51365+ */
51366+
51367+ rcu_read_lock();
51368+ read_lock(&tasklist_lock);
51369+ task = find_task_by_vpid(pid);
51370+ if (task) {
51371+#ifdef CONFIG_GRKERNSEC_CHROOT
51372+ if (proc_is_chrooted(task))
51373+ ret = -EACCES;
51374+#endif
51375+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51376+ cred = __task_cred(task);
51377+#ifdef CONFIG_GRKERNSEC_PROC_USER
51378+ if (cred->uid != 0)
51379+ ret = -EACCES;
51380+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
51381+ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID))
51382+ ret = -EACCES;
51383+#endif
51384+#endif
51385+ if (gr_status & GR_READY) {
51386+ if (!(task->acl->mode & GR_VIEW))
51387+ ret = -EACCES;
51388+ }
51389+ } else
51390+ ret = -ENOENT;
51391+
51392+ read_unlock(&tasklist_lock);
51393+ rcu_read_unlock();
51394+
51395+ return ret;
51396+}
51397+#endif
51398+
51399+/* AUXV entries are filled via a descendant of search_binary_handler
51400+ after we've already applied the subject for the target
51401+*/
51402+int gr_acl_enable_at_secure(void)
51403+{
51404+ if (unlikely(!(gr_status & GR_READY)))
51405+ return 0;
51406+
51407+ if (current->acl->mode & GR_ATSECURE)
51408+ return 1;
51409+
51410+ return 0;
51411+}
51412+
51413+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
51414+{
51415+ struct task_struct *task = current;
51416+ struct dentry *dentry = file->f_path.dentry;
51417+ struct vfsmount *mnt = file->f_path.mnt;
51418+ struct acl_object_label *obj, *tmp;
51419+ struct acl_subject_label *subj;
51420+ unsigned int bufsize;
51421+ int is_not_root;
51422+ char *path;
51423+ dev_t dev = __get_dev(dentry);
51424+
51425+ if (unlikely(!(gr_status & GR_READY)))
51426+ return 1;
51427+
51428+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
51429+ return 1;
51430+
51431+ /* ignore Eric Biederman */
51432+ if (IS_PRIVATE(dentry->d_inode))
51433+ return 1;
51434+
51435+ subj = task->acl;
51436+ do {
51437+ obj = lookup_acl_obj_label(ino, dev, subj);
51438+ if (obj != NULL)
51439+ return (obj->mode & GR_FIND) ? 1 : 0;
51440+ } while ((subj = subj->parent_subject));
51441+
51442+	/* this is purely an optimization since we're looking for an object
51443+	   for the directory we're doing a readdir on;
51444+	   if it's possible for any globbed object to match the entry we're
51445+	   filling into the directory, then the object we find here will be
51446+	   an anchor point with attached globbed objects
51447+	*/
51448+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
51449+ if (obj->globbed == NULL)
51450+ return (obj->mode & GR_FIND) ? 1 : 0;
51451+
51452+ is_not_root = ((obj->filename[0] == '/') &&
51453+ (obj->filename[1] == '\0')) ? 0 : 1;
51454+ bufsize = PAGE_SIZE - namelen - is_not_root;
51455+
51456+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
51457+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
51458+ return 1;
51459+
51460+ preempt_disable();
51461+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
51462+ bufsize);
51463+
51464+ bufsize = strlen(path);
51465+
51466+ /* if base is "/", don't append an additional slash */
51467+ if (is_not_root)
51468+ *(path + bufsize) = '/';
51469+ memcpy(path + bufsize + is_not_root, name, namelen);
51470+ *(path + bufsize + namelen + is_not_root) = '\0';
51471+
51472+ tmp = obj->globbed;
51473+ while (tmp) {
51474+ if (!glob_match(tmp->filename, path)) {
51475+ preempt_enable();
51476+ return (tmp->mode & GR_FIND) ? 1 : 0;
51477+ }
51478+ tmp = tmp->next;
51479+ }
51480+ preempt_enable();
51481+ return (obj->mode & GR_FIND) ? 1 : 0;
51482+}
51483+
51484+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
51485+EXPORT_SYMBOL(gr_acl_is_enabled);
51486+#endif
51487+EXPORT_SYMBOL(gr_learn_resource);
51488+EXPORT_SYMBOL(gr_set_kernel_label);
51489+#ifdef CONFIG_SECURITY
51490+EXPORT_SYMBOL(gr_check_user_change);
51491+EXPORT_SYMBOL(gr_check_group_change);
51492+#endif
51493+
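gr_acl_handle_filldir() above only has to consult globbed objects when the directory being listed is an anchor for them: it builds the candidate path as "<anchor>/<entry name>" (skipping the extra slash when the anchor is "/") and lets the first glob that matches decide visibility, falling back to the anchor object's own mode. A minimal user-space sketch of that assemble-and-match step, using fnmatch(3) as a stand-in for the patch's internal glob_match() (which likewise returns 0 on a match); the paths and patterns are made up for illustration:

#include <stdio.h>
#include <string.h>
#include <fnmatch.h>

/* Illustrative only: append the entry name to the anchor directory's path
 * (no extra '/' when the anchor is the root), then report whether any glob
 * matches the assembled path; the patch then uses that object's GR_FIND bit. */
static int entry_matches_glob(const char *anchor, const char *name,
			      const char *const *globs, size_t nglobs)
{
	char path[4096];
	int is_not_root = !(anchor[0] == '/' && anchor[1] == '\0');
	size_t i;

	snprintf(path, sizeof(path), "%s%s%s",
		 anchor, is_not_root ? "/" : "", name);

	for (i = 0; i < nglobs; i++)
		if (fnmatch(globs[i], path, 0) == 0)
			return 1;	/* first matching glob wins */
	return 0;
}

int main(void)
{
	const char *globs[] = { "/home/*/.ssh", "/home/*/public_html" };

	printf("%d\n", entry_matches_glob("/home/alice", ".ssh", globs, 2)); /* 1 */
	printf("%d\n", entry_matches_glob("/", "etc", globs, 2));            /* 0 */
	return 0;
}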
51494diff -urNp linux-2.6.32.43/grsecurity/gracl_cap.c linux-2.6.32.43/grsecurity/gracl_cap.c
51495--- linux-2.6.32.43/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
51496+++ linux-2.6.32.43/grsecurity/gracl_cap.c 2011-04-17 15:56:46.000000000 -0400
51497@@ -0,0 +1,138 @@
51498+#include <linux/kernel.h>
51499+#include <linux/module.h>
51500+#include <linux/sched.h>
51501+#include <linux/gracl.h>
51502+#include <linux/grsecurity.h>
51503+#include <linux/grinternal.h>
51504+
51505+static const char *captab_log[] = {
51506+ "CAP_CHOWN",
51507+ "CAP_DAC_OVERRIDE",
51508+ "CAP_DAC_READ_SEARCH",
51509+ "CAP_FOWNER",
51510+ "CAP_FSETID",
51511+ "CAP_KILL",
51512+ "CAP_SETGID",
51513+ "CAP_SETUID",
51514+ "CAP_SETPCAP",
51515+ "CAP_LINUX_IMMUTABLE",
51516+ "CAP_NET_BIND_SERVICE",
51517+ "CAP_NET_BROADCAST",
51518+ "CAP_NET_ADMIN",
51519+ "CAP_NET_RAW",
51520+ "CAP_IPC_LOCK",
51521+ "CAP_IPC_OWNER",
51522+ "CAP_SYS_MODULE",
51523+ "CAP_SYS_RAWIO",
51524+ "CAP_SYS_CHROOT",
51525+ "CAP_SYS_PTRACE",
51526+ "CAP_SYS_PACCT",
51527+ "CAP_SYS_ADMIN",
51528+ "CAP_SYS_BOOT",
51529+ "CAP_SYS_NICE",
51530+ "CAP_SYS_RESOURCE",
51531+ "CAP_SYS_TIME",
51532+ "CAP_SYS_TTY_CONFIG",
51533+ "CAP_MKNOD",
51534+ "CAP_LEASE",
51535+ "CAP_AUDIT_WRITE",
51536+ "CAP_AUDIT_CONTROL",
51537+ "CAP_SETFCAP",
51538+ "CAP_MAC_OVERRIDE",
51539+ "CAP_MAC_ADMIN"
51540+};
51541+
51542+EXPORT_SYMBOL(gr_is_capable);
51543+EXPORT_SYMBOL(gr_is_capable_nolog);
51544+
51545+int
51546+gr_is_capable(const int cap)
51547+{
51548+ struct task_struct *task = current;
51549+ const struct cred *cred = current_cred();
51550+ struct acl_subject_label *curracl;
51551+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51552+ kernel_cap_t cap_audit = __cap_empty_set;
51553+
51554+ if (!gr_acl_is_enabled())
51555+ return 1;
51556+
51557+ curracl = task->acl;
51558+
51559+ cap_drop = curracl->cap_lower;
51560+ cap_mask = curracl->cap_mask;
51561+ cap_audit = curracl->cap_invert_audit;
51562+
51563+ while ((curracl = curracl->parent_subject)) {
51564+		/* if the cap isn't specified in the current computed mask but is specified in the
51565+		   current level subject, and is lowered in the current level subject, then add
51566+		   it to the set of dropped capabilities;
51567+		   otherwise, add the current level subject's mask to the current computed mask
51568+		*/
51569+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51570+ cap_raise(cap_mask, cap);
51571+ if (cap_raised(curracl->cap_lower, cap))
51572+ cap_raise(cap_drop, cap);
51573+ if (cap_raised(curracl->cap_invert_audit, cap))
51574+ cap_raise(cap_audit, cap);
51575+ }
51576+ }
51577+
51578+ if (!cap_raised(cap_drop, cap)) {
51579+ if (cap_raised(cap_audit, cap))
51580+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
51581+ return 1;
51582+ }
51583+
51584+ curracl = task->acl;
51585+
51586+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
51587+ && cap_raised(cred->cap_effective, cap)) {
51588+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
51589+ task->role->roletype, cred->uid,
51590+ cred->gid, task->exec_file ?
51591+ gr_to_filename(task->exec_file->f_path.dentry,
51592+ task->exec_file->f_path.mnt) : curracl->filename,
51593+ curracl->filename, 0UL,
51594+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
51595+ return 1;
51596+ }
51597+
51598+ if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
51599+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
51600+ return 0;
51601+}
51602+
51603+int
51604+gr_is_capable_nolog(const int cap)
51605+{
51606+ struct acl_subject_label *curracl;
51607+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
51608+
51609+ if (!gr_acl_is_enabled())
51610+ return 1;
51611+
51612+ curracl = current->acl;
51613+
51614+ cap_drop = curracl->cap_lower;
51615+ cap_mask = curracl->cap_mask;
51616+
51617+ while ((curracl = curracl->parent_subject)) {
51618+		/* if the cap isn't specified in the current computed mask but is specified in the
51619+		   current level subject, and is lowered in the current level subject, then add
51620+		   it to the set of dropped capabilities;
51621+		   otherwise, add the current level subject's mask to the current computed mask
51622+		*/
51623+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
51624+ cap_raise(cap_mask, cap);
51625+ if (cap_raised(curracl->cap_lower, cap))
51626+ cap_raise(cap_drop, cap);
51627+ }
51628+ }
51629+
51630+ if (!cap_raised(cap_drop, cap))
51631+ return 1;
51632+
51633+ return 0;
51634+}
51635+
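The parent-subject walk in gr_is_capable() and gr_is_capable_nolog() resolves each capability bottom-up: the first subject in the chain whose mask mentions the capability decides whether it is lowered, and subjects further up the chain cannot override that decision. The same resolution rule reduced to a stand-alone user-space sketch over a parent-linked list (the struct and field names here are illustrative simplifications, not the patch's kernel_cap_t handling):

#include <stdio.h>
#include <stdbool.h>

struct subject {
	unsigned long cap_mask;		/* caps this subject says anything about */
	unsigned long cap_lower;	/* caps this subject drops */
	struct subject *parent;
};

/* true if the capability survives the walk up the parent chain */
static bool cap_allowed(const struct subject *s, int cap)
{
	unsigned long bit = 1UL << cap;
	unsigned long mask = s->cap_mask;
	unsigned long drop = s->cap_lower;

	for (s = s->parent; s != NULL; s = s->parent) {
		if (!(mask & bit) && (s->cap_mask & bit)) {
			mask |= bit;		/* first ancestor to mention it wins */
			if (s->cap_lower & bit)
				drop |= bit;
		}
	}
	return !(drop & bit);
}

int main(void)
{
	struct subject root  = { .cap_mask = 1UL << 3, .cap_lower = 1UL << 3, .parent = NULL };
	struct subject child = { .cap_mask = 0, .cap_lower = 0, .parent = &root };

	printf("cap 3: %d\n", cap_allowed(&child, 3));	/* 0: lowered by the parent  */
	printf("cap 5: %d\n", cap_allowed(&child, 5));	/* 1: no subject mentions it */
	return 0;
}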
51636diff -urNp linux-2.6.32.43/grsecurity/gracl_fs.c linux-2.6.32.43/grsecurity/gracl_fs.c
51637--- linux-2.6.32.43/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
51638+++ linux-2.6.32.43/grsecurity/gracl_fs.c 2011-04-17 15:56:46.000000000 -0400
51639@@ -0,0 +1,431 @@
51640+#include <linux/kernel.h>
51641+#include <linux/sched.h>
51642+#include <linux/types.h>
51643+#include <linux/fs.h>
51644+#include <linux/file.h>
51645+#include <linux/stat.h>
51646+#include <linux/grsecurity.h>
51647+#include <linux/grinternal.h>
51648+#include <linux/gracl.h>
51649+
51650+__u32
51651+gr_acl_handle_hidden_file(const struct dentry * dentry,
51652+ const struct vfsmount * mnt)
51653+{
51654+ __u32 mode;
51655+
51656+ if (unlikely(!dentry->d_inode))
51657+ return GR_FIND;
51658+
51659+ mode =
51660+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
51661+
51662+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
51663+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51664+ return mode;
51665+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
51666+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
51667+ return 0;
51668+ } else if (unlikely(!(mode & GR_FIND)))
51669+ return 0;
51670+
51671+ return GR_FIND;
51672+}
51673+
51674+__u32
51675+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
51676+ const int fmode)
51677+{
51678+ __u32 reqmode = GR_FIND;
51679+ __u32 mode;
51680+
51681+ if (unlikely(!dentry->d_inode))
51682+ return reqmode;
51683+
51684+ if (unlikely(fmode & O_APPEND))
51685+ reqmode |= GR_APPEND;
51686+ else if (unlikely(fmode & FMODE_WRITE))
51687+ reqmode |= GR_WRITE;
51688+ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
51689+ reqmode |= GR_READ;
51690+ if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC))
51691+ reqmode &= ~GR_READ;
51692+ mode =
51693+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51694+ mnt);
51695+
51696+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51697+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51698+ reqmode & GR_READ ? " reading" : "",
51699+ reqmode & GR_WRITE ? " writing" : reqmode &
51700+ GR_APPEND ? " appending" : "");
51701+ return reqmode;
51702+ } else
51703+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51704+ {
51705+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
51706+ reqmode & GR_READ ? " reading" : "",
51707+ reqmode & GR_WRITE ? " writing" : reqmode &
51708+ GR_APPEND ? " appending" : "");
51709+ return 0;
51710+ } else if (unlikely((mode & reqmode) != reqmode))
51711+ return 0;
51712+
51713+ return reqmode;
51714+}
51715+
51716+__u32
51717+gr_acl_handle_creat(const struct dentry * dentry,
51718+ const struct dentry * p_dentry,
51719+ const struct vfsmount * p_mnt, const int fmode,
51720+ const int imode)
51721+{
51722+ __u32 reqmode = GR_WRITE | GR_CREATE;
51723+ __u32 mode;
51724+
51725+ if (unlikely(fmode & O_APPEND))
51726+ reqmode |= GR_APPEND;
51727+ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
51728+ reqmode |= GR_READ;
51729+ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
51730+ reqmode |= GR_SETID;
51731+
51732+ mode =
51733+ gr_check_create(dentry, p_dentry, p_mnt,
51734+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51735+
51736+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51737+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51738+ reqmode & GR_READ ? " reading" : "",
51739+ reqmode & GR_WRITE ? " writing" : reqmode &
51740+ GR_APPEND ? " appending" : "");
51741+ return reqmode;
51742+ } else
51743+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51744+ {
51745+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
51746+ reqmode & GR_READ ? " reading" : "",
51747+ reqmode & GR_WRITE ? " writing" : reqmode &
51748+ GR_APPEND ? " appending" : "");
51749+ return 0;
51750+ } else if (unlikely((mode & reqmode) != reqmode))
51751+ return 0;
51752+
51753+ return reqmode;
51754+}
51755+
51756+__u32
51757+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
51758+ const int fmode)
51759+{
51760+ __u32 mode, reqmode = GR_FIND;
51761+
51762+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
51763+ reqmode |= GR_EXEC;
51764+ if (fmode & S_IWOTH)
51765+ reqmode |= GR_WRITE;
51766+ if (fmode & S_IROTH)
51767+ reqmode |= GR_READ;
51768+
51769+ mode =
51770+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
51771+ mnt);
51772+
51773+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
51774+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51775+ reqmode & GR_READ ? " reading" : "",
51776+ reqmode & GR_WRITE ? " writing" : "",
51777+ reqmode & GR_EXEC ? " executing" : "");
51778+ return reqmode;
51779+ } else
51780+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
51781+ {
51782+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
51783+ reqmode & GR_READ ? " reading" : "",
51784+ reqmode & GR_WRITE ? " writing" : "",
51785+ reqmode & GR_EXEC ? " executing" : "");
51786+ return 0;
51787+ } else if (unlikely((mode & reqmode) != reqmode))
51788+ return 0;
51789+
51790+ return reqmode;
51791+}
51792+
51793+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
51794+{
51795+ __u32 mode;
51796+
51797+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
51798+
51799+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51800+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
51801+ return mode;
51802+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51803+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
51804+ return 0;
51805+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
51806+ return 0;
51807+
51808+ return (reqmode);
51809+}
51810+
51811+__u32
51812+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
51813+{
51814+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
51815+}
51816+
51817+__u32
51818+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
51819+{
51820+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
51821+}
51822+
51823+__u32
51824+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
51825+{
51826+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
51827+}
51828+
51829+__u32
51830+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
51831+{
51832+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
51833+}
51834+
51835+__u32
51836+gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
51837+ mode_t mode)
51838+{
51839+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
51840+ return 1;
51841+
51842+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51843+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51844+ GR_FCHMOD_ACL_MSG);
51845+ } else {
51846+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
51847+ }
51848+}
51849+
51850+__u32
51851+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
51852+ mode_t mode)
51853+{
51854+ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
51855+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
51856+ GR_CHMOD_ACL_MSG);
51857+ } else {
51858+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
51859+ }
51860+}
51861+
51862+__u32
51863+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
51864+{
51865+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
51866+}
51867+
51868+__u32
51869+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
51870+{
51871+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
51872+}
51873+
51874+__u32
51875+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
51876+{
51877+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
51878+}
51879+
51880+__u32
51881+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
51882+{
51883+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
51884+ GR_UNIXCONNECT_ACL_MSG);
51885+}
51886+
51887+/* hardlinks require at minimum create permission,
51888+ any additional privilege required is based on the
51889+ privilege of the file being linked to
51890+*/
51891+__u32
51892+gr_acl_handle_link(const struct dentry * new_dentry,
51893+ const struct dentry * parent_dentry,
51894+ const struct vfsmount * parent_mnt,
51895+ const struct dentry * old_dentry,
51896+ const struct vfsmount * old_mnt, const char *to)
51897+{
51898+ __u32 mode;
51899+ __u32 needmode = GR_CREATE | GR_LINK;
51900+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
51901+
51902+ mode =
51903+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
51904+ old_mnt);
51905+
51906+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
51907+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51908+ return mode;
51909+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51910+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to);
51911+ return 0;
51912+ } else if (unlikely((mode & needmode) != needmode))
51913+ return 0;
51914+
51915+ return 1;
51916+}
51917+
51918+__u32
51919+gr_acl_handle_symlink(const struct dentry * new_dentry,
51920+ const struct dentry * parent_dentry,
51921+ const struct vfsmount * parent_mnt, const char *from)
51922+{
51923+ __u32 needmode = GR_WRITE | GR_CREATE;
51924+ __u32 mode;
51925+
51926+ mode =
51927+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
51928+ GR_CREATE | GR_AUDIT_CREATE |
51929+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
51930+
51931+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
51932+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51933+ return mode;
51934+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
51935+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt);
51936+ return 0;
51937+ } else if (unlikely((mode & needmode) != needmode))
51938+ return 0;
51939+
51940+ return (GR_WRITE | GR_CREATE);
51941+}
51942+
51943+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
51944+{
51945+ __u32 mode;
51946+
51947+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
51948+
51949+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
51950+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
51951+ return mode;
51952+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
51953+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
51954+ return 0;
51955+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
51956+ return 0;
51957+
51958+ return (reqmode);
51959+}
51960+
51961+__u32
51962+gr_acl_handle_mknod(const struct dentry * new_dentry,
51963+ const struct dentry * parent_dentry,
51964+ const struct vfsmount * parent_mnt,
51965+ const int mode)
51966+{
51967+ __u32 reqmode = GR_WRITE | GR_CREATE;
51968+ if (unlikely(mode & (S_ISUID | S_ISGID)))
51969+ reqmode |= GR_SETID;
51970+
51971+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51972+ reqmode, GR_MKNOD_ACL_MSG);
51973+}
51974+
51975+__u32
51976+gr_acl_handle_mkdir(const struct dentry *new_dentry,
51977+ const struct dentry *parent_dentry,
51978+ const struct vfsmount *parent_mnt)
51979+{
51980+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
51981+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
51982+}
51983+
51984+#define RENAME_CHECK_SUCCESS(old, new) \
51985+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
51986+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
51987+
51988+int
51989+gr_acl_handle_rename(struct dentry *new_dentry,
51990+ struct dentry *parent_dentry,
51991+ const struct vfsmount *parent_mnt,
51992+ struct dentry *old_dentry,
51993+ struct inode *old_parent_inode,
51994+ struct vfsmount *old_mnt, const char *newname)
51995+{
51996+ __u32 comp1, comp2;
51997+ int error = 0;
51998+
51999+ if (unlikely(!gr_acl_is_enabled()))
52000+ return 0;
52001+
52002+ if (!new_dentry->d_inode) {
52003+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
52004+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
52005+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
52006+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
52007+ GR_DELETE | GR_AUDIT_DELETE |
52008+ GR_AUDIT_READ | GR_AUDIT_WRITE |
52009+ GR_SUPPRESS, old_mnt);
52010+ } else {
52011+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
52012+ GR_CREATE | GR_DELETE |
52013+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
52014+ GR_AUDIT_READ | GR_AUDIT_WRITE |
52015+ GR_SUPPRESS, parent_mnt);
52016+ comp2 =
52017+ gr_search_file(old_dentry,
52018+ GR_READ | GR_WRITE | GR_AUDIT_READ |
52019+ GR_DELETE | GR_AUDIT_DELETE |
52020+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
52021+ }
52022+
52023+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
52024+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
52025+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
52026+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
52027+ && !(comp2 & GR_SUPPRESS)) {
52028+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname);
52029+ error = -EACCES;
52030+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
52031+ error = -EACCES;
52032+
52033+ return error;
52034+}
52035+
52036+void
52037+gr_acl_handle_exit(void)
52038+{
52039+ u16 id;
52040+ char *rolename;
52041+ struct file *exec_file;
52042+
52043+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
52044+ !(current->role->roletype & GR_ROLE_PERSIST))) {
52045+ id = current->acl_role_id;
52046+ rolename = current->role->rolename;
52047+ gr_set_acls(1);
52048+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
52049+ }
52050+
52051+ write_lock(&grsec_exec_file_lock);
52052+ exec_file = current->exec_file;
52053+ current->exec_file = NULL;
52054+ write_unlock(&grsec_exec_file_lock);
52055+
52056+ if (exec_file)
52057+ fput(exec_file);
52058+}
52059+
52060+int
52061+gr_acl_handle_procpidmem(const struct task_struct *task)
52062+{
52063+ if (unlikely(!gr_acl_is_enabled()))
52064+ return 0;
52065+
52066+ if (task != current && task->acl->mode & GR_PROTPROCFD)
52067+ return -EACCES;
52068+
52069+ return 0;
52070+}
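gr_acl_handle_open() above derives the requested RBAC access bits from the open mode before calling gr_search_file(): an append-mode open asks only for append (not generic write), any other writable open asks for write, and a readable open asks for read. A compact sketch of that translation against plain open(2) flags; the REQ_* names are illustrative stand-ins for the patch's GR_* bits, the kernel code actually inspects FMODE_* values, and the O_DIRECTORY and FMODE_GREXEC special cases are omitted:

#include <stdio.h>
#include <fcntl.h>

#define REQ_FIND	0x1
#define REQ_READ	0x2
#define REQ_WRITE	0x4
#define REQ_APPEND	0x8

/* illustrative mapping of open(2) flags to requested access bits */
static unsigned int open_reqmode(int flags)
{
	unsigned int req = REQ_FIND;
	int acc = flags & O_ACCMODE;

	if (flags & O_APPEND)
		req |= REQ_APPEND;	/* append-only writers do not need REQ_WRITE */
	else if (acc == O_WRONLY || acc == O_RDWR)
		req |= REQ_WRITE;
	if (acc == O_RDONLY || acc == O_RDWR)
		req |= REQ_READ;
	return req;
}

int main(void)
{
	printf("%#x\n", open_reqmode(O_WRONLY | O_APPEND));	/* FIND|APPEND     */
	printf("%#x\n", open_reqmode(O_RDWR));			/* FIND|READ|WRITE */
	return 0;
}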
52071diff -urNp linux-2.6.32.43/grsecurity/gracl_ip.c linux-2.6.32.43/grsecurity/gracl_ip.c
52072--- linux-2.6.32.43/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
52073+++ linux-2.6.32.43/grsecurity/gracl_ip.c 2011-04-17 15:56:46.000000000 -0400
52074@@ -0,0 +1,382 @@
52075+#include <linux/kernel.h>
52076+#include <asm/uaccess.h>
52077+#include <asm/errno.h>
52078+#include <net/sock.h>
52079+#include <linux/file.h>
52080+#include <linux/fs.h>
52081+#include <linux/net.h>
52082+#include <linux/in.h>
52083+#include <linux/skbuff.h>
52084+#include <linux/ip.h>
52085+#include <linux/udp.h>
52086+#include <linux/smp_lock.h>
52087+#include <linux/types.h>
52088+#include <linux/sched.h>
52089+#include <linux/netdevice.h>
52090+#include <linux/inetdevice.h>
52091+#include <linux/gracl.h>
52092+#include <linux/grsecurity.h>
52093+#include <linux/grinternal.h>
52094+
52095+#define GR_BIND 0x01
52096+#define GR_CONNECT 0x02
52097+#define GR_INVERT 0x04
52098+#define GR_BINDOVERRIDE 0x08
52099+#define GR_CONNECTOVERRIDE 0x10
52100+#define GR_SOCK_FAMILY 0x20
52101+
52102+static const char * gr_protocols[IPPROTO_MAX] = {
52103+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
52104+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
52105+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
52106+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
52107+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
52108+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
52109+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
52110+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
52111+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
52112+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
52113+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
52114+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
52115+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
52116+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
52117+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
52118+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
52119+	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
52120+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
52121+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
52122+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
52123+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
52124+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
52125+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
52126+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
52127+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
52128+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
52129+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
52130+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
52131+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
52132+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
52133+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
52134+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
52135+ };
52136+
52137+static const char * gr_socktypes[SOCK_MAX] = {
52138+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
52139+ "unknown:7", "unknown:8", "unknown:9", "packet"
52140+ };
52141+
52142+static const char * gr_sockfamilies[AF_MAX+1] = {
52143+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
52144+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
52145+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
52146+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154"
52147+ };
52148+
52149+const char *
52150+gr_proto_to_name(unsigned char proto)
52151+{
52152+ return gr_protocols[proto];
52153+}
52154+
52155+const char *
52156+gr_socktype_to_name(unsigned char type)
52157+{
52158+ return gr_socktypes[type];
52159+}
52160+
52161+const char *
52162+gr_sockfamily_to_name(unsigned char family)
52163+{
52164+ return gr_sockfamilies[family];
52165+}
52166+
52167+int
52168+gr_search_socket(const int domain, const int type, const int protocol)
52169+{
52170+ struct acl_subject_label *curr;
52171+ const struct cred *cred = current_cred();
52172+
52173+ if (unlikely(!gr_acl_is_enabled()))
52174+ goto exit;
52175+
52176+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
52177+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
52178+ goto exit; // let the kernel handle it
52179+
52180+ curr = current->acl;
52181+
52182+ if (curr->sock_families[domain / 32] & (1 << (domain % 32))) {
52183+ /* the family is allowed, if this is PF_INET allow it only if
52184+ the extra sock type/protocol checks pass */
52185+ if (domain == PF_INET)
52186+ goto inet_check;
52187+ goto exit;
52188+ } else {
52189+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52190+ __u32 fakeip = 0;
52191+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52192+ current->role->roletype, cred->uid,
52193+ cred->gid, current->exec_file ?
52194+ gr_to_filename(current->exec_file->f_path.dentry,
52195+ current->exec_file->f_path.mnt) :
52196+ curr->filename, curr->filename,
52197+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
52198+ &current->signal->saved_ip);
52199+ goto exit;
52200+ }
52201+ goto exit_fail;
52202+ }
52203+
52204+inet_check:
52205+ /* the rest of this checking is for IPv4 only */
52206+ if (!curr->ips)
52207+ goto exit;
52208+
52209+ if ((curr->ip_type & (1 << type)) &&
52210+ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
52211+ goto exit;
52212+
52213+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52214+		/* we don't place acls on raw sockets, and sometimes
52215+		   dgram/ip sockets are opened for ioctl and not
52216+		   bind/connect, so we'll fake a bind/connect learn log */
52217+ if (type == SOCK_RAW || type == SOCK_PACKET) {
52218+ __u32 fakeip = 0;
52219+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52220+ current->role->roletype, cred->uid,
52221+ cred->gid, current->exec_file ?
52222+ gr_to_filename(current->exec_file->f_path.dentry,
52223+ current->exec_file->f_path.mnt) :
52224+ curr->filename, curr->filename,
52225+ &fakeip, 0, type,
52226+ protocol, GR_CONNECT, &current->signal->saved_ip);
52227+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
52228+ __u32 fakeip = 0;
52229+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52230+ current->role->roletype, cred->uid,
52231+ cred->gid, current->exec_file ?
52232+ gr_to_filename(current->exec_file->f_path.dentry,
52233+ current->exec_file->f_path.mnt) :
52234+ curr->filename, curr->filename,
52235+ &fakeip, 0, type,
52236+ protocol, GR_BIND, &current->signal->saved_ip);
52237+ }
52238+ /* we'll log when they use connect or bind */
52239+ goto exit;
52240+ }
52241+
52242+exit_fail:
52243+ if (domain == PF_INET)
52244+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
52245+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
52246+ else
52247+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
52248+ gr_socktype_to_name(type), protocol);
52249+
52250+ return 0;
52251+exit:
52252+ return 1;
52253+}
52254+
52255+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
52256+{
52257+ if ((ip->mode & mode) &&
52258+ (ip_port >= ip->low) &&
52259+ (ip_port <= ip->high) &&
52260+ ((ntohl(ip_addr) & our_netmask) ==
52261+ (ntohl(our_addr) & our_netmask))
52262+ && (ip->proto[protocol / 32] & (1 << (protocol % 32)))
52263+ && (ip->type & (1 << type))) {
52264+ if (ip->mode & GR_INVERT)
52265+ return 2; // specifically denied
52266+ else
52267+ return 1; // allowed
52268+ }
52269+
52270+ return 0; // not specifically allowed, may continue parsing
52271+}
52272+
52273+static int
52274+gr_search_connectbind(const int full_mode, struct sock *sk,
52275+ struct sockaddr_in *addr, const int type)
52276+{
52277+ char iface[IFNAMSIZ] = {0};
52278+ struct acl_subject_label *curr;
52279+ struct acl_ip_label *ip;
52280+ struct inet_sock *isk;
52281+ struct net_device *dev;
52282+ struct in_device *idev;
52283+ unsigned long i;
52284+ int ret;
52285+ int mode = full_mode & (GR_BIND | GR_CONNECT);
52286+ __u32 ip_addr = 0;
52287+ __u32 our_addr;
52288+ __u32 our_netmask;
52289+ char *p;
52290+ __u16 ip_port = 0;
52291+ const struct cred *cred = current_cred();
52292+
52293+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
52294+ return 0;
52295+
52296+ curr = current->acl;
52297+ isk = inet_sk(sk);
52298+
52299+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
52300+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
52301+ addr->sin_addr.s_addr = curr->inaddr_any_override;
52302+ if ((full_mode & GR_CONNECT) && isk->saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
52303+ struct sockaddr_in saddr;
52304+ int err;
52305+
52306+ saddr.sin_family = AF_INET;
52307+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
52308+ saddr.sin_port = isk->sport;
52309+
52310+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
52311+ if (err)
52312+ return err;
52313+
52314+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
52315+ if (err)
52316+ return err;
52317+ }
52318+
52319+ if (!curr->ips)
52320+ return 0;
52321+
52322+ ip_addr = addr->sin_addr.s_addr;
52323+ ip_port = ntohs(addr->sin_port);
52324+
52325+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
52326+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
52327+ current->role->roletype, cred->uid,
52328+ cred->gid, current->exec_file ?
52329+ gr_to_filename(current->exec_file->f_path.dentry,
52330+ current->exec_file->f_path.mnt) :
52331+ curr->filename, curr->filename,
52332+ &ip_addr, ip_port, type,
52333+ sk->sk_protocol, mode, &current->signal->saved_ip);
52334+ return 0;
52335+ }
52336+
52337+ for (i = 0; i < curr->ip_num; i++) {
52338+ ip = *(curr->ips + i);
52339+ if (ip->iface != NULL) {
52340+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
52341+ p = strchr(iface, ':');
52342+ if (p != NULL)
52343+ *p = '\0';
52344+ dev = dev_get_by_name(sock_net(sk), iface);
52345+ if (dev == NULL)
52346+ continue;
52347+ idev = in_dev_get(dev);
52348+ if (idev == NULL) {
52349+ dev_put(dev);
52350+ continue;
52351+ }
52352+ rcu_read_lock();
52353+ for_ifa(idev) {
52354+ if (!strcmp(ip->iface, ifa->ifa_label)) {
52355+ our_addr = ifa->ifa_address;
52356+ our_netmask = 0xffffffff;
52357+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
52358+ if (ret == 1) {
52359+ rcu_read_unlock();
52360+ in_dev_put(idev);
52361+ dev_put(dev);
52362+ return 0;
52363+ } else if (ret == 2) {
52364+ rcu_read_unlock();
52365+ in_dev_put(idev);
52366+ dev_put(dev);
52367+ goto denied;
52368+ }
52369+ }
52370+ } endfor_ifa(idev);
52371+ rcu_read_unlock();
52372+ in_dev_put(idev);
52373+ dev_put(dev);
52374+ } else {
52375+ our_addr = ip->addr;
52376+ our_netmask = ip->netmask;
52377+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
52378+ if (ret == 1)
52379+ return 0;
52380+ else if (ret == 2)
52381+ goto denied;
52382+ }
52383+ }
52384+
52385+denied:
52386+ if (mode == GR_BIND)
52387+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
52388+ else if (mode == GR_CONNECT)
52389+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
52390+
52391+ return -EACCES;
52392+}
52393+
52394+int
52395+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
52396+{
52397+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
52398+}
52399+
52400+int
52401+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
52402+{
52403+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
52404+}
52405+
52406+int gr_search_listen(struct socket *sock)
52407+{
52408+ struct sock *sk = sock->sk;
52409+ struct sockaddr_in addr;
52410+
52411+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
52412+ addr.sin_port = inet_sk(sk)->sport;
52413+
52414+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
52415+}
52416+
52417+int gr_search_accept(struct socket *sock)
52418+{
52419+ struct sock *sk = sock->sk;
52420+ struct sockaddr_in addr;
52421+
52422+ addr.sin_addr.s_addr = inet_sk(sk)->saddr;
52423+ addr.sin_port = inet_sk(sk)->sport;
52424+
52425+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
52426+}
52427+
52428+int
52429+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
52430+{
52431+ if (addr)
52432+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
52433+ else {
52434+ struct sockaddr_in sin;
52435+ const struct inet_sock *inet = inet_sk(sk);
52436+
52437+ sin.sin_addr.s_addr = inet->daddr;
52438+ sin.sin_port = inet->dport;
52439+
52440+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
52441+ }
52442+}
52443+
52444+int
52445+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
52446+{
52447+ struct sockaddr_in sin;
52448+
52449+ if (unlikely(skb->len < sizeof (struct udphdr)))
52450+ return 0; // skip this packet
52451+
52452+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
52453+ sin.sin_port = udp_hdr(skb)->source;
52454+
52455+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
52456+}
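check_ip_policy() above is deliberately tri-state: 0 means the rule says nothing about this endpoint (so gr_search_connectbind() keeps scanning the subject's other IP ACLs), 1 means explicitly allowed, and 2 means an inverted rule matched, i.e. the endpoint is specifically denied. A self-contained sketch of the same match-then-invert test against a single rule; struct and flag names are illustrative, and addresses are kept in host byte order for simplicity:

#include <stdio.h>
#include <stdint.h>

#define MODE_CONNECT	0x1
#define MODE_BIND	0x2
#define MODE_INVERT	0x4

struct ip_rule {
	uint32_t addr;
	uint32_t netmask;
	uint16_t low, high;	/* allowed port range */
	unsigned int mode;
};

/* 0: rule does not apply, 1: allowed, 2: specifically denied */
static int ip_rule_check(const struct ip_rule *r, uint32_t addr,
			 uint16_t port, unsigned int mode)
{
	if ((r->mode & mode) &&
	    port >= r->low && port <= r->high &&
	    (addr & r->netmask) == (r->addr & r->netmask))
		return (r->mode & MODE_INVERT) ? 2 : 1;
	return 0;
}

int main(void)
{
	struct ip_rule lan = {
		.addr = 0xc0a80000u,	/* 192.168.0.0/16 */
		.netmask = 0xffff0000u,
		.low = 1, .high = 65535,
		.mode = MODE_CONNECT,
	};
	uint32_t peer = 0xc0a8010au;	/* 192.168.1.10 */

	printf("%d\n", ip_rule_check(&lan, peer, 80, MODE_CONNECT));	/* 1 */
	printf("%d\n", ip_rule_check(&lan, peer, 80, MODE_BIND));	/* 0 */
	return 0;
}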
52457diff -urNp linux-2.6.32.43/grsecurity/gracl_learn.c linux-2.6.32.43/grsecurity/gracl_learn.c
52458--- linux-2.6.32.43/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
52459+++ linux-2.6.32.43/grsecurity/gracl_learn.c 2011-07-14 21:02:03.000000000 -0400
52460@@ -0,0 +1,208 @@
52461+#include <linux/kernel.h>
52462+#include <linux/mm.h>
52463+#include <linux/sched.h>
52464+#include <linux/poll.h>
52465+#include <linux/smp_lock.h>
52466+#include <linux/string.h>
52467+#include <linux/file.h>
52468+#include <linux/types.h>
52469+#include <linux/vmalloc.h>
52470+#include <linux/grinternal.h>
52471+
52472+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
52473+ size_t count, loff_t *ppos);
52474+extern int gr_acl_is_enabled(void);
52475+
52476+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
52477+static int gr_learn_attached;
52478+
52479+/* use a 512k buffer */
52480+#define LEARN_BUFFER_SIZE (512 * 1024)
52481+
52482+static DEFINE_SPINLOCK(gr_learn_lock);
52483+static DEFINE_MUTEX(gr_learn_user_mutex);
52484+
52485+/* we need to maintain two buffers, so that the kernel context of grlearn
52486+   uses a mutex around the userspace copying, and the other kernel contexts
52487+   use a spinlock when copying into the buffer, since they cannot sleep
52488+*/
52489+static char *learn_buffer;
52490+static char *learn_buffer_user;
52491+static int learn_buffer_len;
52492+static int learn_buffer_user_len;
52493+
52494+static ssize_t
52495+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
52496+{
52497+ DECLARE_WAITQUEUE(wait, current);
52498+ ssize_t retval = 0;
52499+
52500+ add_wait_queue(&learn_wait, &wait);
52501+ set_current_state(TASK_INTERRUPTIBLE);
52502+ do {
52503+ mutex_lock(&gr_learn_user_mutex);
52504+ spin_lock(&gr_learn_lock);
52505+ if (learn_buffer_len)
52506+ break;
52507+ spin_unlock(&gr_learn_lock);
52508+ mutex_unlock(&gr_learn_user_mutex);
52509+ if (file->f_flags & O_NONBLOCK) {
52510+ retval = -EAGAIN;
52511+ goto out;
52512+ }
52513+ if (signal_pending(current)) {
52514+ retval = -ERESTARTSYS;
52515+ goto out;
52516+ }
52517+
52518+ schedule();
52519+ } while (1);
52520+
52521+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
52522+ learn_buffer_user_len = learn_buffer_len;
52523+ retval = learn_buffer_len;
52524+ learn_buffer_len = 0;
52525+
52526+ spin_unlock(&gr_learn_lock);
52527+
52528+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
52529+ retval = -EFAULT;
52530+
52531+ mutex_unlock(&gr_learn_user_mutex);
52532+out:
52533+ set_current_state(TASK_RUNNING);
52534+ remove_wait_queue(&learn_wait, &wait);
52535+ return retval;
52536+}
52537+
52538+static unsigned int
52539+poll_learn(struct file * file, poll_table * wait)
52540+{
52541+ poll_wait(file, &learn_wait, wait);
52542+
52543+ if (learn_buffer_len)
52544+ return (POLLIN | POLLRDNORM);
52545+
52546+ return 0;
52547+}
52548+
52549+void
52550+gr_clear_learn_entries(void)
52551+{
52552+ char *tmp;
52553+
52554+ mutex_lock(&gr_learn_user_mutex);
52555+ spin_lock(&gr_learn_lock);
52556+ tmp = learn_buffer;
52557+ learn_buffer = NULL;
52558+ spin_unlock(&gr_learn_lock);
52559+ if (tmp)
52560+ vfree(tmp);
52561+ if (learn_buffer_user != NULL) {
52562+ vfree(learn_buffer_user);
52563+ learn_buffer_user = NULL;
52564+ }
52565+ learn_buffer_len = 0;
52566+ mutex_unlock(&gr_learn_user_mutex);
52567+
52568+ return;
52569+}
52570+
52571+void
52572+gr_add_learn_entry(const char *fmt, ...)
52573+{
52574+ va_list args;
52575+ unsigned int len;
52576+
52577+ if (!gr_learn_attached)
52578+ return;
52579+
52580+ spin_lock(&gr_learn_lock);
52581+
52582+ /* leave a gap at the end so we know when it's "full" but don't have to
52583+ compute the exact length of the string we're trying to append
52584+ */
52585+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
52586+ spin_unlock(&gr_learn_lock);
52587+ wake_up_interruptible(&learn_wait);
52588+ return;
52589+ }
52590+ if (learn_buffer == NULL) {
52591+ spin_unlock(&gr_learn_lock);
52592+ return;
52593+ }
52594+
52595+ va_start(args, fmt);
52596+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
52597+ va_end(args);
52598+
52599+ learn_buffer_len += len + 1;
52600+
52601+ spin_unlock(&gr_learn_lock);
52602+ wake_up_interruptible(&learn_wait);
52603+
52604+ return;
52605+}
52606+
52607+static int
52608+open_learn(struct inode *inode, struct file *file)
52609+{
52610+ if (file->f_mode & FMODE_READ && gr_learn_attached)
52611+ return -EBUSY;
52612+ if (file->f_mode & FMODE_READ) {
52613+ int retval = 0;
52614+ mutex_lock(&gr_learn_user_mutex);
52615+ if (learn_buffer == NULL)
52616+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
52617+ if (learn_buffer_user == NULL)
52618+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
52619+ if (learn_buffer == NULL) {
52620+ retval = -ENOMEM;
52621+ goto out_error;
52622+ }
52623+ if (learn_buffer_user == NULL) {
52624+ retval = -ENOMEM;
52625+ goto out_error;
52626+ }
52627+ learn_buffer_len = 0;
52628+ learn_buffer_user_len = 0;
52629+ gr_learn_attached = 1;
52630+out_error:
52631+ mutex_unlock(&gr_learn_user_mutex);
52632+ return retval;
52633+ }
52634+ return 0;
52635+}
52636+
52637+static int
52638+close_learn(struct inode *inode, struct file *file)
52639+{
52640+ if (file->f_mode & FMODE_READ) {
52641+ char *tmp = NULL;
52642+ mutex_lock(&gr_learn_user_mutex);
52643+ spin_lock(&gr_learn_lock);
52644+ tmp = learn_buffer;
52645+ learn_buffer = NULL;
52646+ spin_unlock(&gr_learn_lock);
52647+ if (tmp)
52648+ vfree(tmp);
52649+ if (learn_buffer_user != NULL) {
52650+ vfree(learn_buffer_user);
52651+ learn_buffer_user = NULL;
52652+ }
52653+ learn_buffer_len = 0;
52654+ learn_buffer_user_len = 0;
52655+ gr_learn_attached = 0;
52656+ mutex_unlock(&gr_learn_user_mutex);
52657+ }
52658+
52659+ return 0;
52660+}
52661+
52662+const struct file_operations grsec_fops = {
52663+ .read = read_learn,
52664+ .write = write_grsec_handler,
52665+ .open = open_learn,
52666+ .release = close_learn,
52667+ .poll = poll_learn,
52668+};
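gracl_learn.c keeps two 512 KB buffers because its producers and its single consumer run under different constraints: gr_add_learn_entry() may be called from contexts that cannot sleep, so it appends under a spinlock, while the grlearn reader serializes on a mutex, snapshots learn_buffer into learn_buffer_user under the spinlock, and performs the slow copy_to_user() only after dropping it. A reduced user-space analogue of that two-lock, two-buffer shape; POSIX mutexes stand in for both kernel lock types, which is only an approximation:

#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define BUF_SIZE 4096

static char buf[BUF_SIZE];
static size_t buf_len;
static char snapshot[BUF_SIZE];

static pthread_mutex_t append_lock = PTHREAD_MUTEX_INITIALIZER;	/* spinlock stand-in */
static pthread_mutex_t reader_lock = PTHREAD_MUTEX_INITIALIZER;	/* mutex stand-in    */

/* producer side: may be called from many threads, holds only the short lock */
static void add_entry(const char *s)
{
	size_t n = strlen(s) + 1;

	pthread_mutex_lock(&append_lock);
	if (buf_len + n <= BUF_SIZE) {
		memcpy(buf + buf_len, s, n);
		buf_len += n;
	}
	pthread_mutex_unlock(&append_lock);
}

/* single consumer: snapshot under the short lock, do the slow work outside it */
static size_t drain(void)
{
	size_t n;

	pthread_mutex_lock(&reader_lock);
	pthread_mutex_lock(&append_lock);
	n = buf_len;
	memcpy(snapshot, buf, n);
	buf_len = 0;
	pthread_mutex_unlock(&append_lock);
	/* the expensive copy-out (copy_to_user in the patch) would happen here */
	pthread_mutex_unlock(&reader_lock);
	return n;
}

int main(void)
{
	add_entry("subject /bin/foo");
	add_entry("subject /bin/bar");
	printf("drained %zu bytes\n", drain());
	return 0;
}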
52669diff -urNp linux-2.6.32.43/grsecurity/gracl_res.c linux-2.6.32.43/grsecurity/gracl_res.c
52670--- linux-2.6.32.43/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
52671+++ linux-2.6.32.43/grsecurity/gracl_res.c 2011-04-17 15:56:46.000000000 -0400
52672@@ -0,0 +1,67 @@
52673+#include <linux/kernel.h>
52674+#include <linux/sched.h>
52675+#include <linux/gracl.h>
52676+#include <linux/grinternal.h>
52677+
52678+static const char *restab_log[] = {
52679+ [RLIMIT_CPU] = "RLIMIT_CPU",
52680+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
52681+ [RLIMIT_DATA] = "RLIMIT_DATA",
52682+ [RLIMIT_STACK] = "RLIMIT_STACK",
52683+ [RLIMIT_CORE] = "RLIMIT_CORE",
52684+ [RLIMIT_RSS] = "RLIMIT_RSS",
52685+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
52686+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
52687+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
52688+ [RLIMIT_AS] = "RLIMIT_AS",
52689+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
52690+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
52691+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
52692+ [RLIMIT_NICE] = "RLIMIT_NICE",
52693+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
52694+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
52695+ [GR_CRASH_RES] = "RLIMIT_CRASH"
52696+};
52697+
52698+void
52699+gr_log_resource(const struct task_struct *task,
52700+ const int res, const unsigned long wanted, const int gt)
52701+{
52702+ const struct cred *cred;
52703+ unsigned long rlim;
52704+
52705+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
52706+ return;
52707+
52708+ // not yet supported resource
52709+ if (unlikely(!restab_log[res]))
52710+ return;
52711+
52712+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
52713+ rlim = task->signal->rlim[res].rlim_max;
52714+ else
52715+ rlim = task->signal->rlim[res].rlim_cur;
52716+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
52717+ return;
52718+
52719+ rcu_read_lock();
52720+ cred = __task_cred(task);
52721+
52722+ if (res == RLIMIT_NPROC &&
52723+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
52724+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
52725+ goto out_rcu_unlock;
52726+ else if (res == RLIMIT_MEMLOCK &&
52727+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
52728+ goto out_rcu_unlock;
52729+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
52730+ goto out_rcu_unlock;
52731+ rcu_read_unlock();
52732+
52733+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
52734+
52735+ return;
52736+out_rcu_unlock:
52737+ rcu_read_unlock();
52738+ return;
52739+}
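gr_log_resource() above only emits a log entry when the request genuinely exceeds the relevant limit: an infinite limit never logs, and the gt argument selects whether reaching the limit exactly is still acceptable. The comparison in isolation, as a small sketch using RLIM_INFINITY from <sys/resource.h>:

#include <stdio.h>
#include <stdbool.h>
#include <sys/resource.h>

/* true when "wanted" stays within the limit and nothing needs to be logged */
static bool within_limit(unsigned long wanted, unsigned long rlim, int gt)
{
	return rlim == RLIM_INFINITY || (gt ? wanted <= rlim : wanted < rlim);
}

int main(void)
{
	printf("%d\n", within_limit(10, RLIM_INFINITY, 0));	/* 1 */
	printf("%d\n", within_limit(10, 10, 1));		/* 1: gt accepts equality */
	printf("%d\n", within_limit(10, 10, 0));		/* 0: would be logged     */
	return 0;
}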
52740diff -urNp linux-2.6.32.43/grsecurity/gracl_segv.c linux-2.6.32.43/grsecurity/gracl_segv.c
52741--- linux-2.6.32.43/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
52742+++ linux-2.6.32.43/grsecurity/gracl_segv.c 2011-04-17 15:56:46.000000000 -0400
52743@@ -0,0 +1,284 @@
52744+#include <linux/kernel.h>
52745+#include <linux/mm.h>
52746+#include <asm/uaccess.h>
52747+#include <asm/errno.h>
52748+#include <asm/mman.h>
52749+#include <net/sock.h>
52750+#include <linux/file.h>
52751+#include <linux/fs.h>
52752+#include <linux/net.h>
52753+#include <linux/in.h>
52754+#include <linux/smp_lock.h>
52755+#include <linux/slab.h>
52756+#include <linux/types.h>
52757+#include <linux/sched.h>
52758+#include <linux/timer.h>
52759+#include <linux/gracl.h>
52760+#include <linux/grsecurity.h>
52761+#include <linux/grinternal.h>
52762+
52763+static struct crash_uid *uid_set;
52764+static unsigned short uid_used;
52765+static DEFINE_SPINLOCK(gr_uid_lock);
52766+extern rwlock_t gr_inode_lock;
52767+extern struct acl_subject_label *
52768+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
52769+ struct acl_role_label *role);
52770+extern int gr_fake_force_sig(int sig, struct task_struct *t);
52771+
52772+int
52773+gr_init_uidset(void)
52774+{
52775+ uid_set =
52776+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
52777+ uid_used = 0;
52778+
52779+ return uid_set ? 1 : 0;
52780+}
52781+
52782+void
52783+gr_free_uidset(void)
52784+{
52785+ if (uid_set)
52786+ kfree(uid_set);
52787+
52788+ return;
52789+}
52790+
52791+int
52792+gr_find_uid(const uid_t uid)
52793+{
52794+ struct crash_uid *tmp = uid_set;
52795+ uid_t buid;
52796+ int low = 0, high = uid_used - 1, mid;
52797+
52798+ while (high >= low) {
52799+ mid = (low + high) >> 1;
52800+ buid = tmp[mid].uid;
52801+ if (buid == uid)
52802+ return mid;
52803+ if (buid > uid)
52804+ high = mid - 1;
52805+ if (buid < uid)
52806+ low = mid + 1;
52807+ }
52808+
52809+ return -1;
52810+}
52811+
52812+static __inline__ void
52813+gr_insertsort(void)
52814+{
52815+ unsigned short i, j;
52816+ struct crash_uid index;
52817+
52818+ for (i = 1; i < uid_used; i++) {
52819+ index = uid_set[i];
52820+ j = i;
52821+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
52822+ uid_set[j] = uid_set[j - 1];
52823+ j--;
52824+ }
52825+ uid_set[j] = index;
52826+ }
52827+
52828+ return;
52829+}
52830+
52831+static __inline__ void
52832+gr_insert_uid(const uid_t uid, const unsigned long expires)
52833+{
52834+ int loc;
52835+
52836+ if (uid_used == GR_UIDTABLE_MAX)
52837+ return;
52838+
52839+ loc = gr_find_uid(uid);
52840+
52841+ if (loc >= 0) {
52842+ uid_set[loc].expires = expires;
52843+ return;
52844+ }
52845+
52846+ uid_set[uid_used].uid = uid;
52847+ uid_set[uid_used].expires = expires;
52848+ uid_used++;
52849+
52850+ gr_insertsort();
52851+
52852+ return;
52853+}
52854+
52855+void
52856+gr_remove_uid(const unsigned short loc)
52857+{
52858+ unsigned short i;
52859+
52860+ for (i = loc + 1; i < uid_used; i++)
52861+ uid_set[i - 1] = uid_set[i];
52862+
52863+ uid_used--;
52864+
52865+ return;
52866+}
52867+
52868+int
52869+gr_check_crash_uid(const uid_t uid)
52870+{
52871+ int loc;
52872+ int ret = 0;
52873+
52874+ if (unlikely(!gr_acl_is_enabled()))
52875+ return 0;
52876+
52877+ spin_lock(&gr_uid_lock);
52878+ loc = gr_find_uid(uid);
52879+
52880+ if (loc < 0)
52881+ goto out_unlock;
52882+
52883+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
52884+ gr_remove_uid(loc);
52885+ else
52886+ ret = 1;
52887+
52888+out_unlock:
52889+ spin_unlock(&gr_uid_lock);
52890+ return ret;
52891+}
52892+
52893+static __inline__ int
52894+proc_is_setxid(const struct cred *cred)
52895+{
52896+ if (cred->uid != cred->euid || cred->uid != cred->suid ||
52897+ cred->uid != cred->fsuid)
52898+ return 1;
52899+ if (cred->gid != cred->egid || cred->gid != cred->sgid ||
52900+ cred->gid != cred->fsgid)
52901+ return 1;
52902+
52903+ return 0;
52904+}
52905+
52906+void
52907+gr_handle_crash(struct task_struct *task, const int sig)
52908+{
52909+ struct acl_subject_label *curr;
52910+ struct acl_subject_label *curr2;
52911+ struct task_struct *tsk, *tsk2;
52912+ const struct cred *cred;
52913+ const struct cred *cred2;
52914+
52915+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
52916+ return;
52917+
52918+ if (unlikely(!gr_acl_is_enabled()))
52919+ return;
52920+
52921+ curr = task->acl;
52922+
52923+ if (!(curr->resmask & (1 << GR_CRASH_RES)))
52924+ return;
52925+
52926+ if (time_before_eq(curr->expires, get_seconds())) {
52927+ curr->expires = 0;
52928+ curr->crashes = 0;
52929+ }
52930+
52931+ curr->crashes++;
52932+
52933+ if (!curr->expires)
52934+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
52935+
52936+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52937+ time_after(curr->expires, get_seconds())) {
52938+ rcu_read_lock();
52939+ cred = __task_cred(task);
52940+ if (cred->uid && proc_is_setxid(cred)) {
52941+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52942+ spin_lock(&gr_uid_lock);
52943+ gr_insert_uid(cred->uid, curr->expires);
52944+ spin_unlock(&gr_uid_lock);
52945+ curr->expires = 0;
52946+ curr->crashes = 0;
52947+ read_lock(&tasklist_lock);
52948+ do_each_thread(tsk2, tsk) {
52949+ cred2 = __task_cred(tsk);
52950+ if (tsk != task && cred2->uid == cred->uid)
52951+ gr_fake_force_sig(SIGKILL, tsk);
52952+ } while_each_thread(tsk2, tsk);
52953+ read_unlock(&tasklist_lock);
52954+ } else {
52955+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
52956+ read_lock(&tasklist_lock);
52957+ do_each_thread(tsk2, tsk) {
52958+ if (likely(tsk != task)) {
52959+ curr2 = tsk->acl;
52960+
52961+ if (curr2->device == curr->device &&
52962+ curr2->inode == curr->inode)
52963+ gr_fake_force_sig(SIGKILL, tsk);
52964+ }
52965+ } while_each_thread(tsk2, tsk);
52966+ read_unlock(&tasklist_lock);
52967+ }
52968+ rcu_read_unlock();
52969+ }
52970+
52971+ return;
52972+}
52973+
52974+int
52975+gr_check_crash_exec(const struct file *filp)
52976+{
52977+ struct acl_subject_label *curr;
52978+
52979+ if (unlikely(!gr_acl_is_enabled()))
52980+ return 0;
52981+
52982+ read_lock(&gr_inode_lock);
52983+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
52984+ filp->f_path.dentry->d_inode->i_sb->s_dev,
52985+ current->role);
52986+ read_unlock(&gr_inode_lock);
52987+
52988+ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
52989+ (!curr->crashes && !curr->expires))
52990+ return 0;
52991+
52992+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
52993+ time_after(curr->expires, get_seconds()))
52994+ return 1;
52995+ else if (time_before_eq(curr->expires, get_seconds())) {
52996+ curr->crashes = 0;
52997+ curr->expires = 0;
52998+ }
52999+
53000+ return 0;
53001+}
53002+
53003+void
53004+gr_handle_alertkill(struct task_struct *task)
53005+{
53006+ struct acl_subject_label *curracl;
53007+ __u32 curr_ip;
53008+ struct task_struct *p, *p2;
53009+
53010+ if (unlikely(!gr_acl_is_enabled()))
53011+ return;
53012+
53013+ curracl = task->acl;
53014+ curr_ip = task->signal->curr_ip;
53015+
53016+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
53017+ read_lock(&tasklist_lock);
53018+ do_each_thread(p2, p) {
53019+ if (p->signal->curr_ip == curr_ip)
53020+ gr_fake_force_sig(SIGKILL, p);
53021+ } while_each_thread(p2, p);
53022+ read_unlock(&tasklist_lock);
53023+ } else if (curracl->mode & GR_KILLPROC)
53024+ gr_fake_force_sig(SIGKILL, task);
53025+
53026+ return;
53027+}
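gracl_segv.c keeps the table of crashing uids sorted, so gr_check_crash_uid() can locate an entry with the binary search in gr_find_uid(), and gr_insert_uid() restores the ordering with an insertion sort after appending (only the new tail element is actually out of place). The same pair of operations on a plain array, as a user-space sketch; the kernel entries also carry an expiry timestamp, omitted here:

#include <stdio.h>

#define MAX_UIDS 8

static unsigned int uids[MAX_UIDS];
static int uid_used;

/* binary search over the sorted table; -1 when the uid is absent */
static int find_uid(unsigned int uid)
{
	int low = 0, high = uid_used - 1;

	while (high >= low) {
		int mid = (low + high) / 2;

		if (uids[mid] == uid)
			return mid;
		if (uids[mid] > uid)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

/* append, then bubble the new uid into place (the patch re-runs a full insertion sort) */
static void insert_uid(unsigned int uid)
{
	int j;

	if (uid_used == MAX_UIDS || find_uid(uid) >= 0)
		return;
	uids[uid_used++] = uid;
	for (j = uid_used - 1; j > 0 && uids[j - 1] > uids[j]; j--) {
		unsigned int tmp = uids[j];

		uids[j] = uids[j - 1];
		uids[j - 1] = tmp;
	}
}

int main(void)
{
	insert_uid(1000);
	insert_uid(33);
	insert_uid(500);
	printf("index of 500: %d\n", find_uid(500));	/* 1  */
	printf("index of 42:  %d\n", find_uid(42));	/* -1 */
	return 0;
}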
53028diff -urNp linux-2.6.32.43/grsecurity/gracl_shm.c linux-2.6.32.43/grsecurity/gracl_shm.c
53029--- linux-2.6.32.43/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
53030+++ linux-2.6.32.43/grsecurity/gracl_shm.c 2011-04-17 15:56:46.000000000 -0400
53031@@ -0,0 +1,40 @@
53032+#include <linux/kernel.h>
53033+#include <linux/mm.h>
53034+#include <linux/sched.h>
53035+#include <linux/file.h>
53036+#include <linux/ipc.h>
53037+#include <linux/gracl.h>
53038+#include <linux/grsecurity.h>
53039+#include <linux/grinternal.h>
53040+
53041+int
53042+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53043+ const time_t shm_createtime, const uid_t cuid, const int shmid)
53044+{
53045+ struct task_struct *task;
53046+
53047+ if (!gr_acl_is_enabled())
53048+ return 1;
53049+
53050+ rcu_read_lock();
53051+ read_lock(&tasklist_lock);
53052+
53053+ task = find_task_by_vpid(shm_cprid);
53054+
53055+ if (unlikely(!task))
53056+ task = find_task_by_vpid(shm_lapid);
53057+
53058+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
53059+ (task->pid == shm_lapid)) &&
53060+ (task->acl->mode & GR_PROTSHM) &&
53061+ (task->acl != current->acl))) {
53062+ read_unlock(&tasklist_lock);
53063+ rcu_read_unlock();
53064+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid);
53065+ return 0;
53066+ }
53067+ read_unlock(&tasklist_lock);
53068+ rcu_read_unlock();
53069+
53070+ return 1;
53071+}
53072diff -urNp linux-2.6.32.43/grsecurity/grsec_chdir.c linux-2.6.32.43/grsecurity/grsec_chdir.c
53073--- linux-2.6.32.43/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
53074+++ linux-2.6.32.43/grsecurity/grsec_chdir.c 2011-04-17 15:56:46.000000000 -0400
53075@@ -0,0 +1,19 @@
53076+#include <linux/kernel.h>
53077+#include <linux/sched.h>
53078+#include <linux/fs.h>
53079+#include <linux/file.h>
53080+#include <linux/grsecurity.h>
53081+#include <linux/grinternal.h>
53082+
53083+void
53084+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
53085+{
53086+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
53087+ if ((grsec_enable_chdir && grsec_enable_group &&
53088+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
53089+ !grsec_enable_group)) {
53090+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
53091+ }
53092+#endif
53093+ return;
53094+}
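The condition in gr_log_chdir() reduces to: log when chdir auditing is enabled and either group-restricted auditing is off or the caller belongs to the audit group. The same predicate in isolation, as a sketch in which in_group_p() is replaced by a plain flag:

#include <stdio.h>
#include <stdbool.h>

static bool should_log_chdir(bool enable_chdir, bool enable_group, bool in_audit_group)
{
	return enable_chdir && (!enable_group || in_audit_group);
}

int main(void)
{
	printf("%d\n", should_log_chdir(true, true, false));	/* 0: not in the audit group */
	printf("%d\n", should_log_chdir(true, false, false));	/* 1: group gating disabled  */
	return 0;
}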
53095diff -urNp linux-2.6.32.43/grsecurity/grsec_chroot.c linux-2.6.32.43/grsecurity/grsec_chroot.c
53096--- linux-2.6.32.43/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
53097+++ linux-2.6.32.43/grsecurity/grsec_chroot.c 2011-07-18 17:14:10.000000000 -0400
53098@@ -0,0 +1,384 @@
53099+#include <linux/kernel.h>
53100+#include <linux/module.h>
53101+#include <linux/sched.h>
53102+#include <linux/file.h>
53103+#include <linux/fs.h>
53104+#include <linux/mount.h>
53105+#include <linux/types.h>
53106+#include <linux/pid_namespace.h>
53107+#include <linux/grsecurity.h>
53108+#include <linux/grinternal.h>
53109+
53110+void gr_set_chroot_entries(struct task_struct *task, struct path *path)
53111+{
53112+#ifdef CONFIG_GRKERNSEC
53113+ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry &&
53114+ path->dentry != task->nsproxy->mnt_ns->root->mnt_root)
53115+ task->gr_is_chrooted = 1;
53116+ else
53117+ task->gr_is_chrooted = 0;
53118+
53119+ task->gr_chroot_dentry = path->dentry;
53120+#endif
53121+ return;
53122+}
53123+
53124+void gr_clear_chroot_entries(struct task_struct *task)
53125+{
53126+#ifdef CONFIG_GRKERNSEC
53127+ task->gr_is_chrooted = 0;
53128+ task->gr_chroot_dentry = NULL;
53129+#endif
53130+ return;
53131+}
53132+
53133+int
53134+gr_handle_chroot_unix(const pid_t pid)
53135+{
53136+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
53137+ struct task_struct *p;
53138+
53139+ if (unlikely(!grsec_enable_chroot_unix))
53140+ return 1;
53141+
53142+ if (likely(!proc_is_chrooted(current)))
53143+ return 1;
53144+
53145+ rcu_read_lock();
53146+ read_lock(&tasklist_lock);
53147+
53148+ p = find_task_by_vpid_unrestricted(pid);
53149+ if (unlikely(p && !have_same_root(current, p))) {
53150+ read_unlock(&tasklist_lock);
53151+ rcu_read_unlock();
53152+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
53153+ return 0;
53154+ }
53155+ read_unlock(&tasklist_lock);
53156+ rcu_read_unlock();
53157+#endif
53158+ return 1;
53159+}
53160+
53161+int
53162+gr_handle_chroot_nice(void)
53163+{
53164+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53165+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
53166+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
53167+ return -EPERM;
53168+ }
53169+#endif
53170+ return 0;
53171+}
53172+
53173+int
53174+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
53175+{
53176+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
53177+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
53178+ && proc_is_chrooted(current)) {
53179+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid);
53180+ return -EACCES;
53181+ }
53182+#endif
53183+ return 0;
53184+}
53185+
53186+int
53187+gr_handle_chroot_rawio(const struct inode *inode)
53188+{
53189+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53190+ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
53191+ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
53192+ return 1;
53193+#endif
53194+ return 0;
53195+}
53196+
53197+int
53198+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
53199+{
53200+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53201+ struct task_struct *p;
53202+ int ret = 0;
53203+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
53204+ return ret;
53205+
53206+ read_lock(&tasklist_lock);
53207+ do_each_pid_task(pid, type, p) {
53208+ if (!have_same_root(current, p)) {
53209+ ret = 1;
53210+ goto out;
53211+ }
53212+ } while_each_pid_task(pid, type, p);
53213+out:
53214+ read_unlock(&tasklist_lock);
53215+ return ret;
53216+#endif
53217+ return 0;
53218+}
53219+
53220+int
53221+gr_pid_is_chrooted(struct task_struct *p)
53222+{
53223+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
53224+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
53225+ return 0;
53226+
53227+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
53228+ !have_same_root(current, p)) {
53229+ return 1;
53230+ }
53231+#endif
53232+ return 0;
53233+}
53234+
53235+EXPORT_SYMBOL(gr_pid_is_chrooted);
53236+
53237+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
53238+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
53239+{
53240+ struct dentry *dentry = (struct dentry *)u_dentry;
53241+ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
53242+ struct dentry *realroot;
53243+ struct vfsmount *realrootmnt;
53244+ struct dentry *currentroot;
53245+ struct vfsmount *currentmnt;
53246+ struct task_struct *reaper = &init_task;
53247+ int ret = 1;
53248+
53249+ read_lock(&reaper->fs->lock);
53250+ realrootmnt = mntget(reaper->fs->root.mnt);
53251+ realroot = dget(reaper->fs->root.dentry);
53252+ read_unlock(&reaper->fs->lock);
53253+
53254+ read_lock(&current->fs->lock);
53255+ currentmnt = mntget(current->fs->root.mnt);
53256+ currentroot = dget(current->fs->root.dentry);
53257+ read_unlock(&current->fs->lock);
53258+
53259+ spin_lock(&dcache_lock);
53260+ for (;;) {
53261+ if (unlikely((dentry == realroot && mnt == realrootmnt)
53262+ || (dentry == currentroot && mnt == currentmnt)))
53263+ break;
53264+ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
53265+ if (mnt->mnt_parent == mnt)
53266+ break;
53267+ dentry = mnt->mnt_mountpoint;
53268+ mnt = mnt->mnt_parent;
53269+ continue;
53270+ }
53271+ dentry = dentry->d_parent;
53272+ }
53273+ spin_unlock(&dcache_lock);
53274+
53275+ dput(currentroot);
53276+ mntput(currentmnt);
53277+
53278+ /* access is outside of chroot */
53279+ if (dentry == realroot && mnt == realrootmnt)
53280+ ret = 0;
53281+
53282+ dput(realroot);
53283+ mntput(realrootmnt);
53284+ return ret;
53285+}
53286+#endif
53287+
53288+int
53289+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
53290+{
53291+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
53292+ if (!grsec_enable_chroot_fchdir)
53293+ return 1;
53294+
53295+ if (!proc_is_chrooted(current))
53296+ return 1;
53297+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
53298+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
53299+ return 0;
53300+ }
53301+#endif
53302+ return 1;
53303+}
53304+
53305+int
53306+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53307+ const time_t shm_createtime)
53308+{
53309+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
53310+ struct task_struct *p;
53311+ time_t starttime;
53312+
53313+ if (unlikely(!grsec_enable_chroot_shmat))
53314+ return 1;
53315+
53316+ if (likely(!proc_is_chrooted(current)))
53317+ return 1;
53318+
53319+ rcu_read_lock();
53320+ read_lock(&tasklist_lock);
53321+
53322+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
53323+ starttime = p->start_time.tv_sec;
53324+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
53325+ if (have_same_root(current, p)) {
53326+ goto allow;
53327+ } else {
53328+ read_unlock(&tasklist_lock);
53329+ rcu_read_unlock();
53330+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
53331+ return 0;
53332+ }
53333+ }
53334+ /* creator exited, pid reuse, fall through to next check */
53335+ }
53336+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
53337+ if (unlikely(!have_same_root(current, p))) {
53338+ read_unlock(&tasklist_lock);
53339+ rcu_read_unlock();
53340+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
53341+ return 0;
53342+ }
53343+ }
53344+
53345+allow:
53346+ read_unlock(&tasklist_lock);
53347+ rcu_read_unlock();
53348+#endif
53349+ return 1;
53350+}
53351+
53352+void
53353+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
53354+{
53355+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
53356+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
53357+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
53358+#endif
53359+ return;
53360+}
53361+
53362+int
53363+gr_handle_chroot_mknod(const struct dentry *dentry,
53364+ const struct vfsmount *mnt, const int mode)
53365+{
53366+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
53367+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
53368+ proc_is_chrooted(current)) {
53369+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
53370+ return -EPERM;
53371+ }
53372+#endif
53373+ return 0;
53374+}
53375+
53376+int
53377+gr_handle_chroot_mount(const struct dentry *dentry,
53378+ const struct vfsmount *mnt, const char *dev_name)
53379+{
53380+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
53381+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
53382+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none" , dentry, mnt);
53383+ return -EPERM;
53384+ }
53385+#endif
53386+ return 0;
53387+}
53388+
53389+int
53390+gr_handle_chroot_pivot(void)
53391+{
53392+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
53393+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
53394+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
53395+ return -EPERM;
53396+ }
53397+#endif
53398+ return 0;
53399+}
53400+
53401+int
53402+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
53403+{
53404+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
53405+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
53406+ !gr_is_outside_chroot(dentry, mnt)) {
53407+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
53408+ return -EPERM;
53409+ }
53410+#endif
53411+ return 0;
53412+}
53413+
53414+int
53415+gr_handle_chroot_caps(struct path *path)
53416+{
53417+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
53418+ if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL &&
53419+ (init_task.fs->root.dentry != path->dentry) &&
53420+ (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) {
53421+
53422+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
53423+ const struct cred *old = current_cred();
53424+ struct cred *new = prepare_creds();
53425+ if (new == NULL)
53426+ return 1;
53427+
53428+ new->cap_permitted = cap_drop(old->cap_permitted,
53429+ chroot_caps);
53430+ new->cap_inheritable = cap_drop(old->cap_inheritable,
53431+ chroot_caps);
53432+ new->cap_effective = cap_drop(old->cap_effective,
53433+ chroot_caps);
53434+
53435+ commit_creds(new);
53436+
53437+ return 0;
53438+ }
53439+#endif
53440+ return 0;
53441+}
53442+
53443+int
53444+gr_handle_chroot_sysctl(const int op)
53445+{
53446+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
53447+ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
53448+ && (op & MAY_WRITE))
53449+ return -EACCES;
53450+#endif
53451+ return 0;
53452+}
53453+
53454+void
53455+gr_handle_chroot_chdir(struct path *path)
53456+{
53457+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
53458+ if (grsec_enable_chroot_chdir)
53459+ set_fs_pwd(current->fs, path);
53460+#endif
53461+ return;
53462+}
53463+
53464+int
53465+gr_handle_chroot_chmod(const struct dentry *dentry,
53466+ const struct vfsmount *mnt, const int mode)
53467+{
53468+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
53469+ /* allow chmod +s on directories, but not on files */
53470+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
53471+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
53472+ proc_is_chrooted(current)) {
53473+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
53474+ return -EPERM;
53475+ }
53476+#endif
53477+ return 0;
53478+}
53479+
53480+#ifdef CONFIG_SECURITY
53481+EXPORT_SYMBOL(gr_handle_chroot_caps);
53482+#endif
53483diff -urNp linux-2.6.32.43/grsecurity/grsec_disabled.c linux-2.6.32.43/grsecurity/grsec_disabled.c
53484--- linux-2.6.32.43/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
53485+++ linux-2.6.32.43/grsecurity/grsec_disabled.c 2011-04-17 15:56:46.000000000 -0400
53486@@ -0,0 +1,447 @@
53487+#include <linux/kernel.h>
53488+#include <linux/module.h>
53489+#include <linux/sched.h>
53490+#include <linux/file.h>
53491+#include <linux/fs.h>
53492+#include <linux/kdev_t.h>
53493+#include <linux/net.h>
53494+#include <linux/in.h>
53495+#include <linux/ip.h>
53496+#include <linux/skbuff.h>
53497+#include <linux/sysctl.h>
53498+
53499+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
53500+void
53501+pax_set_initial_flags(struct linux_binprm *bprm)
53502+{
53503+ return;
53504+}
53505+#endif
53506+
53507+#ifdef CONFIG_SYSCTL
53508+__u32
53509+gr_handle_sysctl(const struct ctl_table * table, const int op)
53510+{
53511+ return 0;
53512+}
53513+#endif
53514+
53515+#ifdef CONFIG_TASKSTATS
53516+int gr_is_taskstats_denied(int pid)
53517+{
53518+ return 0;
53519+}
53520+#endif
53521+
53522+int
53523+gr_acl_is_enabled(void)
53524+{
53525+ return 0;
53526+}
53527+
53528+int
53529+gr_handle_rawio(const struct inode *inode)
53530+{
53531+ return 0;
53532+}
53533+
53534+void
53535+gr_acl_handle_psacct(struct task_struct *task, const long code)
53536+{
53537+ return;
53538+}
53539+
53540+int
53541+gr_handle_ptrace(struct task_struct *task, const long request)
53542+{
53543+ return 0;
53544+}
53545+
53546+int
53547+gr_handle_proc_ptrace(struct task_struct *task)
53548+{
53549+ return 0;
53550+}
53551+
53552+void
53553+gr_learn_resource(const struct task_struct *task,
53554+ const int res, const unsigned long wanted, const int gt)
53555+{
53556+ return;
53557+}
53558+
53559+int
53560+gr_set_acls(const int type)
53561+{
53562+ return 0;
53563+}
53564+
53565+int
53566+gr_check_hidden_task(const struct task_struct *tsk)
53567+{
53568+ return 0;
53569+}
53570+
53571+int
53572+gr_check_protected_task(const struct task_struct *task)
53573+{
53574+ return 0;
53575+}
53576+
53577+int
53578+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
53579+{
53580+ return 0;
53581+}
53582+
53583+void
53584+gr_copy_label(struct task_struct *tsk)
53585+{
53586+ return;
53587+}
53588+
53589+void
53590+gr_set_pax_flags(struct task_struct *task)
53591+{
53592+ return;
53593+}
53594+
53595+int
53596+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
53597+ const int unsafe_share)
53598+{
53599+ return 0;
53600+}
53601+
53602+void
53603+gr_handle_delete(const ino_t ino, const dev_t dev)
53604+{
53605+ return;
53606+}
53607+
53608+void
53609+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
53610+{
53611+ return;
53612+}
53613+
53614+void
53615+gr_handle_crash(struct task_struct *task, const int sig)
53616+{
53617+ return;
53618+}
53619+
53620+int
53621+gr_check_crash_exec(const struct file *filp)
53622+{
53623+ return 0;
53624+}
53625+
53626+int
53627+gr_check_crash_uid(const uid_t uid)
53628+{
53629+ return 0;
53630+}
53631+
53632+void
53633+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
53634+ struct dentry *old_dentry,
53635+ struct dentry *new_dentry,
53636+ struct vfsmount *mnt, const __u8 replace)
53637+{
53638+ return;
53639+}
53640+
53641+int
53642+gr_search_socket(const int family, const int type, const int protocol)
53643+{
53644+ return 1;
53645+}
53646+
53647+int
53648+gr_search_connectbind(const int mode, const struct socket *sock,
53649+ const struct sockaddr_in *addr)
53650+{
53651+ return 0;
53652+}
53653+
53654+int
53655+gr_is_capable(const int cap)
53656+{
53657+ return 1;
53658+}
53659+
53660+int
53661+gr_is_capable_nolog(const int cap)
53662+{
53663+ return 1;
53664+}
53665+
53666+void
53667+gr_handle_alertkill(struct task_struct *task)
53668+{
53669+ return;
53670+}
53671+
53672+__u32
53673+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
53674+{
53675+ return 1;
53676+}
53677+
53678+__u32
53679+gr_acl_handle_hidden_file(const struct dentry * dentry,
53680+ const struct vfsmount * mnt)
53681+{
53682+ return 1;
53683+}
53684+
53685+__u32
53686+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
53687+ const int fmode)
53688+{
53689+ return 1;
53690+}
53691+
53692+__u32
53693+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
53694+{
53695+ return 1;
53696+}
53697+
53698+__u32
53699+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
53700+{
53701+ return 1;
53702+}
53703+
53704+int
53705+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
53706+ unsigned int *vm_flags)
53707+{
53708+ return 1;
53709+}
53710+
53711+__u32
53712+gr_acl_handle_truncate(const struct dentry * dentry,
53713+ const struct vfsmount * mnt)
53714+{
53715+ return 1;
53716+}
53717+
53718+__u32
53719+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
53720+{
53721+ return 1;
53722+}
53723+
53724+__u32
53725+gr_acl_handle_access(const struct dentry * dentry,
53726+ const struct vfsmount * mnt, const int fmode)
53727+{
53728+ return 1;
53729+}
53730+
53731+__u32
53732+gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
53733+ mode_t mode)
53734+{
53735+ return 1;
53736+}
53737+
53738+__u32
53739+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
53740+ mode_t mode)
53741+{
53742+ return 1;
53743+}
53744+
53745+__u32
53746+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
53747+{
53748+ return 1;
53749+}
53750+
53751+__u32
53752+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
53753+{
53754+ return 1;
53755+}
53756+
53757+void
53758+grsecurity_init(void)
53759+{
53760+ return;
53761+}
53762+
53763+__u32
53764+gr_acl_handle_mknod(const struct dentry * new_dentry,
53765+ const struct dentry * parent_dentry,
53766+ const struct vfsmount * parent_mnt,
53767+ const int mode)
53768+{
53769+ return 1;
53770+}
53771+
53772+__u32
53773+gr_acl_handle_mkdir(const struct dentry * new_dentry,
53774+ const struct dentry * parent_dentry,
53775+ const struct vfsmount * parent_mnt)
53776+{
53777+ return 1;
53778+}
53779+
53780+__u32
53781+gr_acl_handle_symlink(const struct dentry * new_dentry,
53782+ const struct dentry * parent_dentry,
53783+ const struct vfsmount * parent_mnt, const char *from)
53784+{
53785+ return 1;
53786+}
53787+
53788+__u32
53789+gr_acl_handle_link(const struct dentry * new_dentry,
53790+ const struct dentry * parent_dentry,
53791+ const struct vfsmount * parent_mnt,
53792+ const struct dentry * old_dentry,
53793+ const struct vfsmount * old_mnt, const char *to)
53794+{
53795+ return 1;
53796+}
53797+
53798+int
53799+gr_acl_handle_rename(const struct dentry *new_dentry,
53800+ const struct dentry *parent_dentry,
53801+ const struct vfsmount *parent_mnt,
53802+ const struct dentry *old_dentry,
53803+ const struct inode *old_parent_inode,
53804+ const struct vfsmount *old_mnt, const char *newname)
53805+{
53806+ return 0;
53807+}
53808+
53809+int
53810+gr_acl_handle_filldir(const struct file *file, const char *name,
53811+ const int namelen, const ino_t ino)
53812+{
53813+ return 1;
53814+}
53815+
53816+int
53817+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
53818+ const time_t shm_createtime, const uid_t cuid, const int shmid)
53819+{
53820+ return 1;
53821+}
53822+
53823+int
53824+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
53825+{
53826+ return 0;
53827+}
53828+
53829+int
53830+gr_search_accept(const struct socket *sock)
53831+{
53832+ return 0;
53833+}
53834+
53835+int
53836+gr_search_listen(const struct socket *sock)
53837+{
53838+ return 0;
53839+}
53840+
53841+int
53842+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
53843+{
53844+ return 0;
53845+}
53846+
53847+__u32
53848+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
53849+{
53850+ return 1;
53851+}
53852+
53853+__u32
53854+gr_acl_handle_creat(const struct dentry * dentry,
53855+ const struct dentry * p_dentry,
53856+ const struct vfsmount * p_mnt, const int fmode,
53857+ const int imode)
53858+{
53859+ return 1;
53860+}
53861+
53862+void
53863+gr_acl_handle_exit(void)
53864+{
53865+ return;
53866+}
53867+
53868+int
53869+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
53870+{
53871+ return 1;
53872+}
53873+
53874+void
53875+gr_set_role_label(const uid_t uid, const gid_t gid)
53876+{
53877+ return;
53878+}
53879+
53880+int
53881+gr_acl_handle_procpidmem(const struct task_struct *task)
53882+{
53883+ return 0;
53884+}
53885+
53886+int
53887+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
53888+{
53889+ return 0;
53890+}
53891+
53892+int
53893+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
53894+{
53895+ return 0;
53896+}
53897+
53898+void
53899+gr_set_kernel_label(struct task_struct *task)
53900+{
53901+ return;
53902+}
53903+
53904+int
53905+gr_check_user_change(int real, int effective, int fs)
53906+{
53907+ return 0;
53908+}
53909+
53910+int
53911+gr_check_group_change(int real, int effective, int fs)
53912+{
53913+ return 0;
53914+}
53915+
53916+int gr_acl_enable_at_secure(void)
53917+{
53918+ return 0;
53919+}
53920+
53921+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
53922+{
53923+ return dentry->d_inode->i_sb->s_dev;
53924+}
53925+
53926+EXPORT_SYMBOL(gr_is_capable);
53927+EXPORT_SYMBOL(gr_is_capable_nolog);
53928+EXPORT_SYMBOL(gr_learn_resource);
53929+EXPORT_SYMBOL(gr_set_kernel_label);
53930+#ifdef CONFIG_SECURITY
53931+EXPORT_SYMBOL(gr_check_user_change);
53932+EXPORT_SYMBOL(gr_check_group_change);
53933+#endif
53934diff -urNp linux-2.6.32.43/grsecurity/grsec_exec.c linux-2.6.32.43/grsecurity/grsec_exec.c
53935--- linux-2.6.32.43/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
53936+++ linux-2.6.32.43/grsecurity/grsec_exec.c 2011-04-17 15:56:46.000000000 -0400
53937@@ -0,0 +1,148 @@
53938+#include <linux/kernel.h>
53939+#include <linux/sched.h>
53940+#include <linux/file.h>
53941+#include <linux/binfmts.h>
53942+#include <linux/smp_lock.h>
53943+#include <linux/fs.h>
53944+#include <linux/types.h>
53945+#include <linux/grdefs.h>
53946+#include <linux/grinternal.h>
53947+#include <linux/capability.h>
53948+#include <linux/compat.h>
53949+
53950+#include <asm/uaccess.h>
53951+
53952+#ifdef CONFIG_GRKERNSEC_EXECLOG
53953+static char gr_exec_arg_buf[132];
53954+static DEFINE_MUTEX(gr_exec_arg_mutex);
53955+#endif
53956+
53957+int
53958+gr_handle_nproc(void)
53959+{
53960+#ifdef CONFIG_GRKERNSEC_EXECVE
53961+ const struct cred *cred = current_cred();
53962+ if (grsec_enable_execve && cred->user &&
53963+ (atomic_read(&cred->user->processes) >
53964+ current->signal->rlim[RLIMIT_NPROC].rlim_cur) &&
53965+ !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
53966+ gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
53967+ return -EAGAIN;
53968+ }
53969+#endif
53970+ return 0;
53971+}
53972+
53973+void
53974+gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv)
53975+{
53976+#ifdef CONFIG_GRKERNSEC_EXECLOG
53977+ char *grarg = gr_exec_arg_buf;
53978+ unsigned int i, x, execlen = 0;
53979+ char c;
53980+
53981+ if (!((grsec_enable_execlog && grsec_enable_group &&
53982+ in_group_p(grsec_audit_gid))
53983+ || (grsec_enable_execlog && !grsec_enable_group)))
53984+ return;
53985+
53986+ mutex_lock(&gr_exec_arg_mutex);
53987+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
53988+
53989+ if (unlikely(argv == NULL))
53990+ goto log;
53991+
53992+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
53993+ const char __user *p;
53994+ unsigned int len;
53995+
53996+ if (copy_from_user(&p, argv + i, sizeof(p)))
53997+ goto log;
53998+ if (!p)
53999+ goto log;
54000+ len = strnlen_user(p, 128 - execlen);
54001+ if (len > 128 - execlen)
54002+ len = 128 - execlen;
54003+ else if (len > 0)
54004+ len--;
54005+ if (copy_from_user(grarg + execlen, p, len))
54006+ goto log;
54007+
54008+ /* rewrite unprintable characters */
54009+ for (x = 0; x < len; x++) {
54010+ c = *(grarg + execlen + x);
54011+ if (c < 32 || c > 126)
54012+ *(grarg + execlen + x) = ' ';
54013+ }
54014+
54015+ execlen += len;
54016+ *(grarg + execlen) = ' ';
54017+ *(grarg + execlen + 1) = '\0';
54018+ execlen++;
54019+ }
54020+
54021+ log:
54022+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
54023+ bprm->file->f_path.mnt, grarg);
54024+ mutex_unlock(&gr_exec_arg_mutex);
54025+#endif
54026+ return;
54027+}
54028+
54029+#ifdef CONFIG_COMPAT
54030+void
54031+gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv)
54032+{
54033+#ifdef CONFIG_GRKERNSEC_EXECLOG
54034+ char *grarg = gr_exec_arg_buf;
54035+ unsigned int i, x, execlen = 0;
54036+ char c;
54037+
54038+ if (!((grsec_enable_execlog && grsec_enable_group &&
54039+ in_group_p(grsec_audit_gid))
54040+ || (grsec_enable_execlog && !grsec_enable_group)))
54041+ return;
54042+
54043+ mutex_lock(&gr_exec_arg_mutex);
54044+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
54045+
54046+ if (unlikely(argv == NULL))
54047+ goto log;
54048+
54049+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
54050+ compat_uptr_t p;
54051+ unsigned int len;
54052+
54053+ if (get_user(p, argv + i))
54054+ goto log;
54055+ len = strnlen_user(compat_ptr(p), 128 - execlen);
54056+ if (len > 128 - execlen)
54057+ len = 128 - execlen;
54058+ else if (len > 0)
54059+ len--;
54060+ else
54061+ goto log;
54062+ if (copy_from_user(grarg + execlen, compat_ptr(p), len))
54063+ goto log;
54064+
54065+ /* rewrite unprintable characters */
54066+ for (x = 0; x < len; x++) {
54067+ c = *(grarg + execlen + x);
54068+ if (c < 32 || c > 126)
54069+ *(grarg + execlen + x) = ' ';
54070+ }
54071+
54072+ execlen += len;
54073+ *(grarg + execlen) = ' ';
54074+ *(grarg + execlen + 1) = '\0';
54075+ execlen++;
54076+ }
54077+
54078+ log:
54079+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
54080+ bprm->file->f_path.mnt, grarg);
54081+ mutex_unlock(&gr_exec_arg_mutex);
54082+#endif
54083+ return;
54084+}
54085+#endif
54086diff -urNp linux-2.6.32.43/grsecurity/grsec_fifo.c linux-2.6.32.43/grsecurity/grsec_fifo.c
54087--- linux-2.6.32.43/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
54088+++ linux-2.6.32.43/grsecurity/grsec_fifo.c 2011-04-17 15:56:46.000000000 -0400
54089@@ -0,0 +1,24 @@
54090+#include <linux/kernel.h>
54091+#include <linux/sched.h>
54092+#include <linux/fs.h>
54093+#include <linux/file.h>
54094+#include <linux/grinternal.h>
54095+
54096+int
54097+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
54098+ const struct dentry *dir, const int flag, const int acc_mode)
54099+{
54100+#ifdef CONFIG_GRKERNSEC_FIFO
54101+ const struct cred *cred = current_cred();
54102+
54103+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
54104+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
54105+ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
54106+ (cred->fsuid != dentry->d_inode->i_uid)) {
54107+ if (!inode_permission(dentry->d_inode, acc_mode))
54108+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
54109+ return -EACCES;
54110+ }
54111+#endif
54112+ return 0;
54113+}
54114diff -urNp linux-2.6.32.43/grsecurity/grsec_fork.c linux-2.6.32.43/grsecurity/grsec_fork.c
54115--- linux-2.6.32.43/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
54116+++ linux-2.6.32.43/grsecurity/grsec_fork.c 2011-04-17 15:56:46.000000000 -0400
54117@@ -0,0 +1,23 @@
54118+#include <linux/kernel.h>
54119+#include <linux/sched.h>
54120+#include <linux/grsecurity.h>
54121+#include <linux/grinternal.h>
54122+#include <linux/errno.h>
54123+
54124+void
54125+gr_log_forkfail(const int retval)
54126+{
54127+#ifdef CONFIG_GRKERNSEC_FORKFAIL
54128+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
54129+ switch (retval) {
54130+ case -EAGAIN:
54131+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
54132+ break;
54133+ case -ENOMEM:
54134+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
54135+ break;
54136+ }
54137+ }
54138+#endif
54139+ return;
54140+}
54141diff -urNp linux-2.6.32.43/grsecurity/grsec_init.c linux-2.6.32.43/grsecurity/grsec_init.c
54142--- linux-2.6.32.43/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
54143+++ linux-2.6.32.43/grsecurity/grsec_init.c 2011-06-29 19:35:26.000000000 -0400
54144@@ -0,0 +1,274 @@
54145+#include <linux/kernel.h>
54146+#include <linux/sched.h>
54147+#include <linux/mm.h>
54148+#include <linux/smp_lock.h>
54149+#include <linux/gracl.h>
54150+#include <linux/slab.h>
54151+#include <linux/vmalloc.h>
54152+#include <linux/percpu.h>
54153+#include <linux/module.h>
54154+
54155+int grsec_enable_brute;
54156+int grsec_enable_link;
54157+int grsec_enable_dmesg;
54158+int grsec_enable_harden_ptrace;
54159+int grsec_enable_fifo;
54160+int grsec_enable_execve;
54161+int grsec_enable_execlog;
54162+int grsec_enable_signal;
54163+int grsec_enable_forkfail;
54164+int grsec_enable_audit_ptrace;
54165+int grsec_enable_time;
54166+int grsec_enable_audit_textrel;
54167+int grsec_enable_group;
54168+int grsec_audit_gid;
54169+int grsec_enable_chdir;
54170+int grsec_enable_mount;
54171+int grsec_enable_rofs;
54172+int grsec_enable_chroot_findtask;
54173+int grsec_enable_chroot_mount;
54174+int grsec_enable_chroot_shmat;
54175+int grsec_enable_chroot_fchdir;
54176+int grsec_enable_chroot_double;
54177+int grsec_enable_chroot_pivot;
54178+int grsec_enable_chroot_chdir;
54179+int grsec_enable_chroot_chmod;
54180+int grsec_enable_chroot_mknod;
54181+int grsec_enable_chroot_nice;
54182+int grsec_enable_chroot_execlog;
54183+int grsec_enable_chroot_caps;
54184+int grsec_enable_chroot_sysctl;
54185+int grsec_enable_chroot_unix;
54186+int grsec_enable_tpe;
54187+int grsec_tpe_gid;
54188+int grsec_enable_blackhole;
54189+#ifdef CONFIG_IPV6_MODULE
54190+EXPORT_SYMBOL(grsec_enable_blackhole);
54191+#endif
54192+int grsec_lastack_retries;
54193+int grsec_enable_tpe_all;
54194+int grsec_enable_tpe_invert;
54195+int grsec_enable_socket_all;
54196+int grsec_socket_all_gid;
54197+int grsec_enable_socket_client;
54198+int grsec_socket_client_gid;
54199+int grsec_enable_socket_server;
54200+int grsec_socket_server_gid;
54201+int grsec_resource_logging;
54202+int grsec_disable_privio;
54203+int grsec_enable_log_rwxmaps;
54204+int grsec_lock;
54205+
54206+DEFINE_SPINLOCK(grsec_alert_lock);
54207+unsigned long grsec_alert_wtime = 0;
54208+unsigned long grsec_alert_fyet = 0;
54209+
54210+DEFINE_SPINLOCK(grsec_audit_lock);
54211+
54212+DEFINE_RWLOCK(grsec_exec_file_lock);
54213+
54214+char *gr_shared_page[4];
54215+
54216+char *gr_alert_log_fmt;
54217+char *gr_audit_log_fmt;
54218+char *gr_alert_log_buf;
54219+char *gr_audit_log_buf;
54220+
54221+extern struct gr_arg *gr_usermode;
54222+extern unsigned char *gr_system_salt;
54223+extern unsigned char *gr_system_sum;
54224+
54225+void __init
54226+grsecurity_init(void)
54227+{
54228+ int j;
54229+ /* create the per-cpu shared pages */
54230+
54231+#ifdef CONFIG_X86
54232+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
54233+#endif
54234+
54235+ for (j = 0; j < 4; j++) {
54236+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
54237+ if (gr_shared_page[j] == NULL) {
54238+ panic("Unable to allocate grsecurity shared page");
54239+ return;
54240+ }
54241+ }
54242+
54243+ /* allocate log buffers */
54244+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
54245+ if (!gr_alert_log_fmt) {
54246+ panic("Unable to allocate grsecurity alert log format buffer");
54247+ return;
54248+ }
54249+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
54250+ if (!gr_audit_log_fmt) {
54251+ panic("Unable to allocate grsecurity audit log format buffer");
54252+ return;
54253+ }
54254+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
54255+ if (!gr_alert_log_buf) {
54256+ panic("Unable to allocate grsecurity alert log buffer");
54257+ return;
54258+ }
54259+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
54260+ if (!gr_audit_log_buf) {
54261+ panic("Unable to allocate grsecurity audit log buffer");
54262+ return;
54263+ }
54264+
54265+ /* allocate memory for authentication structure */
54266+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
54267+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
54268+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
54269+
54270+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
54271+ panic("Unable to allocate grsecurity authentication structure");
54272+ return;
54273+ }
54274+
54275+
54276+#ifdef CONFIG_GRKERNSEC_IO
54277+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
54278+ grsec_disable_privio = 1;
54279+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
54280+ grsec_disable_privio = 1;
54281+#else
54282+ grsec_disable_privio = 0;
54283+#endif
54284+#endif
54285+
54286+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
54287+ /* for backward compatibility, tpe_invert always defaults to on if
54288+ enabled in the kernel
54289+ */
54290+ grsec_enable_tpe_invert = 1;
54291+#endif
54292+
54293+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
54294+#ifndef CONFIG_GRKERNSEC_SYSCTL
54295+ grsec_lock = 1;
54296+#endif
54297+
54298+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54299+ grsec_enable_audit_textrel = 1;
54300+#endif
54301+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54302+ grsec_enable_log_rwxmaps = 1;
54303+#endif
54304+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
54305+ grsec_enable_group = 1;
54306+ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
54307+#endif
54308+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
54309+ grsec_enable_chdir = 1;
54310+#endif
54311+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
54312+ grsec_enable_harden_ptrace = 1;
54313+#endif
54314+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54315+ grsec_enable_mount = 1;
54316+#endif
54317+#ifdef CONFIG_GRKERNSEC_LINK
54318+ grsec_enable_link = 1;
54319+#endif
54320+#ifdef CONFIG_GRKERNSEC_BRUTE
54321+ grsec_enable_brute = 1;
54322+#endif
54323+#ifdef CONFIG_GRKERNSEC_DMESG
54324+ grsec_enable_dmesg = 1;
54325+#endif
54326+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
54327+ grsec_enable_blackhole = 1;
54328+ grsec_lastack_retries = 4;
54329+#endif
54330+#ifdef CONFIG_GRKERNSEC_FIFO
54331+ grsec_enable_fifo = 1;
54332+#endif
54333+#ifdef CONFIG_GRKERNSEC_EXECVE
54334+ grsec_enable_execve = 1;
54335+#endif
54336+#ifdef CONFIG_GRKERNSEC_EXECLOG
54337+ grsec_enable_execlog = 1;
54338+#endif
54339+#ifdef CONFIG_GRKERNSEC_SIGNAL
54340+ grsec_enable_signal = 1;
54341+#endif
54342+#ifdef CONFIG_GRKERNSEC_FORKFAIL
54343+ grsec_enable_forkfail = 1;
54344+#endif
54345+#ifdef CONFIG_GRKERNSEC_TIME
54346+ grsec_enable_time = 1;
54347+#endif
54348+#ifdef CONFIG_GRKERNSEC_RESLOG
54349+ grsec_resource_logging = 1;
54350+#endif
54351+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
54352+ grsec_enable_chroot_findtask = 1;
54353+#endif
54354+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
54355+ grsec_enable_chroot_unix = 1;
54356+#endif
54357+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
54358+ grsec_enable_chroot_mount = 1;
54359+#endif
54360+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
54361+ grsec_enable_chroot_fchdir = 1;
54362+#endif
54363+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
54364+ grsec_enable_chroot_shmat = 1;
54365+#endif
54366+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54367+ grsec_enable_audit_ptrace = 1;
54368+#endif
54369+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
54370+ grsec_enable_chroot_double = 1;
54371+#endif
54372+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
54373+ grsec_enable_chroot_pivot = 1;
54374+#endif
54375+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
54376+ grsec_enable_chroot_chdir = 1;
54377+#endif
54378+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
54379+ grsec_enable_chroot_chmod = 1;
54380+#endif
54381+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
54382+ grsec_enable_chroot_mknod = 1;
54383+#endif
54384+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
54385+ grsec_enable_chroot_nice = 1;
54386+#endif
54387+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
54388+ grsec_enable_chroot_execlog = 1;
54389+#endif
54390+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
54391+ grsec_enable_chroot_caps = 1;
54392+#endif
54393+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
54394+ grsec_enable_chroot_sysctl = 1;
54395+#endif
54396+#ifdef CONFIG_GRKERNSEC_TPE
54397+ grsec_enable_tpe = 1;
54398+ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
54399+#ifdef CONFIG_GRKERNSEC_TPE_ALL
54400+ grsec_enable_tpe_all = 1;
54401+#endif
54402+#endif
54403+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
54404+ grsec_enable_socket_all = 1;
54405+ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
54406+#endif
54407+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
54408+ grsec_enable_socket_client = 1;
54409+ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
54410+#endif
54411+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
54412+ grsec_enable_socket_server = 1;
54413+ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
54414+#endif
54415+#endif
54416+
54417+ return;
54418+}
54419diff -urNp linux-2.6.32.43/grsecurity/grsec_link.c linux-2.6.32.43/grsecurity/grsec_link.c
54420--- linux-2.6.32.43/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
54421+++ linux-2.6.32.43/grsecurity/grsec_link.c 2011-04-17 15:56:46.000000000 -0400
54422@@ -0,0 +1,43 @@
54423+#include <linux/kernel.h>
54424+#include <linux/sched.h>
54425+#include <linux/fs.h>
54426+#include <linux/file.h>
54427+#include <linux/grinternal.h>
54428+
54429+int
54430+gr_handle_follow_link(const struct inode *parent,
54431+ const struct inode *inode,
54432+ const struct dentry *dentry, const struct vfsmount *mnt)
54433+{
54434+#ifdef CONFIG_GRKERNSEC_LINK
54435+ const struct cred *cred = current_cred();
54436+
54437+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
54438+ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
54439+ (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
54440+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
54441+ return -EACCES;
54442+ }
54443+#endif
54444+ return 0;
54445+}
54446+
54447+int
54448+gr_handle_hardlink(const struct dentry *dentry,
54449+ const struct vfsmount *mnt,
54450+ struct inode *inode, const int mode, const char *to)
54451+{
54452+#ifdef CONFIG_GRKERNSEC_LINK
54453+ const struct cred *cred = current_cred();
54454+
54455+ if (grsec_enable_link && cred->fsuid != inode->i_uid &&
54456+ (!S_ISREG(mode) || (mode & S_ISUID) ||
54457+ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
54458+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
54459+ !capable(CAP_FOWNER) && cred->uid) {
54460+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
54461+ return -EPERM;
54462+ }
54463+#endif
54464+ return 0;
54465+}
54466diff -urNp linux-2.6.32.43/grsecurity/grsec_log.c linux-2.6.32.43/grsecurity/grsec_log.c
54467--- linux-2.6.32.43/grsecurity/grsec_log.c 1969-12-31 19:00:00.000000000 -0500
54468+++ linux-2.6.32.43/grsecurity/grsec_log.c 2011-05-10 21:58:49.000000000 -0400
54469@@ -0,0 +1,310 @@
54470+#include <linux/kernel.h>
54471+#include <linux/sched.h>
54472+#include <linux/file.h>
54473+#include <linux/tty.h>
54474+#include <linux/fs.h>
54475+#include <linux/grinternal.h>
54476+
54477+#ifdef CONFIG_TREE_PREEMPT_RCU
54478+#define DISABLE_PREEMPT() preempt_disable()
54479+#define ENABLE_PREEMPT() preempt_enable()
54480+#else
54481+#define DISABLE_PREEMPT()
54482+#define ENABLE_PREEMPT()
54483+#endif
54484+
54485+#define BEGIN_LOCKS(x) \
54486+ DISABLE_PREEMPT(); \
54487+ rcu_read_lock(); \
54488+ read_lock(&tasklist_lock); \
54489+ read_lock(&grsec_exec_file_lock); \
54490+ if (x != GR_DO_AUDIT) \
54491+ spin_lock(&grsec_alert_lock); \
54492+ else \
54493+ spin_lock(&grsec_audit_lock)
54494+
54495+#define END_LOCKS(x) \
54496+ if (x != GR_DO_AUDIT) \
54497+ spin_unlock(&grsec_alert_lock); \
54498+ else \
54499+ spin_unlock(&grsec_audit_lock); \
54500+ read_unlock(&grsec_exec_file_lock); \
54501+ read_unlock(&tasklist_lock); \
54502+ rcu_read_unlock(); \
54503+ ENABLE_PREEMPT(); \
54504+ if (x == GR_DONT_AUDIT) \
54505+ gr_handle_alertkill(current)
54506+
54507+enum {
54508+ FLOODING,
54509+ NO_FLOODING
54510+};
54511+
54512+extern char *gr_alert_log_fmt;
54513+extern char *gr_audit_log_fmt;
54514+extern char *gr_alert_log_buf;
54515+extern char *gr_audit_log_buf;
54516+
54517+static int gr_log_start(int audit)
54518+{
54519+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
54520+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
54521+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54522+
54523+ if (audit == GR_DO_AUDIT)
54524+ goto set_fmt;
54525+
54526+ if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
54527+ grsec_alert_wtime = jiffies;
54528+ grsec_alert_fyet = 0;
54529+ } else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
54530+ grsec_alert_fyet++;
54531+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
54532+ grsec_alert_wtime = jiffies;
54533+ grsec_alert_fyet++;
54534+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
54535+ return FLOODING;
54536+ } else return FLOODING;
54537+
54538+set_fmt:
54539+ memset(buf, 0, PAGE_SIZE);
54540+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
54541+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
54542+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54543+ } else if (current->signal->curr_ip) {
54544+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
54545+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
54546+ } else if (gr_acl_is_enabled()) {
54547+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
54548+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
54549+ } else {
54550+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
54551+ strcpy(buf, fmt);
54552+ }
54553+
54554+ return NO_FLOODING;
54555+}
54556+
54557+static void gr_log_middle(int audit, const char *msg, va_list ap)
54558+ __attribute__ ((format (printf, 2, 0)));
54559+
54560+static void gr_log_middle(int audit, const char *msg, va_list ap)
54561+{
54562+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54563+ unsigned int len = strlen(buf);
54564+
54565+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54566+
54567+ return;
54568+}
54569+
54570+static void gr_log_middle_varargs(int audit, const char *msg, ...)
54571+ __attribute__ ((format (printf, 2, 3)));
54572+
54573+static void gr_log_middle_varargs(int audit, const char *msg, ...)
54574+{
54575+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54576+ unsigned int len = strlen(buf);
54577+ va_list ap;
54578+
54579+ va_start(ap, msg);
54580+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
54581+ va_end(ap);
54582+
54583+ return;
54584+}
54585+
54586+static void gr_log_end(int audit)
54587+{
54588+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
54589+ unsigned int len = strlen(buf);
54590+
54591+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->real_parent)));
54592+ printk("%s\n", buf);
54593+
54594+ return;
54595+}
54596+
54597+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
54598+{
54599+ int logtype;
54600+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
54601+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
54602+ void *voidptr = NULL;
54603+ int num1 = 0, num2 = 0;
54604+ unsigned long ulong1 = 0, ulong2 = 0;
54605+ struct dentry *dentry = NULL;
54606+ struct vfsmount *mnt = NULL;
54607+ struct file *file = NULL;
54608+ struct task_struct *task = NULL;
54609+ const struct cred *cred, *pcred;
54610+ va_list ap;
54611+
54612+ BEGIN_LOCKS(audit);
54613+ logtype = gr_log_start(audit);
54614+ if (logtype == FLOODING) {
54615+ END_LOCKS(audit);
54616+ return;
54617+ }
54618+ va_start(ap, argtypes);
54619+ switch (argtypes) {
54620+ case GR_TTYSNIFF:
54621+ task = va_arg(ap, struct task_struct *);
54622+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid);
54623+ break;
54624+ case GR_SYSCTL_HIDDEN:
54625+ str1 = va_arg(ap, char *);
54626+ gr_log_middle_varargs(audit, msg, result, str1);
54627+ break;
54628+ case GR_RBAC:
54629+ dentry = va_arg(ap, struct dentry *);
54630+ mnt = va_arg(ap, struct vfsmount *);
54631+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
54632+ break;
54633+ case GR_RBAC_STR:
54634+ dentry = va_arg(ap, struct dentry *);
54635+ mnt = va_arg(ap, struct vfsmount *);
54636+ str1 = va_arg(ap, char *);
54637+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
54638+ break;
54639+ case GR_STR_RBAC:
54640+ str1 = va_arg(ap, char *);
54641+ dentry = va_arg(ap, struct dentry *);
54642+ mnt = va_arg(ap, struct vfsmount *);
54643+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
54644+ break;
54645+ case GR_RBAC_MODE2:
54646+ dentry = va_arg(ap, struct dentry *);
54647+ mnt = va_arg(ap, struct vfsmount *);
54648+ str1 = va_arg(ap, char *);
54649+ str2 = va_arg(ap, char *);
54650+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
54651+ break;
54652+ case GR_RBAC_MODE3:
54653+ dentry = va_arg(ap, struct dentry *);
54654+ mnt = va_arg(ap, struct vfsmount *);
54655+ str1 = va_arg(ap, char *);
54656+ str2 = va_arg(ap, char *);
54657+ str3 = va_arg(ap, char *);
54658+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
54659+ break;
54660+ case GR_FILENAME:
54661+ dentry = va_arg(ap, struct dentry *);
54662+ mnt = va_arg(ap, struct vfsmount *);
54663+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
54664+ break;
54665+ case GR_STR_FILENAME:
54666+ str1 = va_arg(ap, char *);
54667+ dentry = va_arg(ap, struct dentry *);
54668+ mnt = va_arg(ap, struct vfsmount *);
54669+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
54670+ break;
54671+ case GR_FILENAME_STR:
54672+ dentry = va_arg(ap, struct dentry *);
54673+ mnt = va_arg(ap, struct vfsmount *);
54674+ str1 = va_arg(ap, char *);
54675+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
54676+ break;
54677+ case GR_FILENAME_TWO_INT:
54678+ dentry = va_arg(ap, struct dentry *);
54679+ mnt = va_arg(ap, struct vfsmount *);
54680+ num1 = va_arg(ap, int);
54681+ num2 = va_arg(ap, int);
54682+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
54683+ break;
54684+ case GR_FILENAME_TWO_INT_STR:
54685+ dentry = va_arg(ap, struct dentry *);
54686+ mnt = va_arg(ap, struct vfsmount *);
54687+ num1 = va_arg(ap, int);
54688+ num2 = va_arg(ap, int);
54689+ str1 = va_arg(ap, char *);
54690+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
54691+ break;
54692+ case GR_TEXTREL:
54693+ file = va_arg(ap, struct file *);
54694+ ulong1 = va_arg(ap, unsigned long);
54695+ ulong2 = va_arg(ap, unsigned long);
54696+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
54697+ break;
54698+ case GR_PTRACE:
54699+ task = va_arg(ap, struct task_struct *);
54700+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
54701+ break;
54702+ case GR_RESOURCE:
54703+ task = va_arg(ap, struct task_struct *);
54704+ cred = __task_cred(task);
54705+ pcred = __task_cred(task->real_parent);
54706+ ulong1 = va_arg(ap, unsigned long);
54707+ str1 = va_arg(ap, char *);
54708+ ulong2 = va_arg(ap, unsigned long);
54709+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54710+ break;
54711+ case GR_CAP:
54712+ task = va_arg(ap, struct task_struct *);
54713+ cred = __task_cred(task);
54714+ pcred = __task_cred(task->real_parent);
54715+ str1 = va_arg(ap, char *);
54716+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54717+ break;
54718+ case GR_SIG:
54719+ str1 = va_arg(ap, char *);
54720+ voidptr = va_arg(ap, void *);
54721+ gr_log_middle_varargs(audit, msg, str1, voidptr);
54722+ break;
54723+ case GR_SIG2:
54724+ task = va_arg(ap, struct task_struct *);
54725+ cred = __task_cred(task);
54726+ pcred = __task_cred(task->real_parent);
54727+ num1 = va_arg(ap, int);
54728+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54729+ break;
54730+ case GR_CRASH1:
54731+ task = va_arg(ap, struct task_struct *);
54732+ cred = __task_cred(task);
54733+ pcred = __task_cred(task->real_parent);
54734+ ulong1 = va_arg(ap, unsigned long);
54735+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
54736+ break;
54737+ case GR_CRASH2:
54738+ task = va_arg(ap, struct task_struct *);
54739+ cred = __task_cred(task);
54740+ pcred = __task_cred(task->real_parent);
54741+ ulong1 = va_arg(ap, unsigned long);
54742+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
54743+ break;
54744+ case GR_RWXMAP:
54745+ file = va_arg(ap, struct file *);
54746+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
54747+ break;
54748+ case GR_PSACCT:
54749+ {
54750+ unsigned int wday, cday;
54751+ __u8 whr, chr;
54752+ __u8 wmin, cmin;
54753+ __u8 wsec, csec;
54754+ char cur_tty[64] = { 0 };
54755+ char parent_tty[64] = { 0 };
54756+
54757+ task = va_arg(ap, struct task_struct *);
54758+ wday = va_arg(ap, unsigned int);
54759+ cday = va_arg(ap, unsigned int);
54760+ whr = va_arg(ap, int);
54761+ chr = va_arg(ap, int);
54762+ wmin = va_arg(ap, int);
54763+ cmin = va_arg(ap, int);
54764+ wsec = va_arg(ap, int);
54765+ csec = va_arg(ap, int);
54766+ ulong1 = va_arg(ap, unsigned long);
54767+ cred = __task_cred(task);
54768+ pcred = __task_cred(task->real_parent);
54769+
54770+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task->real_parent->pid, &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
54771+ }
54772+ break;
54773+ default:
54774+ gr_log_middle(audit, msg, ap);
54775+ }
54776+ va_end(ap);
54777+ gr_log_end(audit);
54778+ END_LOCKS(audit);
54779+}
54780diff -urNp linux-2.6.32.43/grsecurity/grsec_mem.c linux-2.6.32.43/grsecurity/grsec_mem.c
54781--- linux-2.6.32.43/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
54782+++ linux-2.6.32.43/grsecurity/grsec_mem.c 2011-04-17 15:56:46.000000000 -0400
54783@@ -0,0 +1,33 @@
54784+#include <linux/kernel.h>
54785+#include <linux/sched.h>
54786+#include <linux/mm.h>
54787+#include <linux/mman.h>
54788+#include <linux/grinternal.h>
54789+
54790+void
54791+gr_handle_ioperm(void)
54792+{
54793+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
54794+ return;
54795+}
54796+
54797+void
54798+gr_handle_iopl(void)
54799+{
54800+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
54801+ return;
54802+}
54803+
54804+void
54805+gr_handle_mem_readwrite(u64 from, u64 to)
54806+{
54807+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
54808+ return;
54809+}
54810+
54811+void
54812+gr_handle_vm86(void)
54813+{
54814+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
54815+ return;
54816+}
54817diff -urNp linux-2.6.32.43/grsecurity/grsec_mount.c linux-2.6.32.43/grsecurity/grsec_mount.c
54818--- linux-2.6.32.43/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
54819+++ linux-2.6.32.43/grsecurity/grsec_mount.c 2011-06-20 19:47:03.000000000 -0400
54820@@ -0,0 +1,62 @@
54821+#include <linux/kernel.h>
54822+#include <linux/sched.h>
54823+#include <linux/mount.h>
54824+#include <linux/grsecurity.h>
54825+#include <linux/grinternal.h>
54826+
54827+void
54828+gr_log_remount(const char *devname, const int retval)
54829+{
54830+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54831+ if (grsec_enable_mount && (retval >= 0))
54832+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
54833+#endif
54834+ return;
54835+}
54836+
54837+void
54838+gr_log_unmount(const char *devname, const int retval)
54839+{
54840+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54841+ if (grsec_enable_mount && (retval >= 0))
54842+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
54843+#endif
54844+ return;
54845+}
54846+
54847+void
54848+gr_log_mount(const char *from, const char *to, const int retval)
54849+{
54850+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
54851+ if (grsec_enable_mount && (retval >= 0))
54852+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
54853+#endif
54854+ return;
54855+}
54856+
54857+int
54858+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
54859+{
54860+#ifdef CONFIG_GRKERNSEC_ROFS
54861+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
54862+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
54863+ return -EPERM;
54864+ } else
54865+ return 0;
54866+#endif
54867+ return 0;
54868+}
54869+
54870+int
54871+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
54872+{
54873+#ifdef CONFIG_GRKERNSEC_ROFS
54874+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
54875+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
54876+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
54877+ return -EPERM;
54878+ } else
54879+ return 0;
54880+#endif
54881+ return 0;
54882+}
54883diff -urNp linux-2.6.32.43/grsecurity/grsec_pax.c linux-2.6.32.43/grsecurity/grsec_pax.c
54884--- linux-2.6.32.43/grsecurity/grsec_pax.c 1969-12-31 19:00:00.000000000 -0500
54885+++ linux-2.6.32.43/grsecurity/grsec_pax.c 2011-04-17 15:56:46.000000000 -0400
54886@@ -0,0 +1,36 @@
54887+#include <linux/kernel.h>
54888+#include <linux/sched.h>
54889+#include <linux/mm.h>
54890+#include <linux/file.h>
54891+#include <linux/grinternal.h>
54892+#include <linux/grsecurity.h>
54893+
54894+void
54895+gr_log_textrel(struct vm_area_struct * vma)
54896+{
54897+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
54898+ if (grsec_enable_audit_textrel)
54899+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
54900+#endif
54901+ return;
54902+}
54903+
54904+void
54905+gr_log_rwxmmap(struct file *file)
54906+{
54907+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54908+ if (grsec_enable_log_rwxmaps)
54909+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
54910+#endif
54911+ return;
54912+}
54913+
54914+void
54915+gr_log_rwxmprotect(struct file *file)
54916+{
54917+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
54918+ if (grsec_enable_log_rwxmaps)
54919+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
54920+#endif
54921+ return;
54922+}
54923diff -urNp linux-2.6.32.43/grsecurity/grsec_ptrace.c linux-2.6.32.43/grsecurity/grsec_ptrace.c
54924--- linux-2.6.32.43/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500
54925+++ linux-2.6.32.43/grsecurity/grsec_ptrace.c 2011-04-17 15:56:46.000000000 -0400
54926@@ -0,0 +1,14 @@
54927+#include <linux/kernel.h>
54928+#include <linux/sched.h>
54929+#include <linux/grinternal.h>
54930+#include <linux/grsecurity.h>
54931+
54932+void
54933+gr_audit_ptrace(struct task_struct *task)
54934+{
54935+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
54936+ if (grsec_enable_audit_ptrace)
54937+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
54938+#endif
54939+ return;
54940+}
54941diff -urNp linux-2.6.32.43/grsecurity/grsec_sig.c linux-2.6.32.43/grsecurity/grsec_sig.c
54942--- linux-2.6.32.43/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
54943+++ linux-2.6.32.43/grsecurity/grsec_sig.c 2011-06-29 19:40:31.000000000 -0400
54944@@ -0,0 +1,205 @@
54945+#include <linux/kernel.h>
54946+#include <linux/sched.h>
54947+#include <linux/delay.h>
54948+#include <linux/grsecurity.h>
54949+#include <linux/grinternal.h>
54950+#include <linux/hardirq.h>
54951+
54952+char *signames[] = {
54953+ [SIGSEGV] = "Segmentation fault",
54954+ [SIGILL] = "Illegal instruction",
54955+ [SIGABRT] = "Abort",
54956+ [SIGBUS] = "Invalid alignment/Bus error"
54957+};
54958+
54959+void
54960+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
54961+{
54962+#ifdef CONFIG_GRKERNSEC_SIGNAL
54963+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
54964+ (sig == SIGABRT) || (sig == SIGBUS))) {
54965+ if (t->pid == current->pid) {
54966+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
54967+ } else {
54968+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
54969+ }
54970+ }
54971+#endif
54972+ return;
54973+}
54974+
54975+int
54976+gr_handle_signal(const struct task_struct *p, const int sig)
54977+{
54978+#ifdef CONFIG_GRKERNSEC
54979+ if (current->pid > 1 && gr_check_protected_task(p)) {
54980+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
54981+ return -EPERM;
54982+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
54983+ return -EPERM;
54984+ }
54985+#endif
54986+ return 0;
54987+}
54988+
54989+#ifdef CONFIG_GRKERNSEC
54990+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
54991+
54992+int gr_fake_force_sig(int sig, struct task_struct *t)
54993+{
54994+ unsigned long int flags;
54995+ int ret, blocked, ignored;
54996+ struct k_sigaction *action;
54997+
54998+ spin_lock_irqsave(&t->sighand->siglock, flags);
54999+ action = &t->sighand->action[sig-1];
55000+ ignored = action->sa.sa_handler == SIG_IGN;
55001+ blocked = sigismember(&t->blocked, sig);
55002+ if (blocked || ignored) {
55003+ action->sa.sa_handler = SIG_DFL;
55004+ if (blocked) {
55005+ sigdelset(&t->blocked, sig);
55006+ recalc_sigpending_and_wake(t);
55007+ }
55008+ }
55009+ if (action->sa.sa_handler == SIG_DFL)
55010+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
55011+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
55012+
55013+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
55014+
55015+ return ret;
55016+}
55017+#endif
55018+
55019+#ifdef CONFIG_GRKERNSEC_BRUTE
55020+#define GR_USER_BAN_TIME (15 * 60)
55021+
55022+static int __get_dumpable(unsigned long mm_flags)
55023+{
55024+ int ret;
55025+
55026+ ret = mm_flags & MMF_DUMPABLE_MASK;
55027+ return (ret >= 2) ? 2 : ret;
55028+}
55029+#endif
55030+
55031+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags)
55032+{
55033+#ifdef CONFIG_GRKERNSEC_BRUTE
55034+ uid_t uid = 0;
55035+
55036+ if (!grsec_enable_brute)
55037+ return;
55038+
55039+ rcu_read_lock();
55040+ read_lock(&tasklist_lock);
55041+ read_lock(&grsec_exec_file_lock);
55042+ if (p->real_parent && p->real_parent->exec_file == p->exec_file)
55043+ p->real_parent->brute = 1;
55044+ else {
55045+ const struct cred *cred = __task_cred(p), *cred2;
55046+ struct task_struct *tsk, *tsk2;
55047+
55048+ if (!__get_dumpable(mm_flags) && cred->uid) {
55049+ struct user_struct *user;
55050+
55051+ uid = cred->uid;
55052+
55053+			/* the reference is put on exec once the ban has expired */
55054+ user = find_user(uid);
55055+ if (user == NULL)
55056+ goto unlock;
55057+ user->banned = 1;
55058+ user->ban_expires = get_seconds() + GR_USER_BAN_TIME;
55059+ if (user->ban_expires == ~0UL)
55060+ user->ban_expires--;
55061+
55062+ do_each_thread(tsk2, tsk) {
55063+ cred2 = __task_cred(tsk);
55064+ if (tsk != p && cred2->uid == uid)
55065+ gr_fake_force_sig(SIGKILL, tsk);
55066+ } while_each_thread(tsk2, tsk);
55067+ }
55068+ }
55069+unlock:
55070+ read_unlock(&grsec_exec_file_lock);
55071+ read_unlock(&tasklist_lock);
55072+ rcu_read_unlock();
55073+
55074+ if (uid)
55075+ printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60);
55076+#endif
55077+ return;
55078+}
55079+
55080+void gr_handle_brute_check(void)
55081+{
55082+#ifdef CONFIG_GRKERNSEC_BRUTE
55083+ if (current->brute)
55084+ msleep(30 * 1000);
55085+#endif
55086+ return;
55087+}
55088+
55089+void gr_handle_kernel_exploit(void)
55090+{
55091+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
55092+ const struct cred *cred;
55093+ struct task_struct *tsk, *tsk2;
55094+ struct user_struct *user;
55095+ uid_t uid;
55096+
55097+ if (in_irq() || in_serving_softirq() || in_nmi())
55098+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
55099+
55100+ uid = current_uid();
55101+
55102+ if (uid == 0)
55103+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
55104+ else {
55105+ /* kill all the processes of this user, hold a reference
55106+ to their creds struct, and prevent them from creating
55107+ another process until system reset
55108+ */
55109+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n", uid);
55110+ /* we intentionally leak this ref */
55111+ user = get_uid(current->cred->user);
55112+ if (user) {
55113+ user->banned = 1;
55114+ user->ban_expires = ~0UL;
55115+ }
55116+
55117+ read_lock(&tasklist_lock);
55118+ do_each_thread(tsk2, tsk) {
55119+ cred = __task_cred(tsk);
55120+ if (cred->uid == uid)
55121+ gr_fake_force_sig(SIGKILL, tsk);
55122+ } while_each_thread(tsk2, tsk);
55123+ read_unlock(&tasklist_lock);
55124+ }
55125+#endif
55126+}
55127+
55128+int __gr_process_user_ban(struct user_struct *user)
55129+{
55130+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
55131+ if (unlikely(user->banned)) {
55132+ if (user->ban_expires != ~0UL && time_after_eq(get_seconds(), user->ban_expires)) {
55133+ user->banned = 0;
55134+ user->ban_expires = 0;
55135+ free_uid(user);
55136+ } else
55137+ return -EPERM;
55138+ }
55139+#endif
55140+ return 0;
55141+}
55142+
55143+int gr_process_user_ban(void)
55144+{
55145+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
55146+ return __gr_process_user_ban(current->cred->user);
55147+#endif
55148+ return 0;
55149+}
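
The ban bookkeeping above stores an absolute expiry (get_seconds() + GR_USER_BAN_TIME) in user->ban_expires, reserves ~0UL for "banned until reboot" (used by the kernel-exploit lockout above), and lifts the ban the first time __gr_process_user_ban() runs after the expiry has passed. A minimal userspace sketch of that arithmetic, illustration only (the struct and helpers here are stand-ins, not kernel API):

#include <stdio.h>
#include <time.h>

#define BAN_TIME	(15 * 60)	/* matches GR_USER_BAN_TIME */
#define BAN_FOREVER	(~0UL)

struct fake_user {
	int banned;
	unsigned long ban_expires;	/* absolute time in seconds */
};

static void ban_user(struct fake_user *u)
{
	u->banned = 1;
	u->ban_expires = (unsigned long)time(NULL) + BAN_TIME;
	if (u->ban_expires == BAN_FOREVER)	/* keep ~0UL free for permanent bans */
		u->ban_expires--;
}

static int check_ban(struct fake_user *u)
{
	unsigned long now = (unsigned long)time(NULL);

	if (u->banned) {
		if (u->ban_expires != BAN_FOREVER && now >= u->ban_expires) {
			u->banned = 0;		/* expiry passed: lift the ban */
			u->ban_expires = 0;
		} else {
			return -1;		/* still banned, like -EPERM */
		}
	}
	return 0;
}

int main(void)
{
	struct fake_user u = { 0, 0 };

	ban_user(&u);
	printf("right after the ban: %s\n", check_ban(&u) ? "refused" : "allowed");
	return 0;
}
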
55150diff -urNp linux-2.6.32.43/grsecurity/grsec_sock.c linux-2.6.32.43/grsecurity/grsec_sock.c
55151--- linux-2.6.32.43/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
55152+++ linux-2.6.32.43/grsecurity/grsec_sock.c 2011-04-17 15:56:46.000000000 -0400
55153@@ -0,0 +1,275 @@
55154+#include <linux/kernel.h>
55155+#include <linux/module.h>
55156+#include <linux/sched.h>
55157+#include <linux/file.h>
55158+#include <linux/net.h>
55159+#include <linux/in.h>
55160+#include <linux/ip.h>
55161+#include <net/sock.h>
55162+#include <net/inet_sock.h>
55163+#include <linux/grsecurity.h>
55164+#include <linux/grinternal.h>
55165+#include <linux/gracl.h>
55166+
55167+kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
55168+EXPORT_SYMBOL(gr_cap_rtnetlink);
55169+
55170+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
55171+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
55172+
55173+EXPORT_SYMBOL(gr_search_udp_recvmsg);
55174+EXPORT_SYMBOL(gr_search_udp_sendmsg);
55175+
55176+#ifdef CONFIG_UNIX_MODULE
55177+EXPORT_SYMBOL(gr_acl_handle_unix);
55178+EXPORT_SYMBOL(gr_acl_handle_mknod);
55179+EXPORT_SYMBOL(gr_handle_chroot_unix);
55180+EXPORT_SYMBOL(gr_handle_create);
55181+#endif
55182+
55183+#ifdef CONFIG_GRKERNSEC
55184+#define gr_conn_table_size 32749
55185+struct conn_table_entry {
55186+ struct conn_table_entry *next;
55187+ struct signal_struct *sig;
55188+};
55189+
55190+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
55191+DEFINE_SPINLOCK(gr_conn_table_lock);
55192+
55193+extern const char * gr_socktype_to_name(unsigned char type);
55194+extern const char * gr_proto_to_name(unsigned char proto);
55195+extern const char * gr_sockfamily_to_name(unsigned char family);
55196+
55197+static __inline__ int
55198+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
55199+{
55200+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
55201+}
55202+
55203+static __inline__ int
55204+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
55205+ __u16 sport, __u16 dport)
55206+{
55207+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
55208+ sig->gr_sport == sport && sig->gr_dport == dport))
55209+ return 1;
55210+ else
55211+ return 0;
55212+}
55213+
55214+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
55215+{
55216+ struct conn_table_entry **match;
55217+ unsigned int index;
55218+
55219+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
55220+ sig->gr_sport, sig->gr_dport,
55221+ gr_conn_table_size);
55222+
55223+ newent->sig = sig;
55224+
55225+ match = &gr_conn_table[index];
55226+ newent->next = *match;
55227+ *match = newent;
55228+
55229+ return;
55230+}
55231+
55232+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
55233+{
55234+ struct conn_table_entry *match, *last = NULL;
55235+ unsigned int index;
55236+
55237+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
55238+ sig->gr_sport, sig->gr_dport,
55239+ gr_conn_table_size);
55240+
55241+ match = gr_conn_table[index];
55242+ while (match && !conn_match(match->sig,
55243+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
55244+ sig->gr_dport)) {
55245+ last = match;
55246+ match = match->next;
55247+ }
55248+
55249+ if (match) {
55250+ if (last)
55251+ last->next = match->next;
55252+ else
55253+			gr_conn_table[index] = match->next;
55254+ kfree(match);
55255+ }
55256+
55257+ return;
55258+}
55259+
55260+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
55261+ __u16 sport, __u16 dport)
55262+{
55263+ struct conn_table_entry *match;
55264+ unsigned int index;
55265+
55266+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
55267+
55268+ match = gr_conn_table[index];
55269+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
55270+ match = match->next;
55271+
55272+ if (match)
55273+ return match->sig;
55274+ else
55275+ return NULL;
55276+}
55277+
55278+#endif
55279+
55280+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
55281+{
55282+#ifdef CONFIG_GRKERNSEC
55283+ struct signal_struct *sig = task->signal;
55284+ struct conn_table_entry *newent;
55285+
55286+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
55287+ if (newent == NULL)
55288+ return;
55289+ /* no bh lock needed since we are called with bh disabled */
55290+ spin_lock(&gr_conn_table_lock);
55291+ gr_del_task_from_ip_table_nolock(sig);
55292+ sig->gr_saddr = inet->rcv_saddr;
55293+ sig->gr_daddr = inet->daddr;
55294+ sig->gr_sport = inet->sport;
55295+ sig->gr_dport = inet->dport;
55296+ gr_add_to_task_ip_table_nolock(sig, newent);
55297+ spin_unlock(&gr_conn_table_lock);
55298+#endif
55299+ return;
55300+}
55301+
55302+void gr_del_task_from_ip_table(struct task_struct *task)
55303+{
55304+#ifdef CONFIG_GRKERNSEC
55305+ spin_lock_bh(&gr_conn_table_lock);
55306+ gr_del_task_from_ip_table_nolock(task->signal);
55307+ spin_unlock_bh(&gr_conn_table_lock);
55308+#endif
55309+ return;
55310+}
55311+
55312+void
55313+gr_attach_curr_ip(const struct sock *sk)
55314+{
55315+#ifdef CONFIG_GRKERNSEC
55316+ struct signal_struct *p, *set;
55317+ const struct inet_sock *inet = inet_sk(sk);
55318+
55319+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
55320+ return;
55321+
55322+ set = current->signal;
55323+
55324+ spin_lock_bh(&gr_conn_table_lock);
55325+ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
55326+ inet->dport, inet->sport);
55327+ if (unlikely(p != NULL)) {
55328+ set->curr_ip = p->curr_ip;
55329+ set->used_accept = 1;
55330+ gr_del_task_from_ip_table_nolock(p);
55331+ spin_unlock_bh(&gr_conn_table_lock);
55332+ return;
55333+ }
55334+ spin_unlock_bh(&gr_conn_table_lock);
55335+
55336+ set->curr_ip = inet->daddr;
55337+ set->used_accept = 1;
55338+#endif
55339+ return;
55340+}
55341+
55342+int
55343+gr_handle_sock_all(const int family, const int type, const int protocol)
55344+{
55345+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55346+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
55347+ (family != AF_UNIX)) {
55348+ if (family == AF_INET)
55349+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
55350+ else
55351+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
55352+ return -EACCES;
55353+ }
55354+#endif
55355+ return 0;
55356+}
55357+
55358+int
55359+gr_handle_sock_server(const struct sockaddr *sck)
55360+{
55361+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55362+ if (grsec_enable_socket_server &&
55363+ in_group_p(grsec_socket_server_gid) &&
55364+ sck && (sck->sa_family != AF_UNIX) &&
55365+ (sck->sa_family != AF_LOCAL)) {
55366+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
55367+ return -EACCES;
55368+ }
55369+#endif
55370+ return 0;
55371+}
55372+
55373+int
55374+gr_handle_sock_server_other(const struct sock *sck)
55375+{
55376+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55377+ if (grsec_enable_socket_server &&
55378+ in_group_p(grsec_socket_server_gid) &&
55379+ sck && (sck->sk_family != AF_UNIX) &&
55380+ (sck->sk_family != AF_LOCAL)) {
55381+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
55382+ return -EACCES;
55383+ }
55384+#endif
55385+ return 0;
55386+}
55387+
55388+int
55389+gr_handle_sock_client(const struct sockaddr *sck)
55390+{
55391+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55392+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
55393+ sck && (sck->sa_family != AF_UNIX) &&
55394+ (sck->sa_family != AF_LOCAL)) {
55395+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
55396+ return -EACCES;
55397+ }
55398+#endif
55399+ return 0;
55400+}
55401+
55402+kernel_cap_t
55403+gr_cap_rtnetlink(struct sock *sock)
55404+{
55405+#ifdef CONFIG_GRKERNSEC
55406+ if (!gr_acl_is_enabled())
55407+ return current_cap();
55408+ else if (sock->sk_protocol == NETLINK_ISCSI &&
55409+ cap_raised(current_cap(), CAP_SYS_ADMIN) &&
55410+ gr_is_capable(CAP_SYS_ADMIN))
55411+ return current_cap();
55412+ else if (sock->sk_protocol == NETLINK_AUDIT &&
55413+ cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
55414+ gr_is_capable(CAP_AUDIT_WRITE) &&
55415+ cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
55416+ gr_is_capable(CAP_AUDIT_CONTROL))
55417+ return current_cap();
55418+ else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
55419+ ((sock->sk_protocol == NETLINK_ROUTE) ?
55420+ gr_is_capable_nolog(CAP_NET_ADMIN) :
55421+ gr_is_capable(CAP_NET_ADMIN)))
55422+ return current_cap();
55423+ else
55424+ return __cap_empty_set;
55425+#else
55426+ return current_cap();
55427+#endif
55428+}
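
For reference, the connection table near the top of grsec_sock.c hashes the (saddr, daddr, sport, dport) tuple into one of 32749 chained buckets, so the signal_struct recorded for one end of a TCP connection can be found again when the other end is accept()ed and its curr_ip inherited. A self-contained sketch of the bucket arithmetic, illustration only (the sample addresses are made up):

#include <stdio.h>

#define TABLE_SIZE 32749	/* same bucket count as gr_conn_table_size */

/* same folding as the conn_hash() helper above: both addresses plus the
 * ports shifted into different byte positions, reduced modulo the table size */
static unsigned int conn_hash(unsigned int saddr, unsigned int daddr,
			      unsigned short sport, unsigned short dport)
{
	return (daddr + saddr + ((unsigned int)sport << 8) +
		((unsigned int)dport << 16)) % TABLE_SIZE;
}

int main(void)
{
	/* 192.0.2.1:12345 -> 198.51.100.7:80, addresses written as u32 values */
	unsigned int saddr = 0xc0000201, daddr = 0xc6336407;

	printf("bucket %u of %u\n", conn_hash(saddr, daddr, 12345, 80), TABLE_SIZE);
	return 0;
}
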
55429diff -urNp linux-2.6.32.43/grsecurity/grsec_sysctl.c linux-2.6.32.43/grsecurity/grsec_sysctl.c
55430--- linux-2.6.32.43/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
55431+++ linux-2.6.32.43/grsecurity/grsec_sysctl.c 2011-06-29 19:37:19.000000000 -0400
55432@@ -0,0 +1,489 @@
55433+#include <linux/kernel.h>
55434+#include <linux/sched.h>
55435+#include <linux/sysctl.h>
55436+#include <linux/grsecurity.h>
55437+#include <linux/grinternal.h>
55438+
55439+int
55440+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
55441+{
55442+#ifdef CONFIG_GRKERNSEC_SYSCTL
55443+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
55444+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
55445+ return -EACCES;
55446+ }
55447+#endif
55448+ return 0;
55449+}
55450+
55451+#ifdef CONFIG_GRKERNSEC_ROFS
55452+static int __maybe_unused one = 1;
55453+#endif
55454+
55455+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
55456+ctl_table grsecurity_table[] = {
55457+#ifdef CONFIG_GRKERNSEC_SYSCTL
55458+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
55459+#ifdef CONFIG_GRKERNSEC_IO
55460+ {
55461+ .ctl_name = CTL_UNNUMBERED,
55462+ .procname = "disable_priv_io",
55463+ .data = &grsec_disable_privio,
55464+ .maxlen = sizeof(int),
55465+ .mode = 0600,
55466+ .proc_handler = &proc_dointvec,
55467+ },
55468+#endif
55469+#endif
55470+#ifdef CONFIG_GRKERNSEC_LINK
55471+ {
55472+ .ctl_name = CTL_UNNUMBERED,
55473+ .procname = "linking_restrictions",
55474+ .data = &grsec_enable_link,
55475+ .maxlen = sizeof(int),
55476+ .mode = 0600,
55477+ .proc_handler = &proc_dointvec,
55478+ },
55479+#endif
55480+#ifdef CONFIG_GRKERNSEC_BRUTE
55481+ {
55482+ .ctl_name = CTL_UNNUMBERED,
55483+ .procname = "deter_bruteforce",
55484+ .data = &grsec_enable_brute,
55485+ .maxlen = sizeof(int),
55486+ .mode = 0600,
55487+ .proc_handler = &proc_dointvec,
55488+ },
55489+#endif
55490+#ifdef CONFIG_GRKERNSEC_FIFO
55491+ {
55492+ .ctl_name = CTL_UNNUMBERED,
55493+ .procname = "fifo_restrictions",
55494+ .data = &grsec_enable_fifo,
55495+ .maxlen = sizeof(int),
55496+ .mode = 0600,
55497+ .proc_handler = &proc_dointvec,
55498+ },
55499+#endif
55500+#ifdef CONFIG_GRKERNSEC_EXECVE
55501+ {
55502+ .ctl_name = CTL_UNNUMBERED,
55503+ .procname = "execve_limiting",
55504+ .data = &grsec_enable_execve,
55505+ .maxlen = sizeof(int),
55506+ .mode = 0600,
55507+ .proc_handler = &proc_dointvec,
55508+ },
55509+#endif
55510+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
55511+ {
55512+ .ctl_name = CTL_UNNUMBERED,
55513+ .procname = "ip_blackhole",
55514+ .data = &grsec_enable_blackhole,
55515+ .maxlen = sizeof(int),
55516+ .mode = 0600,
55517+ .proc_handler = &proc_dointvec,
55518+ },
55519+ {
55520+ .ctl_name = CTL_UNNUMBERED,
55521+ .procname = "lastack_retries",
55522+ .data = &grsec_lastack_retries,
55523+ .maxlen = sizeof(int),
55524+ .mode = 0600,
55525+ .proc_handler = &proc_dointvec,
55526+ },
55527+#endif
55528+#ifdef CONFIG_GRKERNSEC_EXECLOG
55529+ {
55530+ .ctl_name = CTL_UNNUMBERED,
55531+ .procname = "exec_logging",
55532+ .data = &grsec_enable_execlog,
55533+ .maxlen = sizeof(int),
55534+ .mode = 0600,
55535+ .proc_handler = &proc_dointvec,
55536+ },
55537+#endif
55538+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
55539+ {
55540+ .ctl_name = CTL_UNNUMBERED,
55541+ .procname = "rwxmap_logging",
55542+ .data = &grsec_enable_log_rwxmaps,
55543+ .maxlen = sizeof(int),
55544+ .mode = 0600,
55545+ .proc_handler = &proc_dointvec,
55546+ },
55547+#endif
55548+#ifdef CONFIG_GRKERNSEC_SIGNAL
55549+ {
55550+ .ctl_name = CTL_UNNUMBERED,
55551+ .procname = "signal_logging",
55552+ .data = &grsec_enable_signal,
55553+ .maxlen = sizeof(int),
55554+ .mode = 0600,
55555+ .proc_handler = &proc_dointvec,
55556+ },
55557+#endif
55558+#ifdef CONFIG_GRKERNSEC_FORKFAIL
55559+ {
55560+ .ctl_name = CTL_UNNUMBERED,
55561+ .procname = "forkfail_logging",
55562+ .data = &grsec_enable_forkfail,
55563+ .maxlen = sizeof(int),
55564+ .mode = 0600,
55565+ .proc_handler = &proc_dointvec,
55566+ },
55567+#endif
55568+#ifdef CONFIG_GRKERNSEC_TIME
55569+ {
55570+ .ctl_name = CTL_UNNUMBERED,
55571+ .procname = "timechange_logging",
55572+ .data = &grsec_enable_time,
55573+ .maxlen = sizeof(int),
55574+ .mode = 0600,
55575+ .proc_handler = &proc_dointvec,
55576+ },
55577+#endif
55578+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
55579+ {
55580+ .ctl_name = CTL_UNNUMBERED,
55581+ .procname = "chroot_deny_shmat",
55582+ .data = &grsec_enable_chroot_shmat,
55583+ .maxlen = sizeof(int),
55584+ .mode = 0600,
55585+ .proc_handler = &proc_dointvec,
55586+ },
55587+#endif
55588+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
55589+ {
55590+ .ctl_name = CTL_UNNUMBERED,
55591+ .procname = "chroot_deny_unix",
55592+ .data = &grsec_enable_chroot_unix,
55593+ .maxlen = sizeof(int),
55594+ .mode = 0600,
55595+ .proc_handler = &proc_dointvec,
55596+ },
55597+#endif
55598+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
55599+ {
55600+ .ctl_name = CTL_UNNUMBERED,
55601+ .procname = "chroot_deny_mount",
55602+ .data = &grsec_enable_chroot_mount,
55603+ .maxlen = sizeof(int),
55604+ .mode = 0600,
55605+ .proc_handler = &proc_dointvec,
55606+ },
55607+#endif
55608+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
55609+ {
55610+ .ctl_name = CTL_UNNUMBERED,
55611+ .procname = "chroot_deny_fchdir",
55612+ .data = &grsec_enable_chroot_fchdir,
55613+ .maxlen = sizeof(int),
55614+ .mode = 0600,
55615+ .proc_handler = &proc_dointvec,
55616+ },
55617+#endif
55618+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
55619+ {
55620+ .ctl_name = CTL_UNNUMBERED,
55621+ .procname = "chroot_deny_chroot",
55622+ .data = &grsec_enable_chroot_double,
55623+ .maxlen = sizeof(int),
55624+ .mode = 0600,
55625+ .proc_handler = &proc_dointvec,
55626+ },
55627+#endif
55628+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
55629+ {
55630+ .ctl_name = CTL_UNNUMBERED,
55631+ .procname = "chroot_deny_pivot",
55632+ .data = &grsec_enable_chroot_pivot,
55633+ .maxlen = sizeof(int),
55634+ .mode = 0600,
55635+ .proc_handler = &proc_dointvec,
55636+ },
55637+#endif
55638+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
55639+ {
55640+ .ctl_name = CTL_UNNUMBERED,
55641+ .procname = "chroot_enforce_chdir",
55642+ .data = &grsec_enable_chroot_chdir,
55643+ .maxlen = sizeof(int),
55644+ .mode = 0600,
55645+ .proc_handler = &proc_dointvec,
55646+ },
55647+#endif
55648+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
55649+ {
55650+ .ctl_name = CTL_UNNUMBERED,
55651+ .procname = "chroot_deny_chmod",
55652+ .data = &grsec_enable_chroot_chmod,
55653+ .maxlen = sizeof(int),
55654+ .mode = 0600,
55655+ .proc_handler = &proc_dointvec,
55656+ },
55657+#endif
55658+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
55659+ {
55660+ .ctl_name = CTL_UNNUMBERED,
55661+ .procname = "chroot_deny_mknod",
55662+ .data = &grsec_enable_chroot_mknod,
55663+ .maxlen = sizeof(int),
55664+ .mode = 0600,
55665+ .proc_handler = &proc_dointvec,
55666+ },
55667+#endif
55668+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
55669+ {
55670+ .ctl_name = CTL_UNNUMBERED,
55671+ .procname = "chroot_restrict_nice",
55672+ .data = &grsec_enable_chroot_nice,
55673+ .maxlen = sizeof(int),
55674+ .mode = 0600,
55675+ .proc_handler = &proc_dointvec,
55676+ },
55677+#endif
55678+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
55679+ {
55680+ .ctl_name = CTL_UNNUMBERED,
55681+ .procname = "chroot_execlog",
55682+ .data = &grsec_enable_chroot_execlog,
55683+ .maxlen = sizeof(int),
55684+ .mode = 0600,
55685+ .proc_handler = &proc_dointvec,
55686+ },
55687+#endif
55688+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
55689+ {
55690+ .ctl_name = CTL_UNNUMBERED,
55691+ .procname = "chroot_caps",
55692+ .data = &grsec_enable_chroot_caps,
55693+ .maxlen = sizeof(int),
55694+ .mode = 0600,
55695+ .proc_handler = &proc_dointvec,
55696+ },
55697+#endif
55698+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
55699+ {
55700+ .ctl_name = CTL_UNNUMBERED,
55701+ .procname = "chroot_deny_sysctl",
55702+ .data = &grsec_enable_chroot_sysctl,
55703+ .maxlen = sizeof(int),
55704+ .mode = 0600,
55705+ .proc_handler = &proc_dointvec,
55706+ },
55707+#endif
55708+#ifdef CONFIG_GRKERNSEC_TPE
55709+ {
55710+ .ctl_name = CTL_UNNUMBERED,
55711+ .procname = "tpe",
55712+ .data = &grsec_enable_tpe,
55713+ .maxlen = sizeof(int),
55714+ .mode = 0600,
55715+ .proc_handler = &proc_dointvec,
55716+ },
55717+ {
55718+ .ctl_name = CTL_UNNUMBERED,
55719+ .procname = "tpe_gid",
55720+ .data = &grsec_tpe_gid,
55721+ .maxlen = sizeof(int),
55722+ .mode = 0600,
55723+ .proc_handler = &proc_dointvec,
55724+ },
55725+#endif
55726+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55727+ {
55728+ .ctl_name = CTL_UNNUMBERED,
55729+ .procname = "tpe_invert",
55730+ .data = &grsec_enable_tpe_invert,
55731+ .maxlen = sizeof(int),
55732+ .mode = 0600,
55733+ .proc_handler = &proc_dointvec,
55734+ },
55735+#endif
55736+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55737+ {
55738+ .ctl_name = CTL_UNNUMBERED,
55739+ .procname = "tpe_restrict_all",
55740+ .data = &grsec_enable_tpe_all,
55741+ .maxlen = sizeof(int),
55742+ .mode = 0600,
55743+ .proc_handler = &proc_dointvec,
55744+ },
55745+#endif
55746+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
55747+ {
55748+ .ctl_name = CTL_UNNUMBERED,
55749+ .procname = "socket_all",
55750+ .data = &grsec_enable_socket_all,
55751+ .maxlen = sizeof(int),
55752+ .mode = 0600,
55753+ .proc_handler = &proc_dointvec,
55754+ },
55755+ {
55756+ .ctl_name = CTL_UNNUMBERED,
55757+ .procname = "socket_all_gid",
55758+ .data = &grsec_socket_all_gid,
55759+ .maxlen = sizeof(int),
55760+ .mode = 0600,
55761+ .proc_handler = &proc_dointvec,
55762+ },
55763+#endif
55764+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
55765+ {
55766+ .ctl_name = CTL_UNNUMBERED,
55767+ .procname = "socket_client",
55768+ .data = &grsec_enable_socket_client,
55769+ .maxlen = sizeof(int),
55770+ .mode = 0600,
55771+ .proc_handler = &proc_dointvec,
55772+ },
55773+ {
55774+ .ctl_name = CTL_UNNUMBERED,
55775+ .procname = "socket_client_gid",
55776+ .data = &grsec_socket_client_gid,
55777+ .maxlen = sizeof(int),
55778+ .mode = 0600,
55779+ .proc_handler = &proc_dointvec,
55780+ },
55781+#endif
55782+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
55783+ {
55784+ .ctl_name = CTL_UNNUMBERED,
55785+ .procname = "socket_server",
55786+ .data = &grsec_enable_socket_server,
55787+ .maxlen = sizeof(int),
55788+ .mode = 0600,
55789+ .proc_handler = &proc_dointvec,
55790+ },
55791+ {
55792+ .ctl_name = CTL_UNNUMBERED,
55793+ .procname = "socket_server_gid",
55794+ .data = &grsec_socket_server_gid,
55795+ .maxlen = sizeof(int),
55796+ .mode = 0600,
55797+ .proc_handler = &proc_dointvec,
55798+ },
55799+#endif
55800+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
55801+ {
55802+ .ctl_name = CTL_UNNUMBERED,
55803+ .procname = "audit_group",
55804+ .data = &grsec_enable_group,
55805+ .maxlen = sizeof(int),
55806+ .mode = 0600,
55807+ .proc_handler = &proc_dointvec,
55808+ },
55809+ {
55810+ .ctl_name = CTL_UNNUMBERED,
55811+ .procname = "audit_gid",
55812+ .data = &grsec_audit_gid,
55813+ .maxlen = sizeof(int),
55814+ .mode = 0600,
55815+ .proc_handler = &proc_dointvec,
55816+ },
55817+#endif
55818+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
55819+ {
55820+ .ctl_name = CTL_UNNUMBERED,
55821+ .procname = "audit_chdir",
55822+ .data = &grsec_enable_chdir,
55823+ .maxlen = sizeof(int),
55824+ .mode = 0600,
55825+ .proc_handler = &proc_dointvec,
55826+ },
55827+#endif
55828+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
55829+ {
55830+ .ctl_name = CTL_UNNUMBERED,
55831+ .procname = "audit_mount",
55832+ .data = &grsec_enable_mount,
55833+ .maxlen = sizeof(int),
55834+ .mode = 0600,
55835+ .proc_handler = &proc_dointvec,
55836+ },
55837+#endif
55838+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
55839+ {
55840+ .ctl_name = CTL_UNNUMBERED,
55841+ .procname = "audit_textrel",
55842+ .data = &grsec_enable_audit_textrel,
55843+ .maxlen = sizeof(int),
55844+ .mode = 0600,
55845+ .proc_handler = &proc_dointvec,
55846+ },
55847+#endif
55848+#ifdef CONFIG_GRKERNSEC_DMESG
55849+ {
55850+ .ctl_name = CTL_UNNUMBERED,
55851+ .procname = "dmesg",
55852+ .data = &grsec_enable_dmesg,
55853+ .maxlen = sizeof(int),
55854+ .mode = 0600,
55855+ .proc_handler = &proc_dointvec,
55856+ },
55857+#endif
55858+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
55859+ {
55860+ .ctl_name = CTL_UNNUMBERED,
55861+ .procname = "chroot_findtask",
55862+ .data = &grsec_enable_chroot_findtask,
55863+ .maxlen = sizeof(int),
55864+ .mode = 0600,
55865+ .proc_handler = &proc_dointvec,
55866+ },
55867+#endif
55868+#ifdef CONFIG_GRKERNSEC_RESLOG
55869+ {
55870+ .ctl_name = CTL_UNNUMBERED,
55871+ .procname = "resource_logging",
55872+ .data = &grsec_resource_logging,
55873+ .maxlen = sizeof(int),
55874+ .mode = 0600,
55875+ .proc_handler = &proc_dointvec,
55876+ },
55877+#endif
55878+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
55879+ {
55880+ .ctl_name = CTL_UNNUMBERED,
55881+ .procname = "audit_ptrace",
55882+ .data = &grsec_enable_audit_ptrace,
55883+ .maxlen = sizeof(int),
55884+ .mode = 0600,
55885+ .proc_handler = &proc_dointvec,
55886+ },
55887+#endif
55888+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
55889+ {
55890+ .ctl_name = CTL_UNNUMBERED,
55891+ .procname = "harden_ptrace",
55892+ .data = &grsec_enable_harden_ptrace,
55893+ .maxlen = sizeof(int),
55894+ .mode = 0600,
55895+ .proc_handler = &proc_dointvec,
55896+ },
55897+#endif
55898+ {
55899+ .ctl_name = CTL_UNNUMBERED,
55900+ .procname = "grsec_lock",
55901+ .data = &grsec_lock,
55902+ .maxlen = sizeof(int),
55903+ .mode = 0600,
55904+ .proc_handler = &proc_dointvec,
55905+ },
55906+#endif
55907+#ifdef CONFIG_GRKERNSEC_ROFS
55908+ {
55909+ .ctl_name = CTL_UNNUMBERED,
55910+ .procname = "romount_protect",
55911+ .data = &grsec_enable_rofs,
55912+ .maxlen = sizeof(int),
55913+ .mode = 0600,
55914+ .proc_handler = &proc_dointvec_minmax,
55915+ .extra1 = &one,
55916+ .extra2 = &one,
55917+ },
55918+#endif
55919+ { .ctl_name = 0 }
55920+};
55921+#endif
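
One detail worth noting in the table above: the romount_protect entry uses proc_dointvec_minmax with both extra1 and extra2 pointing at the static one, so the only value a write can store is 1, which is what makes the switch one-way (see the GRKERNSEC_ROFS help text later in this patch). A userspace-style sketch of that clamp, illustration only:

#include <stdio.h>

/* mimic the effect of proc_dointvec_minmax with .extra1 = .extra2 = &one:
 * anything outside [min, max] is refused and the stored value is untouched */
static int clamped_write(int requested, int min, int max, int *stored)
{
	if (requested < min || requested > max)
		return -1;
	*stored = requested;
	return 0;
}

int main(void)
{
	int romount_protect = 0;
	const int one = 1;

	printf("write 1 -> %d (stored %d)\n",
	       clamped_write(1, one, one, &romount_protect), romount_protect);
	printf("write 0 -> %d (stored %d)\n",
	       clamped_write(0, one, one, &romount_protect), romount_protect);
	return 0;
}
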
55922diff -urNp linux-2.6.32.43/grsecurity/grsec_time.c linux-2.6.32.43/grsecurity/grsec_time.c
55923--- linux-2.6.32.43/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
55924+++ linux-2.6.32.43/grsecurity/grsec_time.c 2011-04-17 15:56:46.000000000 -0400
55925@@ -0,0 +1,16 @@
55926+#include <linux/kernel.h>
55927+#include <linux/sched.h>
55928+#include <linux/grinternal.h>
55929+#include <linux/module.h>
55930+
55931+void
55932+gr_log_timechange(void)
55933+{
55934+#ifdef CONFIG_GRKERNSEC_TIME
55935+ if (grsec_enable_time)
55936+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
55937+#endif
55938+ return;
55939+}
55940+
55941+EXPORT_SYMBOL(gr_log_timechange);
55942diff -urNp linux-2.6.32.43/grsecurity/grsec_tpe.c linux-2.6.32.43/grsecurity/grsec_tpe.c
55943--- linux-2.6.32.43/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
55944+++ linux-2.6.32.43/grsecurity/grsec_tpe.c 2011-04-17 15:56:46.000000000 -0400
55945@@ -0,0 +1,39 @@
55946+#include <linux/kernel.h>
55947+#include <linux/sched.h>
55948+#include <linux/file.h>
55949+#include <linux/fs.h>
55950+#include <linux/grinternal.h>
55951+
55952+extern int gr_acl_tpe_check(void);
55953+
55954+int
55955+gr_tpe_allow(const struct file *file)
55956+{
55957+#ifdef CONFIG_GRKERNSEC
55958+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
55959+ const struct cred *cred = current_cred();
55960+
55961+ if (cred->uid && ((grsec_enable_tpe &&
55962+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
55963+ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) ||
55964+ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid)))
55965+#else
55966+ in_group_p(grsec_tpe_gid)
55967+#endif
55968+ ) || gr_acl_tpe_check()) &&
55969+ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
55970+ (inode->i_mode & S_IWOTH))))) {
55971+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55972+ return 0;
55973+ }
55974+#ifdef CONFIG_GRKERNSEC_TPE_ALL
55975+ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
55976+ ((inode->i_uid && (inode->i_uid != cred->uid)) ||
55977+ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
55978+ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt);
55979+ return 0;
55980+ }
55981+#endif
55982+#endif
55983+ return 1;
55984+}
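
Summarised, gr_tpe_allow() above refuses execution for a non-root user who is subject to TPE (via the gid set, optionally inverted, or the RBAC flag) when the binary's parent directory is not owned by root or is group/world-writable; with TPE_ALL it additionally refuses directories owned by another user or writable by group/other. A simplified userspace sketch of that decision, illustration only (the enable toggles are folded into plain booleans and the RBAC hook is omitted):

#include <stdbool.h>
#include <stdio.h>

struct dir_attrs {
	unsigned int owner_uid;	/* owner of the directory holding the binary */
	bool gw_writable;	/* S_IWGRP or S_IWOTH set on that directory */
};

/* return true if the exec should be allowed */
static bool tpe_allow(unsigned int uid, bool subject_to_tpe, bool tpe_all,
		      const struct dir_attrs *dir)
{
	if (uid == 0)		/* root is never restricted by TPE */
		return true;

	/* basic TPE: covered users may only run from root-owned,
	 * non-group/world-writable directories */
	if (subject_to_tpe && (dir->owner_uid != 0 || dir->gw_writable))
		return false;

	/* TPE_ALL: any non-root user is also refused foreign-owned or
	 * group/world-writable directories */
	if (tpe_all && ((dir->owner_uid != 0 && dir->owner_uid != uid) ||
			dir->gw_writable))
		return false;

	return true;
}

int main(void)
{
	struct dir_attrs tmpdir = { .owner_uid = 0, .gw_writable = true };
	struct dir_attrs bindir = { .owner_uid = 0, .gw_writable = false };

	printf("/tmp-like dir allowed: %d\n", tpe_allow(1000, true, false, &tmpdir));
	printf("/usr/bin-like dir allowed: %d\n", tpe_allow(1000, true, false, &bindir));
	return 0;
}
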
55985diff -urNp linux-2.6.32.43/grsecurity/grsum.c linux-2.6.32.43/grsecurity/grsum.c
55986--- linux-2.6.32.43/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
55987+++ linux-2.6.32.43/grsecurity/grsum.c 2011-04-17 15:56:46.000000000 -0400
55988@@ -0,0 +1,61 @@
55989+#include <linux/err.h>
55990+#include <linux/kernel.h>
55991+#include <linux/sched.h>
55992+#include <linux/mm.h>
55993+#include <linux/scatterlist.h>
55994+#include <linux/crypto.h>
55995+#include <linux/gracl.h>
55996+
55997+
55998+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
55999+#error "crypto and sha256 must be built into the kernel"
56000+#endif
56001+
56002+int
56003+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
56004+{
56005+ char *p;
56006+ struct crypto_hash *tfm;
56007+ struct hash_desc desc;
56008+ struct scatterlist sg;
56009+ unsigned char temp_sum[GR_SHA_LEN];
56010+ volatile int retval = 0;
56011+ volatile int dummy = 0;
56012+ unsigned int i;
56013+
56014+ sg_init_table(&sg, 1);
56015+
56016+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
56017+ if (IS_ERR(tfm)) {
56018+ /* should never happen, since sha256 should be built in */
56019+ return 1;
56020+ }
56021+
56022+ desc.tfm = tfm;
56023+ desc.flags = 0;
56024+
56025+ crypto_hash_init(&desc);
56026+
56027+ p = salt;
56028+ sg_set_buf(&sg, p, GR_SALT_LEN);
56029+ crypto_hash_update(&desc, &sg, sg.length);
56030+
56031+ p = entry->pw;
56032+ sg_set_buf(&sg, p, strlen(p));
56033+
56034+ crypto_hash_update(&desc, &sg, sg.length);
56035+
56036+ crypto_hash_final(&desc, temp_sum);
56037+
56038+ memset(entry->pw, 0, GR_PW_LEN);
56039+
56040+ for (i = 0; i < GR_SHA_LEN; i++)
56041+ if (sum[i] != temp_sum[i])
56042+ retval = 1;
56043+ else
56044+ dummy = 1; // waste a cycle
56045+
56046+ crypto_free_hash(tfm);
56047+
56048+ return retval;
56049+}
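
The comparison loop in chkpw() deliberately visits every byte of the digest and keeps the volatile dummy store on the matching branch, so the function does comparable work whether the password is right or wrong and does not leak how many leading bytes matched through an early exit. A standalone sketch of the same no-early-exit idea, written branch-free for illustration:

#include <stddef.h>
#include <stdio.h>

/* accumulate all differences instead of returning at the first mismatch */
static int ct_compare(const unsigned char *a, const unsigned char *b, size_t len)
{
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= a[i] ^ b[i];

	return diff != 0;	/* 0 on match, 1 on mismatch */
}

int main(void)
{
	unsigned char stored[4] = { 0xde, 0xad, 0xbe, 0xef };
	unsigned char guess[4]  = { 0xde, 0xad, 0x00, 0xef };

	printf("mismatch: %d\n", ct_compare(stored, guess, sizeof(stored)));
	return 0;
}
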
56050diff -urNp linux-2.6.32.43/grsecurity/Kconfig linux-2.6.32.43/grsecurity/Kconfig
56051--- linux-2.6.32.43/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
56052+++ linux-2.6.32.43/grsecurity/Kconfig 2011-07-06 19:57:57.000000000 -0400
56053@@ -0,0 +1,1047 @@
56054+#
56055+# grecurity configuration
56056+# grsecurity configuration
56057+
56058+menu "Grsecurity"
56059+
56060+config GRKERNSEC
56061+ bool "Grsecurity"
56062+ select CRYPTO
56063+ select CRYPTO_SHA256
56064+ help
56065+ If you say Y here, you will be able to configure many features
56066+ that will enhance the security of your system. It is highly
56067+ recommended that you say Y here and read through the help
56068+ for each option so that you fully understand the features and
56069+ can evaluate their usefulness for your machine.
56070+
56071+choice
56072+ prompt "Security Level"
56073+ depends on GRKERNSEC
56074+ default GRKERNSEC_CUSTOM
56075+
56076+config GRKERNSEC_LOW
56077+ bool "Low"
56078+ select GRKERNSEC_LINK
56079+ select GRKERNSEC_FIFO
56080+ select GRKERNSEC_EXECVE
56081+ select GRKERNSEC_RANDNET
56082+ select GRKERNSEC_DMESG
56083+ select GRKERNSEC_CHROOT
56084+ select GRKERNSEC_CHROOT_CHDIR
56085+
56086+ help
56087+ If you choose this option, several of the grsecurity options will
56088+ be enabled that will give you greater protection against a number
56089+ of attacks, while assuring that none of your software will have any
56090+ conflicts with the additional security measures. If you run a lot
56091+ of unusual software, or you are having problems with the higher
56092+ security levels, you should say Y here. With this option, the
56093+ following features are enabled:
56094+
56095+ - Linking restrictions
56096+ - FIFO restrictions
56097+ - Enforcing RLIMIT_NPROC on execve
56098+ - Restricted dmesg
56099+ - Enforced chdir("/") on chroot
56100+ - Runtime module disabling
56101+
56102+config GRKERNSEC_MEDIUM
56103+ bool "Medium"
56104+ select PAX
56105+ select PAX_EI_PAX
56106+ select PAX_PT_PAX_FLAGS
56107+ select PAX_HAVE_ACL_FLAGS
56108+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
56109+ select GRKERNSEC_CHROOT
56110+ select GRKERNSEC_CHROOT_SYSCTL
56111+ select GRKERNSEC_LINK
56112+ select GRKERNSEC_FIFO
56113+ select GRKERNSEC_EXECVE
56114+ select GRKERNSEC_DMESG
56115+ select GRKERNSEC_RANDNET
56116+ select GRKERNSEC_FORKFAIL
56117+ select GRKERNSEC_TIME
56118+ select GRKERNSEC_SIGNAL
56119+ select GRKERNSEC_CHROOT
56120+ select GRKERNSEC_CHROOT_UNIX
56121+ select GRKERNSEC_CHROOT_MOUNT
56122+ select GRKERNSEC_CHROOT_PIVOT
56123+ select GRKERNSEC_CHROOT_DOUBLE
56124+ select GRKERNSEC_CHROOT_CHDIR
56125+ select GRKERNSEC_CHROOT_MKNOD
56126+ select GRKERNSEC_PROC
56127+ select GRKERNSEC_PROC_USERGROUP
56128+ select PAX_RANDUSTACK
56129+ select PAX_ASLR
56130+ select PAX_RANDMMAP
56131+ select PAX_REFCOUNT if (X86 || SPARC64)
56132+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
56133+
56134+ help
56135+ If you say Y here, several features in addition to those included
56136+ in the low additional security level will be enabled. These
56137+ features provide even more security to your system, though in rare
56138+ cases they may be incompatible with very old or poorly written
56139+ software. If you enable this option, make sure that your auth
56140+ service (identd) is running as gid 1001. With this option,
56141+ the following features (in addition to those provided in the
56142+ low additional security level) will be enabled:
56143+
56144+ - Failed fork logging
56145+ - Time change logging
56146+ - Signal logging
56147+ - Deny mounts in chroot
56148+ - Deny double chrooting
56149+ - Deny sysctl writes in chroot
56150+ - Deny mknod in chroot
56151+ - Deny access to abstract AF_UNIX sockets out of chroot
56152+ - Deny pivot_root in chroot
56153+	  - Deny writes to /dev/kmem, /dev/mem, and /dev/port
56154+ - /proc restrictions with special GID set to 10 (usually wheel)
56155+ - Address Space Layout Randomization (ASLR)
56156+ - Prevent exploitation of most refcount overflows
56157+ - Bounds checking of copying between the kernel and userland
56158+
56159+config GRKERNSEC_HIGH
56160+ bool "High"
56161+ select GRKERNSEC_LINK
56162+ select GRKERNSEC_FIFO
56163+ select GRKERNSEC_EXECVE
56164+ select GRKERNSEC_DMESG
56165+ select GRKERNSEC_FORKFAIL
56166+ select GRKERNSEC_TIME
56167+ select GRKERNSEC_SIGNAL
56168+ select GRKERNSEC_CHROOT
56169+ select GRKERNSEC_CHROOT_SHMAT
56170+ select GRKERNSEC_CHROOT_UNIX
56171+ select GRKERNSEC_CHROOT_MOUNT
56172+ select GRKERNSEC_CHROOT_FCHDIR
56173+ select GRKERNSEC_CHROOT_PIVOT
56174+ select GRKERNSEC_CHROOT_DOUBLE
56175+ select GRKERNSEC_CHROOT_CHDIR
56176+ select GRKERNSEC_CHROOT_MKNOD
56177+ select GRKERNSEC_CHROOT_CAPS
56178+ select GRKERNSEC_CHROOT_SYSCTL
56179+ select GRKERNSEC_CHROOT_FINDTASK
56180+ select GRKERNSEC_SYSFS_RESTRICT
56181+ select GRKERNSEC_PROC
56182+ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
56183+ select GRKERNSEC_HIDESYM
56184+ select GRKERNSEC_BRUTE
56185+ select GRKERNSEC_PROC_USERGROUP
56186+ select GRKERNSEC_KMEM
56187+ select GRKERNSEC_RESLOG
56188+ select GRKERNSEC_RANDNET
56189+ select GRKERNSEC_PROC_ADD
56190+ select GRKERNSEC_CHROOT_CHMOD
56191+ select GRKERNSEC_CHROOT_NICE
56192+ select GRKERNSEC_AUDIT_MOUNT
56193+ select GRKERNSEC_MODHARDEN if (MODULES)
56194+ select GRKERNSEC_HARDEN_PTRACE
56195+ select GRKERNSEC_VM86 if (X86_32)
56196+ select GRKERNSEC_KERN_LOCKOUT if (X86 || ARM || PPC || SPARC)
56197+ select PAX
56198+ select PAX_RANDUSTACK
56199+ select PAX_ASLR
56200+ select PAX_RANDMMAP
56201+ select PAX_NOEXEC
56202+ select PAX_MPROTECT
56203+ select PAX_EI_PAX
56204+ select PAX_PT_PAX_FLAGS
56205+ select PAX_HAVE_ACL_FLAGS
56206+ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN)
56207+ select PAX_MEMORY_UDEREF if (X86 && !XEN)
56208+ select PAX_RANDKSTACK if (X86_TSC && X86)
56209+ select PAX_SEGMEXEC if (X86_32)
56210+ select PAX_PAGEEXEC
56211+ select PAX_EMUPLT if (ALPHA || PARISC || SPARC)
56212+ select PAX_EMUTRAMP if (PARISC)
56213+ select PAX_EMUSIGRT if (PARISC)
56214+ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
56215+ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86))
56216+ select PAX_REFCOUNT if (X86 || SPARC64)
56217+ select PAX_USERCOPY if ((X86 || SPARC || PPC || ARM) && (SLAB || SLUB || SLOB))
56218+ help
56219+ If you say Y here, many of the features of grsecurity will be
56220+ enabled, which will protect you against many kinds of attacks
56221+ against your system. The heightened security comes at a cost
56222+ of an increased chance of incompatibilities with rare software
56223+ on your machine. Since this security level enables PaX, you should
56224+ view <http://pax.grsecurity.net> and read about the PaX
56225+ project. While you are there, download chpax and run it on
56226+ binaries that cause problems with PaX. Also remember that
56227+ since the /proc restrictions are enabled, you must run your
56228+ identd as gid 1001. This security level enables the following
56229+ features in addition to those listed in the low and medium
56230+ security levels:
56231+
56232+ - Additional /proc restrictions
56233+ - Chmod restrictions in chroot
56234+ - No signals, ptrace, or viewing of processes outside of chroot
56235+ - Capability restrictions in chroot
56236+ - Deny fchdir out of chroot
56237+ - Priority restrictions in chroot
56238+ - Segmentation-based implementation of PaX
56239+ - Mprotect restrictions
56240+ - Removal of addresses from /proc/<pid>/[smaps|maps|stat]
56241+ - Kernel stack randomization
56242+ - Mount/unmount/remount logging
56243+ - Kernel symbol hiding
56244+ - Prevention of memory exhaustion-based exploits
56245+ - Hardening of module auto-loading
56246+ - Ptrace restrictions
56247+ - Restricted vm86 mode
56248+ - Restricted sysfs/debugfs
56249+ - Active kernel exploit response
56250+
56251+config GRKERNSEC_CUSTOM
56252+ bool "Custom"
56253+ help
56254+ If you say Y here, you will be able to configure every grsecurity
56255+ option, which allows you to enable many more features that aren't
56256+ covered in the basic security levels. These additional features
56257+ include TPE, socket restrictions, and the sysctl system for
56258+ grsecurity. It is advised that you read through the help for
56259+ each option to determine its usefulness in your situation.
56260+
56261+endchoice
56262+
56263+menu "Address Space Protection"
56264+depends on GRKERNSEC
56265+
56266+config GRKERNSEC_KMEM
56267+ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
56268+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
56269+ help
56270+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
56271+ be written to via mmap or otherwise to modify the running kernel.
56272+ /dev/port will also not be allowed to be opened. If you have module
56273+ support disabled, enabling this will close up four ways that are
56274+ currently used to insert malicious code into the running kernel.
56275+ Even with all these features enabled, we still highly recommend that
56276+ you use the RBAC system, as it is still possible for an attacker to
56277+ modify the running kernel through privileged I/O granted by ioperm/iopl.
56278+ If you are not using XFree86, you may be able to stop this additional
56279+ case by enabling the 'Disable privileged I/O' option. Though nothing
56280+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
56281+ but only to video memory, which is the only writing we allow in this
56282+	  case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, the
56283+	  mapping cannot later be given PROT_WRITE via mprotect.
56284+ It is highly recommended that you say Y here if you meet all the
56285+ conditions above.
56286+
56287+config GRKERNSEC_VM86
56288+ bool "Restrict VM86 mode"
56289+ depends on X86_32
56290+
56291+ help
56292+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
56293+ make use of a special execution mode on 32bit x86 processors called
56294+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
56295+ video cards and will still work with this option enabled. The purpose
56296+ of the option is to prevent exploitation of emulation errors in
56297+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
56298+ Nearly all users should be able to enable this option.
56299+
56300+config GRKERNSEC_IO
56301+ bool "Disable privileged I/O"
56302+ depends on X86
56303+ select RTC_CLASS
56304+ select RTC_INTF_DEV
56305+ select RTC_DRV_CMOS
56306+
56307+ help
56308+ If you say Y here, all ioperm and iopl calls will return an error.
56309+ Ioperm and iopl can be used to modify the running kernel.
56310+ Unfortunately, some programs need this access to operate properly,
56311+ the most notable of which are XFree86 and hwclock. hwclock can be
56312+ remedied by having RTC support in the kernel, so real-time
56313+ clock support is enabled if this option is enabled, to ensure
56314+ that hwclock operates correctly. XFree86 still will not
56315+ operate correctly with this option enabled, so DO NOT CHOOSE Y
56316+ IF YOU USE XFree86. If you use XFree86 and you still want to
56317+ protect your kernel against modification, use the RBAC system.
56318+
56319+config GRKERNSEC_PROC_MEMMAP
56320+ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]"
56321+ default y if (PAX_NOEXEC || PAX_ASLR)
56322+ depends on PAX_NOEXEC || PAX_ASLR
56323+ help
56324+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
56325+	  give no information about the addresses of a task's mappings if
56326+	  PaX features that rely on random addresses are enabled for that task.
56327+ If you use PaX it is greatly recommended that you say Y here as it
56328+ closes up a hole that makes the full ASLR useless for suid
56329+ binaries.
56330+
56331+config GRKERNSEC_BRUTE
56332+ bool "Deter exploit bruteforcing"
56333+ help
56334+ If you say Y here, attempts to bruteforce exploits against forking
56335+	  daemons such as apache or sshd, as well as against suid/sgid binaries,
56336+	  will be deterred. When a child of a forking daemon is killed by PaX
56337+ or crashes due to an illegal instruction or other suspicious signal,
56338+ the parent process will be delayed 30 seconds upon every subsequent
56339+ fork until the administrator is able to assess the situation and
56340+ restart the daemon.
56341+ In the suid/sgid case, the attempt is logged, the user has all their
56342+ processes terminated, and they are prevented from executing any further
56343+ processes for 15 minutes.
56344+ It is recommended that you also enable signal logging in the auditing
56345+ section so that logs are generated when a process triggers a suspicious
56346+ signal.
56347+ If the sysctl option is enabled, a sysctl option with name
56348+ "deter_bruteforce" is created.
56349+
56350+config GRKERNSEC_MODHARDEN
56351+ bool "Harden module auto-loading"
56352+ depends on MODULES
56353+ help
56354+ If you say Y here, module auto-loading in response to use of some
56355+ feature implemented by an unloaded module will be restricted to
56356+ root users. Enabling this option helps defend against attacks
56357+ by unprivileged users who abuse the auto-loading behavior to
56358+ cause a vulnerable module to load that is then exploited.
56359+
56360+ If this option prevents a legitimate use of auto-loading for a
56361+ non-root user, the administrator can execute modprobe manually
56362+ with the exact name of the module mentioned in the alert log.
56363+ Alternatively, the administrator can add the module to the list
56364+ of modules loaded at boot by modifying init scripts.
56365+
56366+ Modification of init scripts will most likely be needed on
56367+ Ubuntu servers with encrypted home directory support enabled,
56368+ as the first non-root user logging in will cause the ecb(aes),
56369+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
56370+
56371+config GRKERNSEC_HIDESYM
56372+ bool "Hide kernel symbols"
56373+ help
56374+ If you say Y here, getting information on loaded modules, and
56375+ displaying all kernel symbols through a syscall will be restricted
56376+ to users with CAP_SYS_MODULE. For software compatibility reasons,
56377+ /proc/kallsyms will be restricted to the root user. The RBAC
56378+ system can hide that entry even from root.
56379+
56380+ This option also prevents leaking of kernel addresses through
56381+ several /proc entries.
56382+
56383+ Note that this option is only effective provided the following
56384+ conditions are met:
56385+ 1) The kernel using grsecurity is not precompiled by some distribution
56386+ 2) You have also enabled GRKERNSEC_DMESG
56387+ 3) You are using the RBAC system and hiding other files such as your
56388+ kernel image and System.map. Alternatively, enabling this option
56389+ causes the permissions on /boot, /lib/modules, and the kernel
56390+ source directory to change at compile time to prevent
56391+ reading by non-root users.
56392+ If the above conditions are met, this option will aid in providing a
56393+ useful protection against local kernel exploitation of overflows
56394+ and arbitrary read/write vulnerabilities.
56395+
56396+config GRKERNSEC_KERN_LOCKOUT
56397+ bool "Active kernel exploit response"
56398+ depends on X86 || ARM || PPC || SPARC
56399+ help
56400+ If you say Y here, when a PaX alert is triggered due to suspicious
56401+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
56402+ or an OOPs occurs due to bad memory accesses, instead of just
56403+ terminating the offending process (and potentially allowing
56404+ a subsequent exploit from the same user), we will take one of two
56405+ actions:
56406+ If the user was root, we will panic the system
56407+ If the user was non-root, we will log the attempt, terminate
56408+ all processes owned by the user, then prevent them from creating
56409+ any new processes until the system is restarted
56410+ This deters repeated kernel exploitation/bruteforcing attempts
56411+ and is useful for later forensics.
56412+
56413+endmenu
56414+menu "Role Based Access Control Options"
56415+depends on GRKERNSEC
56416+
56417+config GRKERNSEC_RBAC_DEBUG
56418+ bool
56419+
56420+config GRKERNSEC_NO_RBAC
56421+ bool "Disable RBAC system"
56422+ help
56423+ If you say Y here, the /dev/grsec device will be removed from the kernel,
56424+ preventing the RBAC system from being enabled. You should only say Y
56425+ here if you have no intention of using the RBAC system, so as to prevent
56426+ an attacker with root access from misusing the RBAC system to hide files
56427+ and processes when loadable module support and /dev/[k]mem have been
56428+ locked down.
56429+
56430+config GRKERNSEC_ACL_HIDEKERN
56431+ bool "Hide kernel processes"
56432+ help
56433+ If you say Y here, all kernel threads will be hidden to all
56434+ processes but those whose subject has the "view hidden processes"
56435+ flag.
56436+
56437+config GRKERNSEC_ACL_MAXTRIES
56438+ int "Maximum tries before password lockout"
56439+ default 3
56440+ help
56441+ This option enforces the maximum number of times a user can attempt
56442+ to authorize themselves with the grsecurity RBAC system before being
56443+ denied the ability to attempt authorization again for a specified time.
56444+ The lower the number, the harder it will be to brute-force a password.
56445+
56446+config GRKERNSEC_ACL_TIMEOUT
56447+ int "Time to wait after max password tries, in seconds"
56448+ default 30
56449+ help
56450+ This option specifies the time the user must wait after attempting to
56451+ authorize to the RBAC system with the maximum number of invalid
56452+ passwords. The higher the number, the harder it will be to brute-force
56453+ a password.
56454+
56455+endmenu
56456+menu "Filesystem Protections"
56457+depends on GRKERNSEC
56458+
56459+config GRKERNSEC_PROC
56460+ bool "Proc restrictions"
56461+ help
56462+ If you say Y here, the permissions of the /proc filesystem
56463+ will be altered to enhance system security and privacy. You MUST
56464+ choose either a user only restriction or a user and group restriction.
56465+ Depending upon the option you choose, you can either restrict users to
56466+ see only the processes they themselves run, or choose a group that can
56467+ view all processes and files normally restricted to root if you choose
56468+ the "restrict to user only" option. NOTE: If you're running identd as
56469+ a non-root user, you will have to run it as the group you specify here.
56470+
56471+config GRKERNSEC_PROC_USER
56472+ bool "Restrict /proc to user only"
56473+ depends on GRKERNSEC_PROC
56474+ help
56475+ If you say Y here, non-root users will only be able to view their own
56476+	  processes, and will be restricted from viewing network-related
56477+	  information and from viewing kernel symbol and module information.
56478+
56479+config GRKERNSEC_PROC_USERGROUP
56480+ bool "Allow special group"
56481+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
56482+ help
56483+ If you say Y here, you will be able to select a group that will be
56484+ able to view all processes and network-related information. If you've
56485+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
56486+ remain hidden. This option is useful if you want to run identd as
56487+ a non-root user.
56488+
56489+config GRKERNSEC_PROC_GID
56490+ int "GID for special group"
56491+ depends on GRKERNSEC_PROC_USERGROUP
56492+ default 1001
56493+
56494+config GRKERNSEC_PROC_ADD
56495+ bool "Additional restrictions"
56496+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
56497+ help
56498+ If you say Y here, additional restrictions will be placed on
56499+ /proc that keep normal users from viewing device information and
56500+ slabinfo information that could be useful for exploits.
56501+
56502+config GRKERNSEC_LINK
56503+ bool "Linking restrictions"
56504+ help
56505+ If you say Y here, /tmp race exploits will be prevented, since users
56506+ will no longer be able to follow symlinks owned by other users in
56507+ world-writable +t directories (e.g. /tmp), unless the owner of the
56508+	  symlink is the owner of the directory. Users will also not be
56509+ able to hardlink to files they do not own. If the sysctl option is
56510+ enabled, a sysctl option with name "linking_restrictions" is created.
56511+
56512+config GRKERNSEC_FIFO
56513+ bool "FIFO restrictions"
56514+ help
56515+ If you say Y here, users will not be able to write to FIFOs they don't
56516+ own in world-writable +t directories (e.g. /tmp), unless the owner of
56517+	  the FIFO is also the owner of the directory it's held in. If the sysctl
56518+ option is enabled, a sysctl option with name "fifo_restrictions" is
56519+ created.
56520+
56521+config GRKERNSEC_SYSFS_RESTRICT
56522+ bool "Sysfs/debugfs restriction"
56523+ depends on SYSFS
56524+ help
56525+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
56526+ any filesystem normally mounted under it (e.g. debugfs) will only
56527+ be accessible by root. These filesystems generally provide access
56528+ to hardware and debug information that isn't appropriate for unprivileged
56529+ users of the system. Sysfs and debugfs have also become a large source
56530+ of new vulnerabilities, ranging from infoleaks to local compromise.
56531+ There has been very little oversight with an eye toward security involved
56532+ in adding new exporters of information to these filesystems, so their
56533+ use is discouraged.
56534+ This option is equivalent to a chmod 0700 of the mount paths.
56535+
56536+config GRKERNSEC_ROFS
56537+ bool "Runtime read-only mount protection"
56538+ help
56539+ If you say Y here, a sysctl option with name "romount_protect" will
56540+ be created. By setting this option to 1 at runtime, filesystems
56541+ will be protected in the following ways:
56542+ * No new writable mounts will be allowed
56543+	  * Existing read-only mounts cannot be remounted read/write
56544+ * Write operations will be denied on all block devices
56545+ This option acts independently of grsec_lock: once it is set to 1,
56546+ it cannot be turned off. Therefore, please be mindful of the resulting
56547+ behavior if this option is enabled in an init script on a read-only
56548+ filesystem. This feature is mainly intended for secure embedded systems.
56549+
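
As a usage sketch (not part of the patch): with GRKERNSEC_SYSCTL and GRKERNSEC_ROFS enabled, the knob described above should appear under the kernel.grsecurity sysctl directory, so turning it on at runtime is a single privileged write; the path below assumes that layout.

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* assumed location of the knob registered by grsec_sysctl.c above */
	const char *path = "/proc/sys/kernel/grsecurity/romount_protect";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");		/* option not built in, or not privileged */
		return 1;
	}
	if (write(fd, "1\n", 2) != 2)	/* one-way: cannot be written back to 0 */
		perror("write");
	close(fd);
	return 0;
}
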
56550+config GRKERNSEC_CHROOT
56551+ bool "Chroot jail restrictions"
56552+ help
56553+ If you say Y here, you will be able to choose several options that will
56554+ make breaking out of a chrooted jail much more difficult. If you
56555+ encounter no software incompatibilities with the following options, it
56556+ is recommended that you enable each one.
56557+
56558+config GRKERNSEC_CHROOT_MOUNT
56559+ bool "Deny mounts"
56560+ depends on GRKERNSEC_CHROOT
56561+ help
56562+ If you say Y here, processes inside a chroot will not be able to
56563+ mount or remount filesystems. If the sysctl option is enabled, a
56564+ sysctl option with name "chroot_deny_mount" is created.
56565+
56566+config GRKERNSEC_CHROOT_DOUBLE
56567+ bool "Deny double-chroots"
56568+ depends on GRKERNSEC_CHROOT
56569+ help
56570+ If you say Y here, processes inside a chroot will not be able to chroot
56571+ again outside the chroot. This is a widely used method of breaking
56572+ out of a chroot jail and should not be allowed. If the sysctl
56573+ option is enabled, a sysctl option with name
56574+ "chroot_deny_chroot" is created.
56575+
56576+config GRKERNSEC_CHROOT_PIVOT
56577+ bool "Deny pivot_root in chroot"
56578+ depends on GRKERNSEC_CHROOT
56579+ help
56580+ If you say Y here, processes inside a chroot will not be able to use
56581+ a function called pivot_root() that was introduced in Linux 2.3.41. It
56582+	  works similarly to chroot in that it changes the root filesystem. This
56583+ function could be misused in a chrooted process to attempt to break out
56584+ of the chroot, and therefore should not be allowed. If the sysctl
56585+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
56586+ created.
56587+
56588+config GRKERNSEC_CHROOT_CHDIR
56589+ bool "Enforce chdir(\"/\") on all chroots"
56590+ depends on GRKERNSEC_CHROOT
56591+ help
56592+ If you say Y here, the current working directory of all newly-chrooted
56593+	  applications will be set to the root directory of the chroot.
56594+ The man page on chroot(2) states:
56595+ Note that this call does not change the current working
56596+ directory, so that `.' can be outside the tree rooted at
56597+ `/'. In particular, the super-user can escape from a
56598+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
56599+
56600+ It is recommended that you say Y here, since it's not known to break
56601+ any software. If the sysctl option is enabled, a sysctl option with
56602+ name "chroot_enforce_chdir" is created.
56603+
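
The man page excerpt above describes the escape this option (together with GRKERNSEC_CHROOT_DOUBLE) is meant to stop. The following minimal C program is a hedged sketch of that escape, not code from this patch; it assumes the process already runs as root inside a jail that still permits mkdir() and chroot().

    /* escape.c - sketch of the chroot(2) man page escape quoted above.
     * Assumes root privileges (CAP_SYS_CHROOT) inside the jail. */
    #include <errno.h>
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    int main(void)
    {
    	int i;

    	/* chroot() does not change the cwd, so "." stays outside
    	 * the new root after the call below. */
    	if (mkdir("foo", 0700) != 0 && errno != EEXIST) {
    		perror("mkdir");
    		return 1;
    	}
    	if (chroot("foo") != 0) {
    		perror("chroot foo");
    		return 1;
    	}
    	for (i = 0; i < 64; i++)	/* walk up past the original root */
    		chdir("..");
    	if (chroot(".") != 0) {		/* re-root at the real / */
    		perror("chroot .");
    		return 1;
    	}
    	execl("/bin/sh", "sh", (char *)NULL);	/* shell outside the jail */
    	perror("execl");
    	return 1;
    }

With this option enabled, the chroot("foo") call leaves the working directory at the new root, so the chdir("..") loop can never climb above the jail.
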
56604+config GRKERNSEC_CHROOT_CHMOD
56605+ bool "Deny (f)chmod +s"
56606+ depends on GRKERNSEC_CHROOT
56607+ help
56608+ If you say Y here, processes inside a chroot will not be able to chmod
56609+ or fchmod files to make them have suid or sgid bits. This protects
56610+ against another published method of breaking a chroot. If the sysctl
56611+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
56612+ created.
56613+
56614+config GRKERNSEC_CHROOT_FCHDIR
56615+ bool "Deny fchdir out of chroot"
56616+ depends on GRKERNSEC_CHROOT
56617+ help
56618+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
56619+ to a file descriptor of the chrooting process that points to a directory
56620+ outside the chroot will be stopped. If the sysctl option
56621+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
56622+
56623+config GRKERNSEC_CHROOT_MKNOD
56624+ bool "Deny mknod"
56625+ depends on GRKERNSEC_CHROOT
56626+ help
56627+ If you say Y here, processes inside a chroot will not be allowed to
56628+ mknod. The problem with using mknod inside a chroot is that it
56629+ would allow an attacker to create a device entry that is the same
56630+ as one on the physical root of your system, which could be anything
56631+ from the console device to a device for your hard drive (which
56632+ they could then use to wipe the drive or steal data). It is recommended
56633+ that you say Y here, unless you run into software incompatibilities.
56634+ If the sysctl option is enabled, a sysctl option with name
56635+ "chroot_deny_mknod" is created.
56636+
56637+config GRKERNSEC_CHROOT_SHMAT
56638+ bool "Deny shmat() out of chroot"
56639+ depends on GRKERNSEC_CHROOT
56640+ help
56641+ If you say Y here, processes inside a chroot will not be able to attach
56642+ to shared memory segments that were created outside of the chroot jail.
56643+ It is recommended that you say Y here. If the sysctl option is enabled,
56644+ a sysctl option with name "chroot_deny_shmat" is created.
56645+
56646+config GRKERNSEC_CHROOT_UNIX
56647+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
56648+ depends on GRKERNSEC_CHROOT
56649+ help
56650+ If you say Y here, processes inside a chroot will not be able to
56651+ connect to abstract (meaning not belonging to a filesystem) Unix
56652+ domain sockets that were bound outside of a chroot. It is recommended
56653+ that you say Y here. If the sysctl option is enabled, a sysctl option
56654+ with name "chroot_deny_unix" is created.
56655+
56656+config GRKERNSEC_CHROOT_FINDTASK
56657+ bool "Protect outside processes"
56658+ depends on GRKERNSEC_CHROOT
56659+ help
56660+ If you say Y here, processes inside a chroot will not be able to
56661+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
56662+ getsid, or view any process outside of the chroot. If the sysctl
56663+ option is enabled, a sysctl option with name "chroot_findtask" is
56664+ created.
56665+
56666+config GRKERNSEC_CHROOT_NICE
56667+ bool "Restrict priority changes"
56668+ depends on GRKERNSEC_CHROOT
56669+ help
56670+ If you say Y here, processes inside a chroot will not be able to raise
56671+ the priority of processes in the chroot, or alter the priority of
56672+ processes outside the chroot. This provides more security than simply
56673+ removing CAP_SYS_NICE from the process' capability set. If the
56674+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
56675+ is created.
56676+
56677+config GRKERNSEC_CHROOT_SYSCTL
56678+ bool "Deny sysctl writes"
56679+ depends on GRKERNSEC_CHROOT
56680+ help
56681+ If you say Y here, an attacker in a chroot will not be able to
56682+ write to sysctl entries, either by sysctl(2) or through a /proc
56683+ interface. It is strongly recommended that you say Y here. If the
56684+ sysctl option is enabled, a sysctl option with name
56685+ "chroot_deny_sysctl" is created.
56686+
56687+config GRKERNSEC_CHROOT_CAPS
56688+ bool "Capability restrictions"
56689+ depends on GRKERNSEC_CHROOT
56690+ help
56691+ If you say Y here, the capabilities on all root processes within a
56692+ chroot jail will be lowered to stop module insertion, raw i/o,
56693+ system and net admin tasks, rebooting the system, modifying immutable
56694+ files, modifying IPC owned by another, and changing the system time.
56695+ This is left as an option because it can break some apps. Disable this
56696+ if your chrooted apps are having problems performing those kinds of
56697+ tasks. If the sysctl option is enabled, a sysctl option with
56698+ name "chroot_caps" is created.
56699+
56700+endmenu
56701+menu "Kernel Auditing"
56702+depends on GRKERNSEC
56703+
56704+config GRKERNSEC_AUDIT_GROUP
56705+ bool "Single group for auditing"
56706+ help
56707+ If you say Y here, the exec, chdir, and (un)mount logging features
56708+ will only operate on a group you specify. This option is recommended
56709+ if you only want to watch certain users instead of collecting a large
56710+ volume of logs from the entire system. If the sysctl option is enabled,
56711+ a sysctl option with name "audit_group" is created.
56712+
56713+config GRKERNSEC_AUDIT_GID
56714+ int "GID for auditing"
56715+ depends on GRKERNSEC_AUDIT_GROUP
56716+ default 1007
56717+
56718+config GRKERNSEC_EXECLOG
56719+ bool "Exec logging"
56720+ help
56721+ If you say Y here, all execve() calls will be logged (since the
56722+ other exec*() calls are frontends to execve(), all execution
56723+ will be logged). Useful for shell servers that like to keep track
56724+ of their users. If the sysctl option is enabled, a sysctl option with
56725+ name "exec_logging" is created.
56726+ WARNING: When enabled, this option will produce a LOT of logs, especially
56727+ on an active system.
56728+
56729+config GRKERNSEC_RESLOG
56730+ bool "Resource logging"
56731+ help
56732+ If you say Y here, all attempts to overstep resource limits will
56733+ be logged with the resource name, the requested size, and the current
56734+ limit. It is highly recommended that you say Y here. If the sysctl
56735+ option is enabled, a sysctl option with name "resource_logging" is
56736+ created. If the RBAC system is enabled, the sysctl value is ignored.
56737+
56738+config GRKERNSEC_CHROOT_EXECLOG
56739+ bool "Log execs within chroot"
56740+ help
56741+ If you say Y here, all executions inside a chroot jail will be logged
56742+ to syslog. This can cause a large volume of logs if certain
56743+ applications (e.g. djb's daemontools) are installed on the system, and
56744+ is therefore left as an option. If the sysctl option is enabled, a
56745+ sysctl option with name "chroot_execlog" is created.
56746+
56747+config GRKERNSEC_AUDIT_PTRACE
56748+ bool "Ptrace logging"
56749+ help
56750+ If you say Y here, all attempts to attach to a process via ptrace
56751+ will be logged. If the sysctl option is enabled, a sysctl option
56752+ with name "audit_ptrace" is created.
56753+
56754+config GRKERNSEC_AUDIT_CHDIR
56755+ bool "Chdir logging"
56756+ help
56757+ If you say Y here, all chdir() calls will be logged. If the sysctl
56758+ option is enabled, a sysctl option with name "audit_chdir" is created.
56759+
56760+config GRKERNSEC_AUDIT_MOUNT
56761+ bool "(Un)Mount logging"
56762+ help
56763+ If you say Y here, all mounts and unmounts will be logged. If the
56764+ sysctl option is enabled, a sysctl option with name "audit_mount" is
56765+ created.
56766+
56767+config GRKERNSEC_SIGNAL
56768+ bool "Signal logging"
56769+ help
56770+ If you say Y here, certain important signals will be logged, such as
56771+ SIGSEGV, which will inform you when an error has occurred in a program,
56772+ which in some cases could indicate an exploit attempt.
56773+ If the sysctl option is enabled, a sysctl option with name
56774+ "signal_logging" is created.
56775+
56776+config GRKERNSEC_FORKFAIL
56777+ bool "Fork failure logging"
56778+ help
56779+ If you say Y here, all failed fork() attempts will be logged.
56780+ This could suggest a fork bomb, or someone attempting to overstep
56781+ their process limit. If the sysctl option is enabled, a sysctl option
56782+ with name "forkfail_logging" is created.
56783+
56784+config GRKERNSEC_TIME
56785+ bool "Time change logging"
56786+ help
56787+ If you say Y here, any changes of the system clock will be logged.
56788+ If the sysctl option is enabled, a sysctl option with name
56789+ "timechange_logging" is created.
56790+
56791+config GRKERNSEC_PROC_IPADDR
56792+ bool "/proc/<pid>/ipaddr support"
56793+ help
56794+ If you say Y here, a new entry will be added to each /proc/<pid>
56795+ directory that contains the IP address of the person using the task.
56796+ The IP is carried across local TCP and AF_UNIX stream sockets.
56797+ This information can be useful for IDS/IPSes to perform remote response
56798+ to a local attack. The entry is readable only by the owner of the
56799+ process (and by root if it has CAP_DAC_OVERRIDE, which can be removed via
56800+ the RBAC system), and thus does not create privacy concerns.
56801+
56802+config GRKERNSEC_RWXMAP_LOG
56803+ bool 'Denied RWX mmap/mprotect logging'
56804+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
56805+ help
56806+ If you say Y here, calls to mmap() and mprotect() with explicit
56807+ usage of PROT_WRITE and PROT_EXEC together will be logged when
56808+ denied by the PAX_MPROTECT feature. If the sysctl option is
56809+ enabled, a sysctl option with name "rwxmap_logging" is created.
56810+
56811+config GRKERNSEC_AUDIT_TEXTREL
56812+ bool 'ELF text relocations logging (READ HELP)'
56813+ depends on PAX_MPROTECT
56814+ help
56815+ If you say Y here, text relocations will be logged with the filename
56816+ of the offending library or binary. The purpose of the feature is
56817+ to help Linux distribution developers get rid of libraries and
56818+ binaries that need text relocations which hinder the future progress
56819+ of PaX. Only Linux distribution developers should say Y here, and
56820+ never on a production machine, as this option creates an information
56821+ leak that could aid an attacker in defeating the randomization of
56822+ a single memory region. If the sysctl option is enabled, a sysctl
56823+ option with name "audit_textrel" is created.
56824+
56825+endmenu
56826+
56827+menu "Executable Protections"
56828+depends on GRKERNSEC
56829+
56830+config GRKERNSEC_EXECVE
56831+ bool "Enforce RLIMIT_NPROC on execs"
56832+ help
56833+ If you say Y here, users with a resource limit on processes will
56834+ have the value checked during execve() calls. The current system
56835+ only checks the system limit during fork() calls. If the sysctl option
56836+ is enabled, a sysctl option with name "execve_limiting" is created.
56837+
56838+config GRKERNSEC_DMESG
56839+ bool "Dmesg(8) restriction"
56840+ help
56841+ If you say Y here, non-root users will not be able to use dmesg(8)
56842+ to view up to the last 4kb of messages in the kernel's log buffer.
56843+ The kernel's log buffer often contains kernel addresses and other
56844+ identifying information useful to an attacker in fingerprinting a
56845+ system for a targeted exploit.
56846+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
56847+ created.
56848+
56849+config GRKERNSEC_HARDEN_PTRACE
56850+ bool "Deter ptrace-based process snooping"
56851+ help
56852+ If you say Y here, TTY sniffers and other malicious monitoring
56853+ programs implemented through ptrace will be defeated. If you
56854+ have been using the RBAC system, this option has already been
56855+ enabled for several years for all users, with the ability to make
56856+ fine-grained exceptions.
56857+
56858+ This option only affects the ability of non-root users to ptrace
56859+ processes that are not a descendant of the ptracing process.
56860+ This means that strace ./binary and gdb ./binary will still work,
56861+ but attaching to arbitrary processes will not. If the sysctl
56862+ option is enabled, a sysctl option with name "harden_ptrace" is
56863+ created.
56864+
56865+config GRKERNSEC_TPE
56866+ bool "Trusted Path Execution (TPE)"
56867+ help
56868+ If you say Y here, you will be able to choose a GID to add to the
56869+ supplementary groups of users you want to mark as "untrusted."
56870+ These users will not be able to execute any files that are not in
56871+ root-owned directories writable only by root. If the sysctl option
56872+ is enabled, a sysctl option with name "tpe" is created.
56873+
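
As a rough user-space approximation, not the in-kernel check from this patch, the trusted-path rule described above comes down to the containing directory being owned by root and writable only by root. dir_is_trusted() below is a made-up helper name.

    /* tpe_check.c - illustrative approximation of the trusted-path rule
     * described above; not the kernel implementation. */
    #include <stdio.h>
    #include <sys/types.h>
    #include <sys/stat.h>

    static int dir_is_trusted(const char *dir)
    {
    	struct stat st;

    	if (stat(dir, &st) != 0)
    		return 0;
    	if (st.st_uid != 0)			/* must be owned by root */
    		return 0;
    	if (st.st_mode & (S_IWGRP | S_IWOTH))	/* writable only by root */
    		return 0;
    	return 1;
    }

    int main(int argc, char *argv[])
    {
    	const char *dir = argc > 1 ? argv[1] : ".";

    	printf("%s: %s\n", dir, dir_is_trusted(dir) ? "trusted" : "untrusted");
    	return 0;
    }
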
56874+config GRKERNSEC_TPE_ALL
56875+ bool "Partially restrict all non-root users"
56876+ depends on GRKERNSEC_TPE
56877+ help
56878+ If you say Y here, all non-root users will be covered under
56879+ a weaker TPE restriction. This is separate from, and in addition to,
56880+ the main TPE options that you have selected elsewhere. Thus, if a
56881+ "trusted" GID is chosen, this restriction applies even to that GID.
56882+ Under this restriction, all non-root users will only be allowed to
56883+ execute files in directories they own that are not group or
56884+ world-writable, or in directories owned by root and writable only by
56885+ root. If the sysctl option is enabled, a sysctl option with name
56886+ "tpe_restrict_all" is created.
56887+
56888+config GRKERNSEC_TPE_INVERT
56889+ bool "Invert GID option"
56890+ depends on GRKERNSEC_TPE
56891+ help
56892+ If you say Y here, the group you specify in the TPE configuration will
56893+ decide what group TPE restrictions will be *disabled* for. This
56894+ option is useful if you want TPE restrictions to be applied to most
56895+ users on the system. If the sysctl option is enabled, a sysctl option
56896+ with name "tpe_invert" is created. Unlike other sysctl options, this
56897+ entry will default to on for backward-compatibility.
56898+
56899+config GRKERNSEC_TPE_GID
56900+ int "GID for untrusted users"
56901+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
56902+ default 1005
56903+ help
56904+ Setting this GID determines what group TPE restrictions will be
56905+ *enabled* for. If the sysctl option is enabled, a sysctl option
56906+ with name "tpe_gid" is created.
56907+
56908+config GRKERNSEC_TPE_GID
56909+ int "GID for trusted users"
56910+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
56911+ default 1005
56912+ help
56913+ Setting this GID determines what group TPE restrictions will be
56914+ *disabled* for. If the sysctl option is enabled, a sysctl option
56915+ with name "tpe_gid" is created.
56916+
56917+endmenu
56918+menu "Network Protections"
56919+depends on GRKERNSEC
56920+
56921+config GRKERNSEC_RANDNET
56922+ bool "Larger entropy pools"
56923+ help
56924+ If you say Y here, the entropy pools used for many features of Linux
56925+ and grsecurity will be doubled in size. Since several grsecurity
56926+ features use additional randomness, it is recommended that you say Y
56927+ here. Saying Y here has a similar effect to modifying
56928+ /proc/sys/kernel/random/poolsize.
56929+
56930+config GRKERNSEC_BLACKHOLE
56931+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
56932+ help
56933+ If you say Y here, neither TCP resets nor ICMP
56934+ destination-unreachable packets will be sent in response to packets
56935+ sent to ports for which no associated listening process exists.
56936+ This feature supports both IPv4 and IPv6 and exempts the
56937+ loopback interface from blackholing. Enabling this feature
56938+ makes a host more resilient to DoS attacks and reduces network
56939+ visibility against scanners.
56940+
56941+ The blackhole feature as-implemented is equivalent to the FreeBSD
56942+ blackhole feature, as it prevents RST responses to all packets, not
56943+ just SYNs. Under most application behavior this causes no
56944+ problems, but applications (like haproxy) may not close certain
56945+ connections in a way that cleanly terminates them on the remote
56946+ end, leaving the remote host in LAST_ACK state. Because of this
56947+ side-effect and to prevent intentional LAST_ACK DoSes, this
56948+ feature also adds automatic mitigation against such attacks.
56949+ The mitigation drastically reduces the amount of time a socket
56950+ can spend in LAST_ACK state. If you're using haproxy and not
56951+ all servers it connects to have this option enabled, consider
56952+ disabling this feature on the haproxy host.
56953+
56954+ If the sysctl option is enabled, two sysctl options with names
56955+ "ip_blackhole" and "lastack_retries" will be created.
56956+ While "ip_blackhole" takes the standard zero/non-zero on/off
56957+ toggle, "lastack_retries" uses the same kinds of values as
56958+ "tcp_retries1" and "tcp_retries2". The default value of 4
56959+ prevents a socket from lasting more than 45 seconds in LAST_ACK
56960+ state.
56961+
56962+config GRKERNSEC_SOCKET
56963+ bool "Socket restrictions"
56964+ help
56965+ If you say Y here, you will be able to choose from several options.
56966+ If you assign a GID on your system and add it to the supplementary
56967+ groups of users you want to restrict socket access to, this patch
56968+ will apply up to three restrictions, based on the option(s) you choose.
56969+
56970+config GRKERNSEC_SOCKET_ALL
56971+ bool "Deny any sockets to group"
56972+ depends on GRKERNSEC_SOCKET
56973+ help
56974+ If you say Y here, you will be able to choose a GID whose users will
56975+ be unable to connect to other hosts from your machine or run server
56976+ applications from your machine. If the sysctl option is enabled, a
56977+ sysctl option with name "socket_all" is created.
56978+
56979+config GRKERNSEC_SOCKET_ALL_GID
56980+ int "GID to deny all sockets for"
56981+ depends on GRKERNSEC_SOCKET_ALL
56982+ default 1004
56983+ help
56984+ Here you can choose the GID to disable socket access for. Remember to
56985+ add the users you want socket access disabled for to the GID
56986+ specified here. If the sysctl option is enabled, a sysctl option
56987+ with name "socket_all_gid" is created.
56988+
56989+config GRKERNSEC_SOCKET_CLIENT
56990+ bool "Deny client sockets to group"
56991+ depends on GRKERNSEC_SOCKET
56992+ help
56993+ If you say Y here, you will be able to choose a GID whose users will
56994+ be unable to connect to other hosts from your machine, but will be
56995+ able to run servers. If this option is enabled, all users in the group
56996+ you specify will have to use passive mode when initiating ftp transfers
56997+ from the shell on your machine. If the sysctl option is enabled, a
56998+ sysctl option with name "socket_client" is created.
56999+
57000+config GRKERNSEC_SOCKET_CLIENT_GID
57001+ int "GID to deny client sockets for"
57002+ depends on GRKERNSEC_SOCKET_CLIENT
57003+ default 1003
57004+ help
57005+ Here you can choose the GID to disable client socket access for.
57006+ Remember to add the users you want client socket access disabled for to
57007+ the GID specified here. If the sysctl option is enabled, a sysctl
57008+ option with name "socket_client_gid" is created.
57009+
57010+config GRKERNSEC_SOCKET_SERVER
57011+ bool "Deny server sockets to group"
57012+ depends on GRKERNSEC_SOCKET
57013+ help
57014+ If you say Y here, you will be able to choose a GID whose users will
57015+ be unable to run server applications from your machine. If the sysctl
57016+ option is enabled, a sysctl option with name "socket_server" is created.
57017+
57018+config GRKERNSEC_SOCKET_SERVER_GID
57019+ int "GID to deny server sockets for"
57020+ depends on GRKERNSEC_SOCKET_SERVER
57021+ default 1002
57022+ help
57023+ Here you can choose the GID to disable server socket access for.
57024+ Remember to add the users you want server socket access disabled for to
57025+ the GID specified here. If the sysctl option is enabled, a sysctl
57026+ option with name "socket_server_gid" is created.
57027+
57028+endmenu
57029+menu "Sysctl support"
57030+depends on GRKERNSEC && SYSCTL
57031+
57032+config GRKERNSEC_SYSCTL
57033+ bool "Sysctl support"
57034+ help
57035+ If you say Y here, you will be able to change the options that
57036+ grsecurity runs with at bootup, without having to recompile your
57037+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
57038+ to enable (1) or disable (0) various features. All the sysctl entries
57039+ are mutable until the "grsec_lock" entry is set to a non-zero value.
57040+ All features enabled in the kernel configuration are disabled at boot
57041+ if you do not say Y to the "Turn on features by default" option.
57042+ All options should be set at startup, and the grsec_lock entry should
57043+ be set to a non-zero value after all the options are set.
57044+ *THIS IS EXTREMELY IMPORTANT*
57045+
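
As a minimal sketch of the boot-time sequence recommended above, written in C to match the rest of this document, the snippet below enables one example entry and then sets grsec_lock. The chroot_deny_mount name comes from the help text earlier in this menu; the exact set of entries present depends on the options compiled in.

    /* grsec_lockdown.c - sketch: set grsecurity sysctls, then lock them.
     * Paths follow the /proc/sys/kernel/grsecurity layout described above. */
    #include <stdio.h>

    static int write_sysctl(const char *path, const char *val)
    {
    	FILE *f = fopen(path, "w");

    	if (!f) {
    		perror(path);
    		return -1;
    	}
    	fputs(val, f);
    	return fclose(f);
    }

    int main(void)
    {
    	/* enable one example feature first ... */
    	write_sysctl("/proc/sys/kernel/grsecurity/chroot_deny_mount", "1");
    	/* ... then make all grsecurity sysctls immutable until reboot */
    	return write_sysctl("/proc/sys/kernel/grsecurity/grsec_lock", "1") ? 1 : 0;
    }
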
57046+config GRKERNSEC_SYSCTL_DISTRO
57047+ bool "Extra sysctl support for distro makers (READ HELP)"
57048+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
57049+ help
57050+ If you say Y here, additional sysctl options will be created
57051+ for features that affect processes running as root. Therefore,
57052+ it is critical when using this option that the grsec_lock entry be
57053+ enabled after boot. Only distros that ship prebuilt kernel packages
57054+ with this option enabled and that can ensure grsec_lock is set
57055+ after boot should use this option.
57056+ *Failure to set grsec_lock after boot makes all grsec features
57057+ this option covers useless*
57058+
57059+ Currently this option creates the following sysctl entries:
57060+ "Disable Privileged I/O": "disable_priv_io"
57061+
57062+config GRKERNSEC_SYSCTL_ON
57063+ bool "Turn on features by default"
57064+ depends on GRKERNSEC_SYSCTL
57065+ help
57066+ If you say Y here, the features enabled in the kernel configuration
57067+ will be turned on at boot time instead of starting out disabled.
57068+ It is recommended you say Y here unless
57069+ there is some reason you would want all sysctl-tunable features to
57070+ be disabled by default. As mentioned elsewhere, it is important
57071+ to enable the grsec_lock entry once you have finished modifying
57072+ the sysctl entries.
57073+
57074+endmenu
57075+menu "Logging Options"
57076+depends on GRKERNSEC
57077+
57078+config GRKERNSEC_FLOODTIME
57079+ int "Seconds in between log messages (minimum)"
57080+ default 10
57081+ help
57082+ This option allows you to enforce a minimum number of seconds between
57083+ grsecurity log messages. The default should be suitable for most
57084+ people; however, if you choose to change it, choose a value small enough
57085+ to allow informative logs to be produced, but large enough to
57086+ prevent flooding.
57087+
57088+config GRKERNSEC_FLOODBURST
57089+ int "Number of messages in a burst (maximum)"
57090+ default 4
57091+ help
57092+ This option allows you to choose the maximum number of messages allowed
57093+ within the flood time interval you chose in a separate option. The
57094+ default should be suitable for most people; however, if you find that
57095+ many of your logs are being interpreted as flooding, you may want to
57096+ raise this value.
57097+
57098+endmenu
57099+
57100+endmenu
57101diff -urNp linux-2.6.32.43/grsecurity/Makefile linux-2.6.32.43/grsecurity/Makefile
57102--- linux-2.6.32.43/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
57103+++ linux-2.6.32.43/grsecurity/Makefile 2011-05-24 20:27:46.000000000 -0400
57104@@ -0,0 +1,33 @@
57105+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
57106+# during 2001-2009 it was completely redesigned by Brad Spengler
57107+# into an RBAC system
57108+#
57109+# All code in this directory and various hooks inserted throughout the kernel
57110+# are copyright Brad Spengler - Open Source Security, Inc., and released
57111+# under the GPL v2 or higher
57112+
57113+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
57114+ grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
57115+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
57116+
57117+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
57118+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
57119+ gracl_learn.o grsec_log.o
57120+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
57121+
57122+ifdef CONFIG_NET
57123+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
57124+endif
57125+
57126+ifndef CONFIG_GRKERNSEC
57127+obj-y += grsec_disabled.o
57128+endif
57129+
57130+ifdef CONFIG_GRKERNSEC_HIDESYM
57131+extra-y := grsec_hidesym.o
57132+$(obj)/grsec_hidesym.o:
57133+ @-chmod -f 500 /boot
57134+ @-chmod -f 500 /lib/modules
57135+ @-chmod -f 700 .
57136+ @echo ' grsec: protected kernel image paths'
57137+endif
57138diff -urNp linux-2.6.32.43/include/acpi/acpi_bus.h linux-2.6.32.43/include/acpi/acpi_bus.h
57139--- linux-2.6.32.43/include/acpi/acpi_bus.h 2011-03-27 14:31:47.000000000 -0400
57140+++ linux-2.6.32.43/include/acpi/acpi_bus.h 2011-08-05 20:33:55.000000000 -0400
57141@@ -107,7 +107,7 @@ struct acpi_device_ops {
57142 acpi_op_bind bind;
57143 acpi_op_unbind unbind;
57144 acpi_op_notify notify;
57145-};
57146+} __no_const;
57147
57148 #define ACPI_DRIVER_ALL_NOTIFY_EVENTS 0x1 /* system AND device events */
57149
57150diff -urNp linux-2.6.32.43/include/acpi/acpi_drivers.h linux-2.6.32.43/include/acpi/acpi_drivers.h
57151--- linux-2.6.32.43/include/acpi/acpi_drivers.h 2011-03-27 14:31:47.000000000 -0400
57152+++ linux-2.6.32.43/include/acpi/acpi_drivers.h 2011-04-17 15:56:46.000000000 -0400
57153@@ -119,8 +119,8 @@ int acpi_processor_set_thermal_limit(acp
57154 Dock Station
57155 -------------------------------------------------------------------------- */
57156 struct acpi_dock_ops {
57157- acpi_notify_handler handler;
57158- acpi_notify_handler uevent;
57159+ const acpi_notify_handler handler;
57160+ const acpi_notify_handler uevent;
57161 };
57162
57163 #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE)
57164@@ -128,7 +128,7 @@ extern int is_dock_device(acpi_handle ha
57165 extern int register_dock_notifier(struct notifier_block *nb);
57166 extern void unregister_dock_notifier(struct notifier_block *nb);
57167 extern int register_hotplug_dock_device(acpi_handle handle,
57168- struct acpi_dock_ops *ops,
57169+ const struct acpi_dock_ops *ops,
57170 void *context);
57171 extern void unregister_hotplug_dock_device(acpi_handle handle);
57172 #else
57173@@ -144,7 +144,7 @@ static inline void unregister_dock_notif
57174 {
57175 }
57176 static inline int register_hotplug_dock_device(acpi_handle handle,
57177- struct acpi_dock_ops *ops,
57178+ const struct acpi_dock_ops *ops,
57179 void *context)
57180 {
57181 return -ENODEV;
57182diff -urNp linux-2.6.32.43/include/asm-generic/atomic-long.h linux-2.6.32.43/include/asm-generic/atomic-long.h
57183--- linux-2.6.32.43/include/asm-generic/atomic-long.h 2011-03-27 14:31:47.000000000 -0400
57184+++ linux-2.6.32.43/include/asm-generic/atomic-long.h 2011-07-13 22:21:25.000000000 -0400
57185@@ -22,6 +22,12 @@
57186
57187 typedef atomic64_t atomic_long_t;
57188
57189+#ifdef CONFIG_PAX_REFCOUNT
57190+typedef atomic64_unchecked_t atomic_long_unchecked_t;
57191+#else
57192+typedef atomic64_t atomic_long_unchecked_t;
57193+#endif
57194+
57195 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
57196
57197 static inline long atomic_long_read(atomic_long_t *l)
57198@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom
57199 return (long)atomic64_read(v);
57200 }
57201
57202+#ifdef CONFIG_PAX_REFCOUNT
57203+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57204+{
57205+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57206+
57207+ return (long)atomic64_read_unchecked(v);
57208+}
57209+#endif
57210+
57211 static inline void atomic_long_set(atomic_long_t *l, long i)
57212 {
57213 atomic64_t *v = (atomic64_t *)l;
57214@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi
57215 atomic64_set(v, i);
57216 }
57217
57218+#ifdef CONFIG_PAX_REFCOUNT
57219+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57220+{
57221+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57222+
57223+ atomic64_set_unchecked(v, i);
57224+}
57225+#endif
57226+
57227 static inline void atomic_long_inc(atomic_long_t *l)
57228 {
57229 atomic64_t *v = (atomic64_t *)l;
57230@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi
57231 atomic64_inc(v);
57232 }
57233
57234+#ifdef CONFIG_PAX_REFCOUNT
57235+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57236+{
57237+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57238+
57239+ atomic64_inc_unchecked(v);
57240+}
57241+#endif
57242+
57243 static inline void atomic_long_dec(atomic_long_t *l)
57244 {
57245 atomic64_t *v = (atomic64_t *)l;
57246@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi
57247 atomic64_dec(v);
57248 }
57249
57250+#ifdef CONFIG_PAX_REFCOUNT
57251+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57252+{
57253+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57254+
57255+ atomic64_dec_unchecked(v);
57256+}
57257+#endif
57258+
57259 static inline void atomic_long_add(long i, atomic_long_t *l)
57260 {
57261 atomic64_t *v = (atomic64_t *)l;
57262@@ -59,6 +101,15 @@ static inline void atomic_long_add(long
57263 atomic64_add(i, v);
57264 }
57265
57266+#ifdef CONFIG_PAX_REFCOUNT
57267+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57268+{
57269+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57270+
57271+ atomic64_add_unchecked(i, v);
57272+}
57273+#endif
57274+
57275 static inline void atomic_long_sub(long i, atomic_long_t *l)
57276 {
57277 atomic64_t *v = (atomic64_t *)l;
57278@@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur
57279 return (long)atomic64_inc_return(v);
57280 }
57281
57282+#ifdef CONFIG_PAX_REFCOUNT
57283+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57284+{
57285+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
57286+
57287+ return (long)atomic64_inc_return_unchecked(v);
57288+}
57289+#endif
57290+
57291 static inline long atomic_long_dec_return(atomic_long_t *l)
57292 {
57293 atomic64_t *v = (atomic64_t *)l;
57294@@ -140,6 +200,12 @@ static inline long atomic_long_add_unles
57295
57296 typedef atomic_t atomic_long_t;
57297
57298+#ifdef CONFIG_PAX_REFCOUNT
57299+typedef atomic_unchecked_t atomic_long_unchecked_t;
57300+#else
57301+typedef atomic_t atomic_long_unchecked_t;
57302+#endif
57303+
57304 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
57305 static inline long atomic_long_read(atomic_long_t *l)
57306 {
57307@@ -148,6 +214,15 @@ static inline long atomic_long_read(atom
57308 return (long)atomic_read(v);
57309 }
57310
57311+#ifdef CONFIG_PAX_REFCOUNT
57312+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
57313+{
57314+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57315+
57316+ return (long)atomic_read_unchecked(v);
57317+}
57318+#endif
57319+
57320 static inline void atomic_long_set(atomic_long_t *l, long i)
57321 {
57322 atomic_t *v = (atomic_t *)l;
57323@@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi
57324 atomic_set(v, i);
57325 }
57326
57327+#ifdef CONFIG_PAX_REFCOUNT
57328+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
57329+{
57330+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57331+
57332+ atomic_set_unchecked(v, i);
57333+}
57334+#endif
57335+
57336 static inline void atomic_long_inc(atomic_long_t *l)
57337 {
57338 atomic_t *v = (atomic_t *)l;
57339@@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi
57340 atomic_inc(v);
57341 }
57342
57343+#ifdef CONFIG_PAX_REFCOUNT
57344+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
57345+{
57346+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57347+
57348+ atomic_inc_unchecked(v);
57349+}
57350+#endif
57351+
57352 static inline void atomic_long_dec(atomic_long_t *l)
57353 {
57354 atomic_t *v = (atomic_t *)l;
57355@@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi
57356 atomic_dec(v);
57357 }
57358
57359+#ifdef CONFIG_PAX_REFCOUNT
57360+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
57361+{
57362+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57363+
57364+ atomic_dec_unchecked(v);
57365+}
57366+#endif
57367+
57368 static inline void atomic_long_add(long i, atomic_long_t *l)
57369 {
57370 atomic_t *v = (atomic_t *)l;
57371@@ -176,6 +278,15 @@ static inline void atomic_long_add(long
57372 atomic_add(i, v);
57373 }
57374
57375+#ifdef CONFIG_PAX_REFCOUNT
57376+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
57377+{
57378+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57379+
57380+ atomic_add_unchecked(i, v);
57381+}
57382+#endif
57383+
57384 static inline void atomic_long_sub(long i, atomic_long_t *l)
57385 {
57386 atomic_t *v = (atomic_t *)l;
57387@@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur
57388 return (long)atomic_inc_return(v);
57389 }
57390
57391+#ifdef CONFIG_PAX_REFCOUNT
57392+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
57393+{
57394+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
57395+
57396+ return (long)atomic_inc_return_unchecked(v);
57397+}
57398+#endif
57399+
57400 static inline long atomic_long_dec_return(atomic_long_t *l)
57401 {
57402 atomic_t *v = (atomic_t *)l;
57403@@ -255,4 +375,47 @@ static inline long atomic_long_add_unles
57404
57405 #endif /* BITS_PER_LONG == 64 */
57406
57407+#ifdef CONFIG_PAX_REFCOUNT
57408+static inline void pax_refcount_needs_these_functions(void)
57409+{
57410+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
57411+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
57412+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
57413+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
57414+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
57415+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
57416+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
57417+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
57418+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
57419+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
57420+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
57421+
57422+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
57423+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
57424+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
57425+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
57426+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
57427+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
57428+}
57429+#else
57430+#define atomic_read_unchecked(v) atomic_read(v)
57431+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
57432+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
57433+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
57434+#define atomic_inc_unchecked(v) atomic_inc(v)
57435+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
57436+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
57437+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
57438+#define atomic_dec_unchecked(v) atomic_dec(v)
57439+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
57440+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
57441+
57442+#define atomic_long_read_unchecked(v) atomic_long_read(v)
57443+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
57444+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
57445+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
57446+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
57447+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
57448+#endif
57449+
57450 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
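
The _unchecked types and helpers introduced above let counters that may legitimately wrap (statistics, identifiers) opt out of PAX_REFCOUNT overflow detection while genuine reference counts remain protected. Below is a brief kernel-context sketch of that split; example_obj and its fields are hypothetical.

    /* Illustrative only; example_obj and its fields are made up. */
    #include <asm/atomic.h>

    struct example_obj {
    	atomic_t		refcnt;		/* overflow-checked under PAX_REFCOUNT */
    	atomic_unchecked_t	rx_packets;	/* benign wrap-around is acceptable */
    };

    static inline void example_obj_get(struct example_obj *obj)
    {
    	atomic_inc(&obj->refcnt);		/* protected reference count */
    }

    static inline void example_obj_count_rx(struct example_obj *obj)
    {
    	atomic_inc_unchecked(&obj->rx_packets);	/* never trips the detector */
    }
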
57451diff -urNp linux-2.6.32.43/include/asm-generic/cache.h linux-2.6.32.43/include/asm-generic/cache.h
57452--- linux-2.6.32.43/include/asm-generic/cache.h 2011-03-27 14:31:47.000000000 -0400
57453+++ linux-2.6.32.43/include/asm-generic/cache.h 2011-07-06 19:53:33.000000000 -0400
57454@@ -6,7 +6,7 @@
57455 * cache lines need to provide their own cache.h.
57456 */
57457
57458-#define L1_CACHE_SHIFT 5
57459-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
57460+#define L1_CACHE_SHIFT 5UL
57461+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
57462
57463 #endif /* __ASM_GENERIC_CACHE_H */
57464diff -urNp linux-2.6.32.43/include/asm-generic/dma-mapping-common.h linux-2.6.32.43/include/asm-generic/dma-mapping-common.h
57465--- linux-2.6.32.43/include/asm-generic/dma-mapping-common.h 2011-03-27 14:31:47.000000000 -0400
57466+++ linux-2.6.32.43/include/asm-generic/dma-mapping-common.h 2011-04-17 15:56:46.000000000 -0400
57467@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_
57468 enum dma_data_direction dir,
57469 struct dma_attrs *attrs)
57470 {
57471- struct dma_map_ops *ops = get_dma_ops(dev);
57472+ const struct dma_map_ops *ops = get_dma_ops(dev);
57473 dma_addr_t addr;
57474
57475 kmemcheck_mark_initialized(ptr, size);
57476@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr
57477 enum dma_data_direction dir,
57478 struct dma_attrs *attrs)
57479 {
57480- struct dma_map_ops *ops = get_dma_ops(dev);
57481+ const struct dma_map_ops *ops = get_dma_ops(dev);
57482
57483 BUG_ON(!valid_dma_direction(dir));
57484 if (ops->unmap_page)
57485@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc
57486 int nents, enum dma_data_direction dir,
57487 struct dma_attrs *attrs)
57488 {
57489- struct dma_map_ops *ops = get_dma_ops(dev);
57490+ const struct dma_map_ops *ops = get_dma_ops(dev);
57491 int i, ents;
57492 struct scatterlist *s;
57493
57494@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st
57495 int nents, enum dma_data_direction dir,
57496 struct dma_attrs *attrs)
57497 {
57498- struct dma_map_ops *ops = get_dma_ops(dev);
57499+ const struct dma_map_ops *ops = get_dma_ops(dev);
57500
57501 BUG_ON(!valid_dma_direction(dir));
57502 debug_dma_unmap_sg(dev, sg, nents, dir);
57503@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st
57504 size_t offset, size_t size,
57505 enum dma_data_direction dir)
57506 {
57507- struct dma_map_ops *ops = get_dma_ops(dev);
57508+ const struct dma_map_ops *ops = get_dma_ops(dev);
57509 dma_addr_t addr;
57510
57511 kmemcheck_mark_initialized(page_address(page) + offset, size);
57512@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st
57513 static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
57514 size_t size, enum dma_data_direction dir)
57515 {
57516- struct dma_map_ops *ops = get_dma_ops(dev);
57517+ const struct dma_map_ops *ops = get_dma_ops(dev);
57518
57519 BUG_ON(!valid_dma_direction(dir));
57520 if (ops->unmap_page)
57521@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c
57522 size_t size,
57523 enum dma_data_direction dir)
57524 {
57525- struct dma_map_ops *ops = get_dma_ops(dev);
57526+ const struct dma_map_ops *ops = get_dma_ops(dev);
57527
57528 BUG_ON(!valid_dma_direction(dir));
57529 if (ops->sync_single_for_cpu)
57530@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d
57531 dma_addr_t addr, size_t size,
57532 enum dma_data_direction dir)
57533 {
57534- struct dma_map_ops *ops = get_dma_ops(dev);
57535+ const struct dma_map_ops *ops = get_dma_ops(dev);
57536
57537 BUG_ON(!valid_dma_direction(dir));
57538 if (ops->sync_single_for_device)
57539@@ -123,7 +123,7 @@ static inline void dma_sync_single_range
57540 size_t size,
57541 enum dma_data_direction dir)
57542 {
57543- struct dma_map_ops *ops = get_dma_ops(dev);
57544+ const struct dma_map_ops *ops = get_dma_ops(dev);
57545
57546 BUG_ON(!valid_dma_direction(dir));
57547 if (ops->sync_single_range_for_cpu) {
57548@@ -140,7 +140,7 @@ static inline void dma_sync_single_range
57549 size_t size,
57550 enum dma_data_direction dir)
57551 {
57552- struct dma_map_ops *ops = get_dma_ops(dev);
57553+ const struct dma_map_ops *ops = get_dma_ops(dev);
57554
57555 BUG_ON(!valid_dma_direction(dir));
57556 if (ops->sync_single_range_for_device) {
57557@@ -155,7 +155,7 @@ static inline void
57558 dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
57559 int nelems, enum dma_data_direction dir)
57560 {
57561- struct dma_map_ops *ops = get_dma_ops(dev);
57562+ const struct dma_map_ops *ops = get_dma_ops(dev);
57563
57564 BUG_ON(!valid_dma_direction(dir));
57565 if (ops->sync_sg_for_cpu)
57566@@ -167,7 +167,7 @@ static inline void
57567 dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
57568 int nelems, enum dma_data_direction dir)
57569 {
57570- struct dma_map_ops *ops = get_dma_ops(dev);
57571+ const struct dma_map_ops *ops = get_dma_ops(dev);
57572
57573 BUG_ON(!valid_dma_direction(dir));
57574 if (ops->sync_sg_for_device)
57575diff -urNp linux-2.6.32.43/include/asm-generic/futex.h linux-2.6.32.43/include/asm-generic/futex.h
57576--- linux-2.6.32.43/include/asm-generic/futex.h 2011-03-27 14:31:47.000000000 -0400
57577+++ linux-2.6.32.43/include/asm-generic/futex.h 2011-04-17 15:56:46.000000000 -0400
57578@@ -6,7 +6,7 @@
57579 #include <asm/errno.h>
57580
57581 static inline int
57582-futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
57583+futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
57584 {
57585 int op = (encoded_op >> 28) & 7;
57586 int cmp = (encoded_op >> 24) & 15;
57587@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op,
57588 }
57589
57590 static inline int
57591-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
57592+futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval)
57593 {
57594 return -ENOSYS;
57595 }
57596diff -urNp linux-2.6.32.43/include/asm-generic/int-l64.h linux-2.6.32.43/include/asm-generic/int-l64.h
57597--- linux-2.6.32.43/include/asm-generic/int-l64.h 2011-03-27 14:31:47.000000000 -0400
57598+++ linux-2.6.32.43/include/asm-generic/int-l64.h 2011-04-17 15:56:46.000000000 -0400
57599@@ -46,6 +46,8 @@ typedef unsigned int u32;
57600 typedef signed long s64;
57601 typedef unsigned long u64;
57602
57603+typedef unsigned int intoverflow_t __attribute__ ((mode(TI)));
57604+
57605 #define S8_C(x) x
57606 #define U8_C(x) x ## U
57607 #define S16_C(x) x
57608diff -urNp linux-2.6.32.43/include/asm-generic/int-ll64.h linux-2.6.32.43/include/asm-generic/int-ll64.h
57609--- linux-2.6.32.43/include/asm-generic/int-ll64.h 2011-03-27 14:31:47.000000000 -0400
57610+++ linux-2.6.32.43/include/asm-generic/int-ll64.h 2011-04-17 15:56:46.000000000 -0400
57611@@ -51,6 +51,8 @@ typedef unsigned int u32;
57612 typedef signed long long s64;
57613 typedef unsigned long long u64;
57614
57615+typedef unsigned long long intoverflow_t;
57616+
57617 #define S8_C(x) x
57618 #define U8_C(x) x ## U
57619 #define S16_C(x) x
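
The intoverflow_t typedefs added to int-l64.h and int-ll64.h provide a type wide enough to hold the product of two native-width values (128-bit on 64-bit builds, 64-bit on 32-bit builds). The snippet below is a generic, hedged illustration of how such a type can guard an allocation size; example_alloc_array() is made up and is not the wrapper this patch actually installs.

    /* Kernel-context sketch; not the patch's actual allocation wrappers. */
    #include <linux/kernel.h>
    #include <linux/slab.h>

    static inline void *example_alloc_array(size_t n, size_t size)
    {
    	/* the product is computed in the wider type, so a wrap of the
    	 * native unsigned long range is detectable before allocating */
    	if ((intoverflow_t)n * size > ULONG_MAX)
    		return NULL;
    	return kmalloc(n * size, GFP_KERNEL);
    }
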
57620diff -urNp linux-2.6.32.43/include/asm-generic/kmap_types.h linux-2.6.32.43/include/asm-generic/kmap_types.h
57621--- linux-2.6.32.43/include/asm-generic/kmap_types.h 2011-03-27 14:31:47.000000000 -0400
57622+++ linux-2.6.32.43/include/asm-generic/kmap_types.h 2011-04-17 15:56:46.000000000 -0400
57623@@ -28,7 +28,8 @@ KMAP_D(15) KM_UML_USERCOPY,
57624 KMAP_D(16) KM_IRQ_PTE,
57625 KMAP_D(17) KM_NMI,
57626 KMAP_D(18) KM_NMI_PTE,
57627-KMAP_D(19) KM_TYPE_NR
57628+KMAP_D(19) KM_CLEARPAGE,
57629+KMAP_D(20) KM_TYPE_NR
57630 };
57631
57632 #undef KMAP_D
57633diff -urNp linux-2.6.32.43/include/asm-generic/pgtable.h linux-2.6.32.43/include/asm-generic/pgtable.h
57634--- linux-2.6.32.43/include/asm-generic/pgtable.h 2011-03-27 14:31:47.000000000 -0400
57635+++ linux-2.6.32.43/include/asm-generic/pgtable.h 2011-04-17 15:56:46.000000000 -0400
57636@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar
57637 unsigned long size);
57638 #endif
57639
57640+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
57641+static inline unsigned long pax_open_kernel(void) { return 0; }
57642+#endif
57643+
57644+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
57645+static inline unsigned long pax_close_kernel(void) { return 0; }
57646+#endif
57647+
57648 #endif /* !__ASSEMBLY__ */
57649
57650 #endif /* _ASM_GENERIC_PGTABLE_H */
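
The two stubs above default to no-ops; on architectures where PaX KERNEXEC supplies real implementations they briefly allow writes to kernel data that is normally mapped read-only. The following kernel-context sketch shows the open/close bracket callers use; example_table and example_set_handler are hypothetical.

    /* Kernel-context sketch; example_table and example_set_handler only
     * illustrate the open/close bracket, they are not from this patch. */
    #include <asm/pgtable.h>

    extern void (*example_table[16])(void);	/* assumed to live in read-only data */

    static void example_set_handler(unsigned int idx, void (*handler)(void))
    {
    	pax_open_kernel();		/* make the read-only mapping writable */
    	example_table[idx] = handler;
    	pax_close_kernel();		/* restore the protection */
    }
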
57651diff -urNp linux-2.6.32.43/include/asm-generic/pgtable-nopmd.h linux-2.6.32.43/include/asm-generic/pgtable-nopmd.h
57652--- linux-2.6.32.43/include/asm-generic/pgtable-nopmd.h 2011-03-27 14:31:47.000000000 -0400
57653+++ linux-2.6.32.43/include/asm-generic/pgtable-nopmd.h 2011-04-17 15:56:46.000000000 -0400
57654@@ -1,14 +1,19 @@
57655 #ifndef _PGTABLE_NOPMD_H
57656 #define _PGTABLE_NOPMD_H
57657
57658-#ifndef __ASSEMBLY__
57659-
57660 #include <asm-generic/pgtable-nopud.h>
57661
57662-struct mm_struct;
57663-
57664 #define __PAGETABLE_PMD_FOLDED
57665
57666+#define PMD_SHIFT PUD_SHIFT
57667+#define PTRS_PER_PMD 1
57668+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
57669+#define PMD_MASK (~(PMD_SIZE-1))
57670+
57671+#ifndef __ASSEMBLY__
57672+
57673+struct mm_struct;
57674+
57675 /*
57676 * Having the pmd type consist of a pud gets the size right, and allows
57677 * us to conceptually access the pud entry that this pmd is folded into
57678@@ -16,11 +21,6 @@ struct mm_struct;
57679 */
57680 typedef struct { pud_t pud; } pmd_t;
57681
57682-#define PMD_SHIFT PUD_SHIFT
57683-#define PTRS_PER_PMD 1
57684-#define PMD_SIZE (1UL << PMD_SHIFT)
57685-#define PMD_MASK (~(PMD_SIZE-1))
57686-
57687 /*
57688 * The "pud_xxx()" functions here are trivial for a folded two-level
57689 * setup: the pmd is never bad, and a pmd always exists (as it's folded
57690diff -urNp linux-2.6.32.43/include/asm-generic/pgtable-nopud.h linux-2.6.32.43/include/asm-generic/pgtable-nopud.h
57691--- linux-2.6.32.43/include/asm-generic/pgtable-nopud.h 2011-03-27 14:31:47.000000000 -0400
57692+++ linux-2.6.32.43/include/asm-generic/pgtable-nopud.h 2011-04-17 15:56:46.000000000 -0400
57693@@ -1,10 +1,15 @@
57694 #ifndef _PGTABLE_NOPUD_H
57695 #define _PGTABLE_NOPUD_H
57696
57697-#ifndef __ASSEMBLY__
57698-
57699 #define __PAGETABLE_PUD_FOLDED
57700
57701+#define PUD_SHIFT PGDIR_SHIFT
57702+#define PTRS_PER_PUD 1
57703+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
57704+#define PUD_MASK (~(PUD_SIZE-1))
57705+
57706+#ifndef __ASSEMBLY__
57707+
57708 /*
57709 * Having the pud type consist of a pgd gets the size right, and allows
57710 * us to conceptually access the pgd entry that this pud is folded into
57711@@ -12,11 +17,6 @@
57712 */
57713 typedef struct { pgd_t pgd; } pud_t;
57714
57715-#define PUD_SHIFT PGDIR_SHIFT
57716-#define PTRS_PER_PUD 1
57717-#define PUD_SIZE (1UL << PUD_SHIFT)
57718-#define PUD_MASK (~(PUD_SIZE-1))
57719-
57720 /*
57721 * The "pgd_xxx()" functions here are trivial for a folded two-level
57722 * setup: the pud is never bad, and a pud always exists (as it's folded
57723diff -urNp linux-2.6.32.43/include/asm-generic/vmlinux.lds.h linux-2.6.32.43/include/asm-generic/vmlinux.lds.h
57724--- linux-2.6.32.43/include/asm-generic/vmlinux.lds.h 2011-03-27 14:31:47.000000000 -0400
57725+++ linux-2.6.32.43/include/asm-generic/vmlinux.lds.h 2011-04-17 15:56:46.000000000 -0400
57726@@ -199,6 +199,7 @@
57727 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
57728 VMLINUX_SYMBOL(__start_rodata) = .; \
57729 *(.rodata) *(.rodata.*) \
57730+ *(.data.read_only) \
57731 *(__vermagic) /* Kernel version magic */ \
57732 *(__markers_strings) /* Markers: strings */ \
57733 *(__tracepoints_strings)/* Tracepoints: strings */ \
57734@@ -656,22 +657,24 @@
57735 * section in the linker script will go there too. @phdr should have
57736 * a leading colon.
57737 *
57738- * Note that this macros defines __per_cpu_load as an absolute symbol.
57739+ * Note that this macros defines per_cpu_load as an absolute symbol.
57740 * If there is no need to put the percpu section at a predetermined
57741 * address, use PERCPU().
57742 */
57743 #define PERCPU_VADDR(vaddr, phdr) \
57744- VMLINUX_SYMBOL(__per_cpu_load) = .; \
57745- .data.percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
57746+ per_cpu_load = .; \
57747+ .data.percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
57748 - LOAD_OFFSET) { \
57749+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
57750 VMLINUX_SYMBOL(__per_cpu_start) = .; \
57751 *(.data.percpu.first) \
57752- *(.data.percpu.page_aligned) \
57753 *(.data.percpu) \
57754+ . = ALIGN(PAGE_SIZE); \
57755+ *(.data.percpu.page_aligned) \
57756 *(.data.percpu.shared_aligned) \
57757 VMLINUX_SYMBOL(__per_cpu_end) = .; \
57758 } phdr \
57759- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data.percpu);
57760+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data.percpu);
57761
57762 /**
57763 * PERCPU - define output section for percpu area, simple version
57764diff -urNp linux-2.6.32.43/include/drm/drm_crtc_helper.h linux-2.6.32.43/include/drm/drm_crtc_helper.h
57765--- linux-2.6.32.43/include/drm/drm_crtc_helper.h 2011-03-27 14:31:47.000000000 -0400
57766+++ linux-2.6.32.43/include/drm/drm_crtc_helper.h 2011-08-05 20:33:55.000000000 -0400
57767@@ -64,7 +64,7 @@ struct drm_crtc_helper_funcs {
57768
57769 /* reload the current crtc LUT */
57770 void (*load_lut)(struct drm_crtc *crtc);
57771-};
57772+} __no_const;
57773
57774 struct drm_encoder_helper_funcs {
57775 void (*dpms)(struct drm_encoder *encoder, int mode);
57776@@ -85,7 +85,7 @@ struct drm_encoder_helper_funcs {
57777 struct drm_connector *connector);
57778 /* disable encoder when not in use - more explicit than dpms off */
57779 void (*disable)(struct drm_encoder *encoder);
57780-};
57781+} __no_const;
57782
57783 struct drm_connector_helper_funcs {
57784 int (*get_modes)(struct drm_connector *connector);
57785diff -urNp linux-2.6.32.43/include/drm/drmP.h linux-2.6.32.43/include/drm/drmP.h
57786--- linux-2.6.32.43/include/drm/drmP.h 2011-03-27 14:31:47.000000000 -0400
57787+++ linux-2.6.32.43/include/drm/drmP.h 2011-04-17 15:56:46.000000000 -0400
57788@@ -71,6 +71,7 @@
57789 #include <linux/workqueue.h>
57790 #include <linux/poll.h>
57791 #include <asm/pgalloc.h>
57792+#include <asm/local.h>
57793 #include "drm.h"
57794
57795 #include <linux/idr.h>
57796@@ -814,7 +815,7 @@ struct drm_driver {
57797 void (*vgaarb_irq)(struct drm_device *dev, bool state);
57798
57799 /* Driver private ops for this object */
57800- struct vm_operations_struct *gem_vm_ops;
57801+ const struct vm_operations_struct *gem_vm_ops;
57802
57803 int major;
57804 int minor;
57805@@ -917,7 +918,7 @@ struct drm_device {
57806
57807 /** \name Usage Counters */
57808 /*@{ */
57809- int open_count; /**< Outstanding files open */
57810+ local_t open_count; /**< Outstanding files open */
57811 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
57812 atomic_t vma_count; /**< Outstanding vma areas open */
57813 int buf_use; /**< Buffers in use -- cannot alloc */
57814@@ -928,7 +929,7 @@ struct drm_device {
57815 /*@{ */
57816 unsigned long counters;
57817 enum drm_stat_type types[15];
57818- atomic_t counts[15];
57819+ atomic_unchecked_t counts[15];
57820 /*@} */
57821
57822 struct list_head filelist;
57823@@ -1016,7 +1017,7 @@ struct drm_device {
57824 struct pci_controller *hose;
57825 #endif
57826 struct drm_sg_mem *sg; /**< Scatter gather memory */
57827- unsigned int num_crtcs; /**< Number of CRTCs on this device */
57828+ unsigned int num_crtcs; /**< Number of CRTCs on this device */
57829 void *dev_private; /**< device private data */
57830 void *mm_private;
57831 struct address_space *dev_mapping;
57832@@ -1042,11 +1043,11 @@ struct drm_device {
57833 spinlock_t object_name_lock;
57834 struct idr object_name_idr;
57835 atomic_t object_count;
57836- atomic_t object_memory;
57837+ atomic_unchecked_t object_memory;
57838 atomic_t pin_count;
57839- atomic_t pin_memory;
57840+ atomic_unchecked_t pin_memory;
57841 atomic_t gtt_count;
57842- atomic_t gtt_memory;
57843+ atomic_unchecked_t gtt_memory;
57844 uint32_t gtt_total;
57845 uint32_t invalidate_domains; /* domains pending invalidation */
57846 uint32_t flush_domains; /* domains pending flush */
57847diff -urNp linux-2.6.32.43/include/drm/ttm/ttm_memory.h linux-2.6.32.43/include/drm/ttm/ttm_memory.h
57848--- linux-2.6.32.43/include/drm/ttm/ttm_memory.h 2011-03-27 14:31:47.000000000 -0400
57849+++ linux-2.6.32.43/include/drm/ttm/ttm_memory.h 2011-08-05 20:33:55.000000000 -0400
57850@@ -47,7 +47,7 @@
57851
57852 struct ttm_mem_shrink {
57853 int (*do_shrink) (struct ttm_mem_shrink *);
57854-};
57855+} __no_const;
57856
57857 /**
57858 * struct ttm_mem_global - Global memory accounting structure.
57859diff -urNp linux-2.6.32.43/include/linux/a.out.h linux-2.6.32.43/include/linux/a.out.h
57860--- linux-2.6.32.43/include/linux/a.out.h 2011-03-27 14:31:47.000000000 -0400
57861+++ linux-2.6.32.43/include/linux/a.out.h 2011-04-17 15:56:46.000000000 -0400
57862@@ -39,6 +39,14 @@ enum machine_type {
57863 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
57864 };
57865
57866+/* Constants for the N_FLAGS field */
57867+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
57868+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
57869+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
57870+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
57871+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
57872+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
57873+
57874 #if !defined (N_MAGIC)
57875 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
57876 #endif
57877diff -urNp linux-2.6.32.43/include/linux/atmdev.h linux-2.6.32.43/include/linux/atmdev.h
57878--- linux-2.6.32.43/include/linux/atmdev.h 2011-03-27 14:31:47.000000000 -0400
57879+++ linux-2.6.32.43/include/linux/atmdev.h 2011-04-17 15:56:46.000000000 -0400
57880@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
57881 #endif
57882
57883 struct k_atm_aal_stats {
57884-#define __HANDLE_ITEM(i) atomic_t i
57885+#define __HANDLE_ITEM(i) atomic_unchecked_t i
57886 __AAL_STAT_ITEMS
57887 #undef __HANDLE_ITEM
57888 };
57889diff -urNp linux-2.6.32.43/include/linux/backlight.h linux-2.6.32.43/include/linux/backlight.h
57890--- linux-2.6.32.43/include/linux/backlight.h 2011-03-27 14:31:47.000000000 -0400
57891+++ linux-2.6.32.43/include/linux/backlight.h 2011-04-17 15:56:46.000000000 -0400
57892@@ -36,18 +36,18 @@ struct backlight_device;
57893 struct fb_info;
57894
57895 struct backlight_ops {
57896- unsigned int options;
57897+ const unsigned int options;
57898
57899 #define BL_CORE_SUSPENDRESUME (1 << 0)
57900
57901 /* Notify the backlight driver some property has changed */
57902- int (*update_status)(struct backlight_device *);
57903+ int (* const update_status)(struct backlight_device *);
57904 /* Return the current backlight brightness (accounting for power,
57905 fb_blank etc.) */
57906- int (*get_brightness)(struct backlight_device *);
57907+ int (* const get_brightness)(struct backlight_device *);
57908 /* Check if given framebuffer device is the one bound to this backlight;
57909 return 0 if not, !=0 if it is. If NULL, backlight always matches the fb. */
57910- int (*check_fb)(struct fb_info *);
57911+ int (* const check_fb)(struct fb_info *);
57912 };
57913
57914 /* This structure defines all the properties of a backlight */
57915@@ -86,7 +86,7 @@ struct backlight_device {
57916 registered this device has been unloaded, and if class_get_devdata()
57917 points to something in the body of that driver, it is also invalid. */
57918 struct mutex ops_lock;
57919- struct backlight_ops *ops;
57920+ const struct backlight_ops *ops;
57921
57922 /* The framebuffer notifier block */
57923 struct notifier_block fb_notif;
57924@@ -103,7 +103,7 @@ static inline void backlight_update_stat
57925 }
57926
57927 extern struct backlight_device *backlight_device_register(const char *name,
57928- struct device *dev, void *devdata, struct backlight_ops *ops);
57929+ struct device *dev, void *devdata, const struct backlight_ops *ops);
57930 extern void backlight_device_unregister(struct backlight_device *bd);
57931 extern void backlight_force_update(struct backlight_device *bd,
57932 enum backlight_update_reason reason);
57933diff -urNp linux-2.6.32.43/include/linux/binfmts.h linux-2.6.32.43/include/linux/binfmts.h
57934--- linux-2.6.32.43/include/linux/binfmts.h 2011-04-17 17:00:52.000000000 -0400
57935+++ linux-2.6.32.43/include/linux/binfmts.h 2011-04-17 15:56:46.000000000 -0400
57936@@ -83,6 +83,7 @@ struct linux_binfmt {
57937 int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
57938 int (*load_shlib)(struct file *);
57939 int (*core_dump)(long signr, struct pt_regs *regs, struct file *file, unsigned long limit);
57940+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
57941 unsigned long min_coredump; /* minimal dump size */
57942 int hasvdso;
57943 };
57944diff -urNp linux-2.6.32.43/include/linux/blkdev.h linux-2.6.32.43/include/linux/blkdev.h
57945--- linux-2.6.32.43/include/linux/blkdev.h 2011-03-27 14:31:47.000000000 -0400
57946+++ linux-2.6.32.43/include/linux/blkdev.h 2011-04-17 15:56:46.000000000 -0400
57947@@ -1265,19 +1265,19 @@ static inline int blk_integrity_rq(struc
57948 #endif /* CONFIG_BLK_DEV_INTEGRITY */
57949
57950 struct block_device_operations {
57951- int (*open) (struct block_device *, fmode_t);
57952- int (*release) (struct gendisk *, fmode_t);
57953- int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57954- int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57955- int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57956- int (*direct_access) (struct block_device *, sector_t,
57957+ int (* const open) (struct block_device *, fmode_t);
57958+ int (* const release) (struct gendisk *, fmode_t);
57959+ int (* const locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57960+ int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57961+ int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
57962+ int (* const direct_access) (struct block_device *, sector_t,
57963 void **, unsigned long *);
57964- int (*media_changed) (struct gendisk *);
57965- unsigned long long (*set_capacity) (struct gendisk *,
57966+ int (* const media_changed) (struct gendisk *);
57967+ unsigned long long (* const set_capacity) (struct gendisk *,
57968 unsigned long long);
57969- int (*revalidate_disk) (struct gendisk *);
57970- int (*getgeo)(struct block_device *, struct hd_geometry *);
57971- struct module *owner;
57972+ int (* const revalidate_disk) (struct gendisk *);
57973+ int (*const getgeo)(struct block_device *, struct hd_geometry *);
57974+ struct module * const owner;
57975 };
57976
57977 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
57978diff -urNp linux-2.6.32.43/include/linux/blktrace_api.h linux-2.6.32.43/include/linux/blktrace_api.h
57979--- linux-2.6.32.43/include/linux/blktrace_api.h 2011-03-27 14:31:47.000000000 -0400
57980+++ linux-2.6.32.43/include/linux/blktrace_api.h 2011-05-04 17:56:28.000000000 -0400
57981@@ -160,7 +160,7 @@ struct blk_trace {
57982 struct dentry *dir;
57983 struct dentry *dropped_file;
57984 struct dentry *msg_file;
57985- atomic_t dropped;
57986+ atomic_unchecked_t dropped;
57987 };
57988
57989 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
57990diff -urNp linux-2.6.32.43/include/linux/byteorder/little_endian.h linux-2.6.32.43/include/linux/byteorder/little_endian.h
57991--- linux-2.6.32.43/include/linux/byteorder/little_endian.h 2011-03-27 14:31:47.000000000 -0400
57992+++ linux-2.6.32.43/include/linux/byteorder/little_endian.h 2011-04-17 15:56:46.000000000 -0400
57993@@ -42,51 +42,51 @@
57994
57995 static inline __le64 __cpu_to_le64p(const __u64 *p)
57996 {
57997- return (__force __le64)*p;
57998+ return (__force const __le64)*p;
57999 }
58000 static inline __u64 __le64_to_cpup(const __le64 *p)
58001 {
58002- return (__force __u64)*p;
58003+ return (__force const __u64)*p;
58004 }
58005 static inline __le32 __cpu_to_le32p(const __u32 *p)
58006 {
58007- return (__force __le32)*p;
58008+ return (__force const __le32)*p;
58009 }
58010 static inline __u32 __le32_to_cpup(const __le32 *p)
58011 {
58012- return (__force __u32)*p;
58013+ return (__force const __u32)*p;
58014 }
58015 static inline __le16 __cpu_to_le16p(const __u16 *p)
58016 {
58017- return (__force __le16)*p;
58018+ return (__force const __le16)*p;
58019 }
58020 static inline __u16 __le16_to_cpup(const __le16 *p)
58021 {
58022- return (__force __u16)*p;
58023+ return (__force const __u16)*p;
58024 }
58025 static inline __be64 __cpu_to_be64p(const __u64 *p)
58026 {
58027- return (__force __be64)__swab64p(p);
58028+ return (__force const __be64)__swab64p(p);
58029 }
58030 static inline __u64 __be64_to_cpup(const __be64 *p)
58031 {
58032- return __swab64p((__u64 *)p);
58033+ return __swab64p((const __u64 *)p);
58034 }
58035 static inline __be32 __cpu_to_be32p(const __u32 *p)
58036 {
58037- return (__force __be32)__swab32p(p);
58038+ return (__force const __be32)__swab32p(p);
58039 }
58040 static inline __u32 __be32_to_cpup(const __be32 *p)
58041 {
58042- return __swab32p((__u32 *)p);
58043+ return __swab32p((const __u32 *)p);
58044 }
58045 static inline __be16 __cpu_to_be16p(const __u16 *p)
58046 {
58047- return (__force __be16)__swab16p(p);
58048+ return (__force const __be16)__swab16p(p);
58049 }
58050 static inline __u16 __be16_to_cpup(const __be16 *p)
58051 {
58052- return __swab16p((__u16 *)p);
58053+ return __swab16p((const __u16 *)p);
58054 }
58055 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
58056 #define __le64_to_cpus(x) do { (void)(x); } while (0)
58057diff -urNp linux-2.6.32.43/include/linux/cache.h linux-2.6.32.43/include/linux/cache.h
58058--- linux-2.6.32.43/include/linux/cache.h 2011-03-27 14:31:47.000000000 -0400
58059+++ linux-2.6.32.43/include/linux/cache.h 2011-04-17 15:56:46.000000000 -0400
58060@@ -16,6 +16,10 @@
58061 #define __read_mostly
58062 #endif
58063
58064+#ifndef __read_only
58065+#define __read_only __read_mostly
58066+#endif
58067+
58068 #ifndef ____cacheline_aligned
58069 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
58070 #endif
58071diff -urNp linux-2.6.32.43/include/linux/capability.h linux-2.6.32.43/include/linux/capability.h
58072--- linux-2.6.32.43/include/linux/capability.h 2011-03-27 14:31:47.000000000 -0400
58073+++ linux-2.6.32.43/include/linux/capability.h 2011-04-17 15:56:46.000000000 -0400
58074@@ -563,6 +563,7 @@ extern const kernel_cap_t __cap_init_eff
58075 (security_real_capable_noaudit((t), (cap)) == 0)
58076
58077 extern int capable(int cap);
58078+int capable_nolog(int cap);
58079
58080 /* audit system wants to get cap info from files as well */
58081 struct dentry;
58082diff -urNp linux-2.6.32.43/include/linux/compiler-gcc4.h linux-2.6.32.43/include/linux/compiler-gcc4.h
58083--- linux-2.6.32.43/include/linux/compiler-gcc4.h 2011-03-27 14:31:47.000000000 -0400
58084+++ linux-2.6.32.43/include/linux/compiler-gcc4.h 2011-08-05 20:33:55.000000000 -0400
58085@@ -36,4 +36,13 @@
58086 the kernel context */
58087 #define __cold __attribute__((__cold__))
58088
58089+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
58090+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
58091+#define __bos0(ptr) __bos((ptr), 0)
58092+#define __bos1(ptr) __bos((ptr), 1)
58093+
58094+#if __GNUC_MINOR__ >= 5
58095+#define __no_const __attribute__((no_const))
58096+#endif
58097+
58098 #endif
58099diff -urNp linux-2.6.32.43/include/linux/compiler.h linux-2.6.32.43/include/linux/compiler.h
58100--- linux-2.6.32.43/include/linux/compiler.h 2011-03-27 14:31:47.000000000 -0400
58101+++ linux-2.6.32.43/include/linux/compiler.h 2011-08-05 20:33:55.000000000 -0400
58102@@ -247,6 +247,10 @@ void ftrace_likely_update(struct ftrace_
58103 # define __attribute_const__ /* unimplemented */
58104 #endif
58105
58106+#ifndef __no_const
58107+# define __no_const
58108+#endif
58109+
58110 /*
58111 * Tell gcc if a function is cold. The compiler will assume any path
58112 * directly leading to the call is unlikely.
58113@@ -256,6 +260,22 @@ void ftrace_likely_update(struct ftrace_
58114 #define __cold
58115 #endif
58116
58117+#ifndef __alloc_size
58118+#define __alloc_size(...)
58119+#endif
58120+
58121+#ifndef __bos
58122+#define __bos(ptr, arg)
58123+#endif
58124+
58125+#ifndef __bos0
58126+#define __bos0(ptr)
58127+#endif
58128+
58129+#ifndef __bos1
58130+#define __bos1(ptr)
58131+#endif
58132+
58133 /* Simple shorthand for a section definition */
58134 #ifndef __section
58135 # define __section(S) __attribute__ ((__section__(#S)))
58136@@ -278,6 +298,7 @@ void ftrace_likely_update(struct ftrace_
58137 * use is to mediate communication between process-level code and irq/NMI
58138 * handlers, all running on the same CPU.
58139 */
58140-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
58141+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
58142+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
58143
58144 #endif /* __LINUX_COMPILER_H */
58145diff -urNp linux-2.6.32.43/include/linux/crypto.h linux-2.6.32.43/include/linux/crypto.h
58146--- linux-2.6.32.43/include/linux/crypto.h 2011-03-27 14:31:47.000000000 -0400
58147+++ linux-2.6.32.43/include/linux/crypto.h 2011-08-05 20:33:55.000000000 -0400
58148@@ -394,7 +394,7 @@ struct cipher_tfm {
58149 const u8 *key, unsigned int keylen);
58150 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
58151 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
58152-};
58153+} __no_const;
58154
58155 struct hash_tfm {
58156 int (*init)(struct hash_desc *desc);
58157@@ -415,13 +415,13 @@ struct compress_tfm {
58158 int (*cot_decompress)(struct crypto_tfm *tfm,
58159 const u8 *src, unsigned int slen,
58160 u8 *dst, unsigned int *dlen);
58161-};
58162+} __no_const;
58163
58164 struct rng_tfm {
58165 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
58166 unsigned int dlen);
58167 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
58168-};
58169+} __no_const;
58170
58171 #define crt_ablkcipher crt_u.ablkcipher
58172 #define crt_aead crt_u.aead
58173diff -urNp linux-2.6.32.43/include/linux/cryptohash.h linux-2.6.32.43/include/linux/cryptohash.h
58174--- linux-2.6.32.43/include/linux/cryptohash.h 2011-03-27 14:31:47.000000000 -0400
58175+++ linux-2.6.32.43/include/linux/cryptohash.h 2011-08-07 19:48:09.000000000 -0400
58176@@ -7,6 +7,11 @@
58177 void sha_init(__u32 *buf);
58178 void sha_transform(__u32 *digest, const char *data, __u32 *W);
58179
58180+#define MD5_DIGEST_WORDS 4
58181+#define MD5_MESSAGE_BYTES 64
58182+
58183+void md5_transform(__u32 *hash, __u32 const *in);
58184+
58185 __u32 half_md4_transform(__u32 buf[4], __u32 const in[8]);
58186
58187 #endif
58188diff -urNp linux-2.6.32.43/include/linux/dcache.h linux-2.6.32.43/include/linux/dcache.h
58189--- linux-2.6.32.43/include/linux/dcache.h 2011-03-27 14:31:47.000000000 -0400
58190+++ linux-2.6.32.43/include/linux/dcache.h 2011-04-23 13:34:46.000000000 -0400
58191@@ -119,6 +119,8 @@ struct dentry {
58192 unsigned char d_iname[DNAME_INLINE_LEN_MIN]; /* small names */
58193 };
58194
58195+#define DNAME_INLINE_LEN (sizeof(struct dentry)-offsetof(struct dentry,d_iname))
58196+
58197 /*
58198 * dentry->d_lock spinlock nesting subclasses:
58199 *
58200diff -urNp linux-2.6.32.43/include/linux/decompress/mm.h linux-2.6.32.43/include/linux/decompress/mm.h
58201--- linux-2.6.32.43/include/linux/decompress/mm.h 2011-03-27 14:31:47.000000000 -0400
58202+++ linux-2.6.32.43/include/linux/decompress/mm.h 2011-04-17 15:56:46.000000000 -0400
58203@@ -78,7 +78,7 @@ static void free(void *where)
58204 * warnings when not needed (indeed large_malloc / large_free are not
58205 * needed by inflate */
58206
58207-#define malloc(a) kmalloc(a, GFP_KERNEL)
58208+#define malloc(a) kmalloc((a), GFP_KERNEL)
58209 #define free(a) kfree(a)
58210
58211 #define large_malloc(a) vmalloc(a)
58212diff -urNp linux-2.6.32.43/include/linux/dma-mapping.h linux-2.6.32.43/include/linux/dma-mapping.h
58213--- linux-2.6.32.43/include/linux/dma-mapping.h 2011-03-27 14:31:47.000000000 -0400
58214+++ linux-2.6.32.43/include/linux/dma-mapping.h 2011-04-17 15:56:46.000000000 -0400
58215@@ -16,50 +16,50 @@ enum dma_data_direction {
58216 };
58217
58218 struct dma_map_ops {
58219- void* (*alloc_coherent)(struct device *dev, size_t size,
58220+ void* (* const alloc_coherent)(struct device *dev, size_t size,
58221 dma_addr_t *dma_handle, gfp_t gfp);
58222- void (*free_coherent)(struct device *dev, size_t size,
58223+ void (* const free_coherent)(struct device *dev, size_t size,
58224 void *vaddr, dma_addr_t dma_handle);
58225- dma_addr_t (*map_page)(struct device *dev, struct page *page,
58226+ dma_addr_t (* const map_page)(struct device *dev, struct page *page,
58227 unsigned long offset, size_t size,
58228 enum dma_data_direction dir,
58229 struct dma_attrs *attrs);
58230- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
58231+ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle,
58232 size_t size, enum dma_data_direction dir,
58233 struct dma_attrs *attrs);
58234- int (*map_sg)(struct device *dev, struct scatterlist *sg,
58235+ int (* const map_sg)(struct device *dev, struct scatterlist *sg,
58236 int nents, enum dma_data_direction dir,
58237 struct dma_attrs *attrs);
58238- void (*unmap_sg)(struct device *dev,
58239+ void (* const unmap_sg)(struct device *dev,
58240 struct scatterlist *sg, int nents,
58241 enum dma_data_direction dir,
58242 struct dma_attrs *attrs);
58243- void (*sync_single_for_cpu)(struct device *dev,
58244+ void (* const sync_single_for_cpu)(struct device *dev,
58245 dma_addr_t dma_handle, size_t size,
58246 enum dma_data_direction dir);
58247- void (*sync_single_for_device)(struct device *dev,
58248+ void (* const sync_single_for_device)(struct device *dev,
58249 dma_addr_t dma_handle, size_t size,
58250 enum dma_data_direction dir);
58251- void (*sync_single_range_for_cpu)(struct device *dev,
58252+ void (* const sync_single_range_for_cpu)(struct device *dev,
58253 dma_addr_t dma_handle,
58254 unsigned long offset,
58255 size_t size,
58256 enum dma_data_direction dir);
58257- void (*sync_single_range_for_device)(struct device *dev,
58258+ void (* const sync_single_range_for_device)(struct device *dev,
58259 dma_addr_t dma_handle,
58260 unsigned long offset,
58261 size_t size,
58262 enum dma_data_direction dir);
58263- void (*sync_sg_for_cpu)(struct device *dev,
58264+ void (* const sync_sg_for_cpu)(struct device *dev,
58265 struct scatterlist *sg, int nents,
58266 enum dma_data_direction dir);
58267- void (*sync_sg_for_device)(struct device *dev,
58268+ void (* const sync_sg_for_device)(struct device *dev,
58269 struct scatterlist *sg, int nents,
58270 enum dma_data_direction dir);
58271- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
58272- int (*dma_supported)(struct device *dev, u64 mask);
58273+ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr);
58274+ int (* const dma_supported)(struct device *dev, u64 mask);
58275 int (*set_dma_mask)(struct device *dev, u64 mask);
58276- int is_phys;
58277+ const int is_phys;
58278 };
58279
58280 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
58281diff -urNp linux-2.6.32.43/include/linux/dst.h linux-2.6.32.43/include/linux/dst.h
58282--- linux-2.6.32.43/include/linux/dst.h 2011-03-27 14:31:47.000000000 -0400
58283+++ linux-2.6.32.43/include/linux/dst.h 2011-04-17 15:56:46.000000000 -0400
58284@@ -380,7 +380,7 @@ struct dst_node
58285 struct thread_pool *pool;
58286
58287 /* Transaction IDs live here */
58288- atomic_long_t gen;
58289+ atomic_long_unchecked_t gen;
58290
58291 /*
58292 * How frequently and how many times transaction
58293diff -urNp linux-2.6.32.43/include/linux/elf.h linux-2.6.32.43/include/linux/elf.h
58294--- linux-2.6.32.43/include/linux/elf.h 2011-03-27 14:31:47.000000000 -0400
58295+++ linux-2.6.32.43/include/linux/elf.h 2011-04-17 15:56:46.000000000 -0400
58296@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword;
58297 #define PT_GNU_EH_FRAME 0x6474e550
58298
58299 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
58300+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
58301+
58302+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
58303+
58304+/* Constants for the e_flags field */
58305+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
58306+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
58307+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
58308+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
58309+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
58310+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
58311
58312 /* These constants define the different elf file types */
58313 #define ET_NONE 0
58314@@ -84,6 +95,8 @@ typedef __s64 Elf64_Sxword;
58315 #define DT_DEBUG 21
58316 #define DT_TEXTREL 22
58317 #define DT_JMPREL 23
58318+#define DT_FLAGS 30
58319+ #define DF_TEXTREL 0x00000004
58320 #define DT_ENCODING 32
58321 #define OLD_DT_LOOS 0x60000000
58322 #define DT_LOOS 0x6000000d
58323@@ -230,6 +243,19 @@ typedef struct elf64_hdr {
58324 #define PF_W 0x2
58325 #define PF_X 0x1
58326
58327+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
58328+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
58329+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
58330+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
58331+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
58332+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
58333+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
58334+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
58335+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
58336+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
58337+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
58338+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
58339+
58340 typedef struct elf32_phdr{
58341 Elf32_Word p_type;
58342 Elf32_Off p_offset;
58343@@ -322,6 +348,8 @@ typedef struct elf64_shdr {
58344 #define EI_OSABI 7
58345 #define EI_PAD 8
58346
58347+#define EI_PAX 14
58348+
58349 #define ELFMAG0 0x7f /* EI_MAG */
58350 #define ELFMAG1 'E'
58351 #define ELFMAG2 'L'
58352@@ -386,6 +414,7 @@ extern Elf32_Dyn _DYNAMIC [];
58353 #define elf_phdr elf32_phdr
58354 #define elf_note elf32_note
58355 #define elf_addr_t Elf32_Off
58356+#define elf_dyn Elf32_Dyn
58357
58358 #else
58359
58360@@ -394,6 +423,7 @@ extern Elf64_Dyn _DYNAMIC [];
58361 #define elf_phdr elf64_phdr
58362 #define elf_note elf64_note
58363 #define elf_addr_t Elf64_Off
58364+#define elf_dyn Elf64_Dyn
58365
58366 #endif
58367
58368diff -urNp linux-2.6.32.43/include/linux/fscache-cache.h linux-2.6.32.43/include/linux/fscache-cache.h
58369--- linux-2.6.32.43/include/linux/fscache-cache.h 2011-03-27 14:31:47.000000000 -0400
58370+++ linux-2.6.32.43/include/linux/fscache-cache.h 2011-05-04 17:56:28.000000000 -0400
58371@@ -116,7 +116,7 @@ struct fscache_operation {
58372 #endif
58373 };
58374
58375-extern atomic_t fscache_op_debug_id;
58376+extern atomic_unchecked_t fscache_op_debug_id;
58377 extern const struct slow_work_ops fscache_op_slow_work_ops;
58378
58379 extern void fscache_enqueue_operation(struct fscache_operation *);
58380@@ -134,7 +134,7 @@ static inline void fscache_operation_ini
58381 fscache_operation_release_t release)
58382 {
58383 atomic_set(&op->usage, 1);
58384- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
58385+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
58386 op->release = release;
58387 INIT_LIST_HEAD(&op->pend_link);
58388 fscache_set_op_state(op, "Init");
58389diff -urNp linux-2.6.32.43/include/linux/fs.h linux-2.6.32.43/include/linux/fs.h
58390--- linux-2.6.32.43/include/linux/fs.h 2011-07-13 17:23:04.000000000 -0400
58391+++ linux-2.6.32.43/include/linux/fs.h 2011-08-05 20:33:55.000000000 -0400
58392@@ -90,6 +90,11 @@ struct inodes_stat_t {
58393 /* Expect random access pattern */
58394 #define FMODE_RANDOM ((__force fmode_t)4096)
58395
58396+/* Hack for grsec so as not to require read permission simply to execute
58397+ * a binary
58398+ */
58399+#define FMODE_GREXEC ((__force fmode_t)0x2000000)
58400+
58401 /*
58402 * The below are the various read and write types that we support. Some of
58403 * them include behavioral modifiers that send information down to the
58404@@ -568,41 +573,41 @@ typedef int (*read_actor_t)(read_descrip
58405 unsigned long, unsigned long);
58406
58407 struct address_space_operations {
58408- int (*writepage)(struct page *page, struct writeback_control *wbc);
58409- int (*readpage)(struct file *, struct page *);
58410- void (*sync_page)(struct page *);
58411+ int (* const writepage)(struct page *page, struct writeback_control *wbc);
58412+ int (* const readpage)(struct file *, struct page *);
58413+ void (* const sync_page)(struct page *);
58414
58415 /* Write back some dirty pages from this mapping. */
58416- int (*writepages)(struct address_space *, struct writeback_control *);
58417+ int (* const writepages)(struct address_space *, struct writeback_control *);
58418
58419 /* Set a page dirty. Return true if this dirtied it */
58420- int (*set_page_dirty)(struct page *page);
58421+ int (* const set_page_dirty)(struct page *page);
58422
58423- int (*readpages)(struct file *filp, struct address_space *mapping,
58424+ int (* const readpages)(struct file *filp, struct address_space *mapping,
58425 struct list_head *pages, unsigned nr_pages);
58426
58427- int (*write_begin)(struct file *, struct address_space *mapping,
58428+ int (* const write_begin)(struct file *, struct address_space *mapping,
58429 loff_t pos, unsigned len, unsigned flags,
58430 struct page **pagep, void **fsdata);
58431- int (*write_end)(struct file *, struct address_space *mapping,
58432+ int (* const write_end)(struct file *, struct address_space *mapping,
58433 loff_t pos, unsigned len, unsigned copied,
58434 struct page *page, void *fsdata);
58435
58436 /* Unfortunately this kludge is needed for FIBMAP. Don't use it */
58437- sector_t (*bmap)(struct address_space *, sector_t);
58438- void (*invalidatepage) (struct page *, unsigned long);
58439- int (*releasepage) (struct page *, gfp_t);
58440- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov,
58441+ sector_t (* const bmap)(struct address_space *, sector_t);
58442+ void (* const invalidatepage) (struct page *, unsigned long);
58443+ int (* const releasepage) (struct page *, gfp_t);
58444+ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov,
58445 loff_t offset, unsigned long nr_segs);
58446- int (*get_xip_mem)(struct address_space *, pgoff_t, int,
58447+ int (* const get_xip_mem)(struct address_space *, pgoff_t, int,
58448 void **, unsigned long *);
58449 /* migrate the contents of a page to the specified target */
58450- int (*migratepage) (struct address_space *,
58451+ int (* const migratepage) (struct address_space *,
58452 struct page *, struct page *);
58453- int (*launder_page) (struct page *);
58454- int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
58455+ int (* const launder_page) (struct page *);
58456+ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *,
58457 unsigned long);
58458- int (*error_remove_page)(struct address_space *, struct page *);
58459+ int (* const error_remove_page)(struct address_space *, struct page *);
58460 };
58461
58462 /*
58463@@ -1031,19 +1036,19 @@ static inline int file_check_writeable(s
58464 typedef struct files_struct *fl_owner_t;
58465
58466 struct file_lock_operations {
58467- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
58468- void (*fl_release_private)(struct file_lock *);
58469+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
58470+ void (* const fl_release_private)(struct file_lock *);
58471 };
58472
58473 struct lock_manager_operations {
58474- int (*fl_compare_owner)(struct file_lock *, struct file_lock *);
58475- void (*fl_notify)(struct file_lock *); /* unblock callback */
58476- int (*fl_grant)(struct file_lock *, struct file_lock *, int);
58477- void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
58478- void (*fl_release_private)(struct file_lock *);
58479- void (*fl_break)(struct file_lock *);
58480- int (*fl_mylease)(struct file_lock *, struct file_lock *);
58481- int (*fl_change)(struct file_lock **, int);
58482+ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *);
58483+ void (* const fl_notify)(struct file_lock *); /* unblock callback */
58484+ int (* const fl_grant)(struct file_lock *, struct file_lock *, int);
58485+ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *);
58486+ void (* const fl_release_private)(struct file_lock *);
58487+ void (* const fl_break)(struct file_lock *);
58488+ int (* const fl_mylease)(struct file_lock *, struct file_lock *);
58489+ int (* const fl_change)(struct file_lock **, int);
58490 };
58491
58492 struct lock_manager {
58493@@ -1442,7 +1447,7 @@ struct fiemap_extent_info {
58494 unsigned int fi_flags; /* Flags as passed from user */
58495 unsigned int fi_extents_mapped; /* Number of mapped extents */
58496 unsigned int fi_extents_max; /* Size of fiemap_extent array */
58497- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent
58498+ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent
58499 * array */
58500 };
58501 int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical,
58502@@ -1486,7 +1491,7 @@ struct block_device_operations;
58503 * can be called without the big kernel lock held in all filesystems.
58504 */
58505 struct file_operations {
58506- struct module *owner;
58507+ struct module * const owner;
58508 loff_t (*llseek) (struct file *, loff_t, int);
58509 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
58510 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
58511@@ -1559,30 +1564,30 @@ extern ssize_t vfs_writev(struct file *,
58512 unsigned long, loff_t *);
58513
58514 struct super_operations {
58515- struct inode *(*alloc_inode)(struct super_block *sb);
58516- void (*destroy_inode)(struct inode *);
58517+ struct inode *(* const alloc_inode)(struct super_block *sb);
58518+ void (* const destroy_inode)(struct inode *);
58519
58520- void (*dirty_inode) (struct inode *);
58521- int (*write_inode) (struct inode *, int);
58522- void (*drop_inode) (struct inode *);
58523- void (*delete_inode) (struct inode *);
58524- void (*put_super) (struct super_block *);
58525- void (*write_super) (struct super_block *);
58526- int (*sync_fs)(struct super_block *sb, int wait);
58527- int (*freeze_fs) (struct super_block *);
58528- int (*unfreeze_fs) (struct super_block *);
58529- int (*statfs) (struct dentry *, struct kstatfs *);
58530- int (*remount_fs) (struct super_block *, int *, char *);
58531- void (*clear_inode) (struct inode *);
58532- void (*umount_begin) (struct super_block *);
58533+ void (* const dirty_inode) (struct inode *);
58534+ int (* const write_inode) (struct inode *, int);
58535+ void (* const drop_inode) (struct inode *);
58536+ void (* const delete_inode) (struct inode *);
58537+ void (* const put_super) (struct super_block *);
58538+ void (* const write_super) (struct super_block *);
58539+ int (* const sync_fs)(struct super_block *sb, int wait);
58540+ int (* const freeze_fs) (struct super_block *);
58541+ int (* const unfreeze_fs) (struct super_block *);
58542+ int (* const statfs) (struct dentry *, struct kstatfs *);
58543+ int (* const remount_fs) (struct super_block *, int *, char *);
58544+ void (* const clear_inode) (struct inode *);
58545+ void (* const umount_begin) (struct super_block *);
58546
58547- int (*show_options)(struct seq_file *, struct vfsmount *);
58548- int (*show_stats)(struct seq_file *, struct vfsmount *);
58549+ int (* const show_options)(struct seq_file *, struct vfsmount *);
58550+ int (* const show_stats)(struct seq_file *, struct vfsmount *);
58551 #ifdef CONFIG_QUOTA
58552- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
58553- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
58554+ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t);
58555+ ssize_t (* const quota_write)(struct super_block *, int, const char *, size_t, loff_t);
58556 #endif
58557- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
58558+ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);
58559 };
58560
58561 /*
58562diff -urNp linux-2.6.32.43/include/linux/fs_struct.h linux-2.6.32.43/include/linux/fs_struct.h
58563--- linux-2.6.32.43/include/linux/fs_struct.h 2011-03-27 14:31:47.000000000 -0400
58564+++ linux-2.6.32.43/include/linux/fs_struct.h 2011-04-17 15:56:46.000000000 -0400
58565@@ -4,7 +4,7 @@
58566 #include <linux/path.h>
58567
58568 struct fs_struct {
58569- int users;
58570+ atomic_t users;
58571 rwlock_t lock;
58572 int umask;
58573 int in_exec;
58574diff -urNp linux-2.6.32.43/include/linux/ftrace_event.h linux-2.6.32.43/include/linux/ftrace_event.h
58575--- linux-2.6.32.43/include/linux/ftrace_event.h 2011-03-27 14:31:47.000000000 -0400
58576+++ linux-2.6.32.43/include/linux/ftrace_event.h 2011-05-04 17:56:28.000000000 -0400
58577@@ -163,7 +163,7 @@ extern int trace_define_field(struct ftr
58578 int filter_type);
58579 extern int trace_define_common_fields(struct ftrace_event_call *call);
58580
58581-#define is_signed_type(type) (((type)(-1)) < 0)
58582+#define is_signed_type(type) (((type)(-1)) < (type)1)
58583
58584 int trace_set_clr_event(const char *system, const char *event, int set);
58585
58586diff -urNp linux-2.6.32.43/include/linux/genhd.h linux-2.6.32.43/include/linux/genhd.h
58587--- linux-2.6.32.43/include/linux/genhd.h 2011-03-27 14:31:47.000000000 -0400
58588+++ linux-2.6.32.43/include/linux/genhd.h 2011-04-17 15:56:46.000000000 -0400
58589@@ -161,7 +161,7 @@ struct gendisk {
58590
58591 struct timer_rand_state *random;
58592
58593- atomic_t sync_io; /* RAID */
58594+ atomic_unchecked_t sync_io; /* RAID */
58595 struct work_struct async_notify;
58596 #ifdef CONFIG_BLK_DEV_INTEGRITY
58597 struct blk_integrity *integrity;
58598diff -urNp linux-2.6.32.43/include/linux/gracl.h linux-2.6.32.43/include/linux/gracl.h
58599--- linux-2.6.32.43/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
58600+++ linux-2.6.32.43/include/linux/gracl.h 2011-04-17 15:56:46.000000000 -0400
58601@@ -0,0 +1,317 @@
58602+#ifndef GR_ACL_H
58603+#define GR_ACL_H
58604+
58605+#include <linux/grdefs.h>
58606+#include <linux/resource.h>
58607+#include <linux/capability.h>
58608+#include <linux/dcache.h>
58609+#include <asm/resource.h>
58610+
58611+/* Major status information */
58612+
58613+#define GR_VERSION "grsecurity 2.2.2"
58614+#define GRSECURITY_VERSION 0x2202
58615+
58616+enum {
58617+ GR_SHUTDOWN = 0,
58618+ GR_ENABLE = 1,
58619+ GR_SPROLE = 2,
58620+ GR_RELOAD = 3,
58621+ GR_SEGVMOD = 4,
58622+ GR_STATUS = 5,
58623+ GR_UNSPROLE = 6,
58624+ GR_PASSSET = 7,
58625+ GR_SPROLEPAM = 8,
58626+};
58627+
58628+/* Password setup definitions
58629+ * kernel/grhash.c */
58630+enum {
58631+ GR_PW_LEN = 128,
58632+ GR_SALT_LEN = 16,
58633+ GR_SHA_LEN = 32,
58634+};
58635+
58636+enum {
58637+ GR_SPROLE_LEN = 64,
58638+};
58639+
58640+enum {
58641+ GR_NO_GLOB = 0,
58642+ GR_REG_GLOB,
58643+ GR_CREATE_GLOB
58644+};
58645+
58646+#define GR_NLIMITS 32
58647+
58648+/* Begin Data Structures */
58649+
58650+struct sprole_pw {
58651+ unsigned char *rolename;
58652+ unsigned char salt[GR_SALT_LEN];
58653+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
58654+};
58655+
58656+struct name_entry {
58657+ __u32 key;
58658+ ino_t inode;
58659+ dev_t device;
58660+ char *name;
58661+ __u16 len;
58662+ __u8 deleted;
58663+ struct name_entry *prev;
58664+ struct name_entry *next;
58665+};
58666+
58667+struct inodev_entry {
58668+ struct name_entry *nentry;
58669+ struct inodev_entry *prev;
58670+ struct inodev_entry *next;
58671+};
58672+
58673+struct acl_role_db {
58674+ struct acl_role_label **r_hash;
58675+ __u32 r_size;
58676+};
58677+
58678+struct inodev_db {
58679+ struct inodev_entry **i_hash;
58680+ __u32 i_size;
58681+};
58682+
58683+struct name_db {
58684+ struct name_entry **n_hash;
58685+ __u32 n_size;
58686+};
58687+
58688+struct crash_uid {
58689+ uid_t uid;
58690+ unsigned long expires;
58691+};
58692+
58693+struct gr_hash_struct {
58694+ void **table;
58695+ void **nametable;
58696+ void *first;
58697+ __u32 table_size;
58698+ __u32 used_size;
58699+ int type;
58700+};
58701+
58702+/* Userspace Grsecurity ACL data structures */
58703+
58704+struct acl_subject_label {
58705+ char *filename;
58706+ ino_t inode;
58707+ dev_t device;
58708+ __u32 mode;
58709+ kernel_cap_t cap_mask;
58710+ kernel_cap_t cap_lower;
58711+ kernel_cap_t cap_invert_audit;
58712+
58713+ struct rlimit res[GR_NLIMITS];
58714+ __u32 resmask;
58715+
58716+ __u8 user_trans_type;
58717+ __u8 group_trans_type;
58718+ uid_t *user_transitions;
58719+ gid_t *group_transitions;
58720+ __u16 user_trans_num;
58721+ __u16 group_trans_num;
58722+
58723+ __u32 sock_families[2];
58724+ __u32 ip_proto[8];
58725+ __u32 ip_type;
58726+ struct acl_ip_label **ips;
58727+ __u32 ip_num;
58728+ __u32 inaddr_any_override;
58729+
58730+ __u32 crashes;
58731+ unsigned long expires;
58732+
58733+ struct acl_subject_label *parent_subject;
58734+ struct gr_hash_struct *hash;
58735+ struct acl_subject_label *prev;
58736+ struct acl_subject_label *next;
58737+
58738+ struct acl_object_label **obj_hash;
58739+ __u32 obj_hash_size;
58740+ __u16 pax_flags;
58741+};
58742+
58743+struct role_allowed_ip {
58744+ __u32 addr;
58745+ __u32 netmask;
58746+
58747+ struct role_allowed_ip *prev;
58748+ struct role_allowed_ip *next;
58749+};
58750+
58751+struct role_transition {
58752+ char *rolename;
58753+
58754+ struct role_transition *prev;
58755+ struct role_transition *next;
58756+};
58757+
58758+struct acl_role_label {
58759+ char *rolename;
58760+ uid_t uidgid;
58761+ __u16 roletype;
58762+
58763+ __u16 auth_attempts;
58764+ unsigned long expires;
58765+
58766+ struct acl_subject_label *root_label;
58767+ struct gr_hash_struct *hash;
58768+
58769+ struct acl_role_label *prev;
58770+ struct acl_role_label *next;
58771+
58772+ struct role_transition *transitions;
58773+ struct role_allowed_ip *allowed_ips;
58774+ uid_t *domain_children;
58775+ __u16 domain_child_num;
58776+
58777+ struct acl_subject_label **subj_hash;
58778+ __u32 subj_hash_size;
58779+};
58780+
58781+struct user_acl_role_db {
58782+ struct acl_role_label **r_table;
58783+ __u32 num_pointers; /* Number of allocations to track */
58784+ __u32 num_roles; /* Number of roles */
58785+ __u32 num_domain_children; /* Number of domain children */
58786+ __u32 num_subjects; /* Number of subjects */
58787+ __u32 num_objects; /* Number of objects */
58788+};
58789+
58790+struct acl_object_label {
58791+ char *filename;
58792+ ino_t inode;
58793+ dev_t device;
58794+ __u32 mode;
58795+
58796+ struct acl_subject_label *nested;
58797+ struct acl_object_label *globbed;
58798+
58799+ /* next two structures not used */
58800+
58801+ struct acl_object_label *prev;
58802+ struct acl_object_label *next;
58803+};
58804+
58805+struct acl_ip_label {
58806+ char *iface;
58807+ __u32 addr;
58808+ __u32 netmask;
58809+ __u16 low, high;
58810+ __u8 mode;
58811+ __u32 type;
58812+ __u32 proto[8];
58813+
58814+ /* next two structures not used */
58815+
58816+ struct acl_ip_label *prev;
58817+ struct acl_ip_label *next;
58818+};
58819+
58820+struct gr_arg {
58821+ struct user_acl_role_db role_db;
58822+ unsigned char pw[GR_PW_LEN];
58823+ unsigned char salt[GR_SALT_LEN];
58824+ unsigned char sum[GR_SHA_LEN];
58825+ unsigned char sp_role[GR_SPROLE_LEN];
58826+ struct sprole_pw *sprole_pws;
58827+ dev_t segv_device;
58828+ ino_t segv_inode;
58829+ uid_t segv_uid;
58830+ __u16 num_sprole_pws;
58831+ __u16 mode;
58832+};
58833+
58834+struct gr_arg_wrapper {
58835+ struct gr_arg *arg;
58836+ __u32 version;
58837+ __u32 size;
58838+};
58839+
58840+struct subject_map {
58841+ struct acl_subject_label *user;
58842+ struct acl_subject_label *kernel;
58843+ struct subject_map *prev;
58844+ struct subject_map *next;
58845+};
58846+
58847+struct acl_subj_map_db {
58848+ struct subject_map **s_hash;
58849+ __u32 s_size;
58850+};
58851+
58852+/* End Data Structures Section */
58853+
58854+/* Hash functions generated by empirical testing by Brad Spengler
58855+ Makes good use of the low bits of the inode. Generally 0-1 times
58856+ in loop for successful match. 0-3 for unsuccessful match.
58857+ Shift/add algorithm with modulus of table size and an XOR*/
58858+
58859+static __inline__ unsigned int
58860+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
58861+{
58862+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
58863+}
58864+
58865+ static __inline__ unsigned int
58866+shash(const struct acl_subject_label *userp, const unsigned int sz)
58867+{
58868+ return ((const unsigned long)userp % sz);
58869+}
58870+
58871+static __inline__ unsigned int
58872+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
58873+{
58874+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
58875+}
58876+
58877+static __inline__ unsigned int
58878+nhash(const char *name, const __u16 len, const unsigned int sz)
58879+{
58880+ return full_name_hash((const unsigned char *)name, len) % sz;
58881+}
58882+
58883+#define FOR_EACH_ROLE_START(role) \
58884+ role = role_list; \
58885+ while (role) {
58886+
58887+#define FOR_EACH_ROLE_END(role) \
58888+ role = role->prev; \
58889+ }
58890+
58891+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
58892+ subj = NULL; \
58893+ iter = 0; \
58894+ while (iter < role->subj_hash_size) { \
58895+ if (subj == NULL) \
58896+ subj = role->subj_hash[iter]; \
58897+ if (subj == NULL) { \
58898+ iter++; \
58899+ continue; \
58900+ }
58901+
58902+#define FOR_EACH_SUBJECT_END(subj,iter) \
58903+ subj = subj->next; \
58904+ if (subj == NULL) \
58905+ iter++; \
58906+ }
58907+
58908+
58909+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
58910+ subj = role->hash->first; \
58911+ while (subj != NULL) {
58912+
58913+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
58914+ subj = subj->next; \
58915+ }
58916+
58917+#endif
58918+
58919diff -urNp linux-2.6.32.43/include/linux/gralloc.h linux-2.6.32.43/include/linux/gralloc.h
58920--- linux-2.6.32.43/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
58921+++ linux-2.6.32.43/include/linux/gralloc.h 2011-04-17 15:56:46.000000000 -0400
58922@@ -0,0 +1,9 @@
58923+#ifndef __GRALLOC_H
58924+#define __GRALLOC_H
58925+
58926+void acl_free_all(void);
58927+int acl_alloc_stack_init(unsigned long size);
58928+void *acl_alloc(unsigned long len);
58929+void *acl_alloc_num(unsigned long num, unsigned long len);
58930+
58931+#endif
58932diff -urNp linux-2.6.32.43/include/linux/grdefs.h linux-2.6.32.43/include/linux/grdefs.h
58933--- linux-2.6.32.43/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
58934+++ linux-2.6.32.43/include/linux/grdefs.h 2011-06-11 16:20:26.000000000 -0400
58935@@ -0,0 +1,140 @@
58936+#ifndef GRDEFS_H
58937+#define GRDEFS_H
58938+
58939+/* Begin grsecurity status declarations */
58940+
58941+enum {
58942+ GR_READY = 0x01,
58943+ GR_STATUS_INIT = 0x00 // disabled state
58944+};
58945+
58946+/* Begin ACL declarations */
58947+
58948+/* Role flags */
58949+
58950+enum {
58951+ GR_ROLE_USER = 0x0001,
58952+ GR_ROLE_GROUP = 0x0002,
58953+ GR_ROLE_DEFAULT = 0x0004,
58954+ GR_ROLE_SPECIAL = 0x0008,
58955+ GR_ROLE_AUTH = 0x0010,
58956+ GR_ROLE_NOPW = 0x0020,
58957+ GR_ROLE_GOD = 0x0040,
58958+ GR_ROLE_LEARN = 0x0080,
58959+ GR_ROLE_TPE = 0x0100,
58960+ GR_ROLE_DOMAIN = 0x0200,
58961+ GR_ROLE_PAM = 0x0400,
58962+ GR_ROLE_PERSIST = 0x800
58963+};
58964+
58965+/* ACL Subject and Object mode flags */
58966+enum {
58967+ GR_DELETED = 0x80000000
58968+};
58969+
58970+/* ACL Object-only mode flags */
58971+enum {
58972+ GR_READ = 0x00000001,
58973+ GR_APPEND = 0x00000002,
58974+ GR_WRITE = 0x00000004,
58975+ GR_EXEC = 0x00000008,
58976+ GR_FIND = 0x00000010,
58977+ GR_INHERIT = 0x00000020,
58978+ GR_SETID = 0x00000040,
58979+ GR_CREATE = 0x00000080,
58980+ GR_DELETE = 0x00000100,
58981+ GR_LINK = 0x00000200,
58982+ GR_AUDIT_READ = 0x00000400,
58983+ GR_AUDIT_APPEND = 0x00000800,
58984+ GR_AUDIT_WRITE = 0x00001000,
58985+ GR_AUDIT_EXEC = 0x00002000,
58986+ GR_AUDIT_FIND = 0x00004000,
58987+ GR_AUDIT_INHERIT= 0x00008000,
58988+ GR_AUDIT_SETID = 0x00010000,
58989+ GR_AUDIT_CREATE = 0x00020000,
58990+ GR_AUDIT_DELETE = 0x00040000,
58991+ GR_AUDIT_LINK = 0x00080000,
58992+ GR_PTRACERD = 0x00100000,
58993+ GR_NOPTRACE = 0x00200000,
58994+ GR_SUPPRESS = 0x00400000,
58995+ GR_NOLEARN = 0x00800000,
58996+ GR_INIT_TRANSFER= 0x01000000
58997+};
58998+
58999+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
59000+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
59001+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
59002+
59003+/* ACL subject-only mode flags */
59004+enum {
59005+ GR_KILL = 0x00000001,
59006+ GR_VIEW = 0x00000002,
59007+ GR_PROTECTED = 0x00000004,
59008+ GR_LEARN = 0x00000008,
59009+ GR_OVERRIDE = 0x00000010,
59010+ /* just a placeholder, this mode is only used in userspace */
59011+ GR_DUMMY = 0x00000020,
59012+ GR_PROTSHM = 0x00000040,
59013+ GR_KILLPROC = 0x00000080,
59014+ GR_KILLIPPROC = 0x00000100,
59015+ /* just a placeholder, this mode is only used in userspace */
59016+ GR_NOTROJAN = 0x00000200,
59017+ GR_PROTPROCFD = 0x00000400,
59018+ GR_PROCACCT = 0x00000800,
59019+ GR_RELAXPTRACE = 0x00001000,
59020+ GR_NESTED = 0x00002000,
59021+ GR_INHERITLEARN = 0x00004000,
59022+ GR_PROCFIND = 0x00008000,
59023+ GR_POVERRIDE = 0x00010000,
59024+ GR_KERNELAUTH = 0x00020000,
59025+ GR_ATSECURE = 0x00040000,
59026+ GR_SHMEXEC = 0x00080000
59027+};
59028+
59029+enum {
59030+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
59031+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
59032+ GR_PAX_ENABLE_MPROTECT = 0x0004,
59033+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
59034+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
59035+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
59036+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
59037+ GR_PAX_DISABLE_MPROTECT = 0x0400,
59038+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
59039+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
59040+};
59041+
59042+enum {
59043+ GR_ID_USER = 0x01,
59044+ GR_ID_GROUP = 0x02,
59045+};
59046+
59047+enum {
59048+ GR_ID_ALLOW = 0x01,
59049+ GR_ID_DENY = 0x02,
59050+};
59051+
59052+#define GR_CRASH_RES 31
59053+#define GR_UIDTABLE_MAX 500
59054+
59055+/* begin resource learning section */
59056+enum {
59057+ GR_RLIM_CPU_BUMP = 60,
59058+ GR_RLIM_FSIZE_BUMP = 50000,
59059+ GR_RLIM_DATA_BUMP = 10000,
59060+ GR_RLIM_STACK_BUMP = 1000,
59061+ GR_RLIM_CORE_BUMP = 10000,
59062+ GR_RLIM_RSS_BUMP = 500000,
59063+ GR_RLIM_NPROC_BUMP = 1,
59064+ GR_RLIM_NOFILE_BUMP = 5,
59065+ GR_RLIM_MEMLOCK_BUMP = 50000,
59066+ GR_RLIM_AS_BUMP = 500000,
59067+ GR_RLIM_LOCKS_BUMP = 2,
59068+ GR_RLIM_SIGPENDING_BUMP = 5,
59069+ GR_RLIM_MSGQUEUE_BUMP = 10000,
59070+ GR_RLIM_NICE_BUMP = 1,
59071+ GR_RLIM_RTPRIO_BUMP = 1,
59072+ GR_RLIM_RTTIME_BUMP = 1000000
59073+};
59074+
59075+#endif
59076diff -urNp linux-2.6.32.43/include/linux/grinternal.h linux-2.6.32.43/include/linux/grinternal.h
59077--- linux-2.6.32.43/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
59078+++ linux-2.6.32.43/include/linux/grinternal.h 2011-07-14 20:35:29.000000000 -0400
59079@@ -0,0 +1,218 @@
59080+#ifndef __GRINTERNAL_H
59081+#define __GRINTERNAL_H
59082+
59083+#ifdef CONFIG_GRKERNSEC
59084+
59085+#include <linux/fs.h>
59086+#include <linux/mnt_namespace.h>
59087+#include <linux/nsproxy.h>
59088+#include <linux/gracl.h>
59089+#include <linux/grdefs.h>
59090+#include <linux/grmsg.h>
59091+
59092+void gr_add_learn_entry(const char *fmt, ...)
59093+ __attribute__ ((format (printf, 1, 2)));
59094+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
59095+ const struct vfsmount *mnt);
59096+__u32 gr_check_create(const struct dentry *new_dentry,
59097+ const struct dentry *parent,
59098+ const struct vfsmount *mnt, const __u32 mode);
59099+int gr_check_protected_task(const struct task_struct *task);
59100+__u32 to_gr_audit(const __u32 reqmode);
59101+int gr_set_acls(const int type);
59102+int gr_apply_subject_to_task(struct task_struct *task);
59103+int gr_acl_is_enabled(void);
59104+char gr_roletype_to_char(void);
59105+
59106+void gr_handle_alertkill(struct task_struct *task);
59107+char *gr_to_filename(const struct dentry *dentry,
59108+ const struct vfsmount *mnt);
59109+char *gr_to_filename1(const struct dentry *dentry,
59110+ const struct vfsmount *mnt);
59111+char *gr_to_filename2(const struct dentry *dentry,
59112+ const struct vfsmount *mnt);
59113+char *gr_to_filename3(const struct dentry *dentry,
59114+ const struct vfsmount *mnt);
59115+
59116+extern int grsec_enable_harden_ptrace;
59117+extern int grsec_enable_link;
59118+extern int grsec_enable_fifo;
59119+extern int grsec_enable_execve;
59120+extern int grsec_enable_shm;
59121+extern int grsec_enable_execlog;
59122+extern int grsec_enable_signal;
59123+extern int grsec_enable_audit_ptrace;
59124+extern int grsec_enable_forkfail;
59125+extern int grsec_enable_time;
59126+extern int grsec_enable_rofs;
59127+extern int grsec_enable_chroot_shmat;
59128+extern int grsec_enable_chroot_mount;
59129+extern int grsec_enable_chroot_double;
59130+extern int grsec_enable_chroot_pivot;
59131+extern int grsec_enable_chroot_chdir;
59132+extern int grsec_enable_chroot_chmod;
59133+extern int grsec_enable_chroot_mknod;
59134+extern int grsec_enable_chroot_fchdir;
59135+extern int grsec_enable_chroot_nice;
59136+extern int grsec_enable_chroot_execlog;
59137+extern int grsec_enable_chroot_caps;
59138+extern int grsec_enable_chroot_sysctl;
59139+extern int grsec_enable_chroot_unix;
59140+extern int grsec_enable_tpe;
59141+extern int grsec_tpe_gid;
59142+extern int grsec_enable_tpe_all;
59143+extern int grsec_enable_tpe_invert;
59144+extern int grsec_enable_socket_all;
59145+extern int grsec_socket_all_gid;
59146+extern int grsec_enable_socket_client;
59147+extern int grsec_socket_client_gid;
59148+extern int grsec_enable_socket_server;
59149+extern int grsec_socket_server_gid;
59150+extern int grsec_audit_gid;
59151+extern int grsec_enable_group;
59152+extern int grsec_enable_audit_textrel;
59153+extern int grsec_enable_log_rwxmaps;
59154+extern int grsec_enable_mount;
59155+extern int grsec_enable_chdir;
59156+extern int grsec_resource_logging;
59157+extern int grsec_enable_blackhole;
59158+extern int grsec_lastack_retries;
59159+extern int grsec_enable_brute;
59160+extern int grsec_lock;
59161+
59162+extern spinlock_t grsec_alert_lock;
59163+extern unsigned long grsec_alert_wtime;
59164+extern unsigned long grsec_alert_fyet;
59165+
59166+extern spinlock_t grsec_audit_lock;
59167+
59168+extern rwlock_t grsec_exec_file_lock;
59169+
59170+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
59171+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
59172+ (tsk)->exec_file->f_vfsmnt) : "/")
59173+
59174+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
59175+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
59176+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
59177+
59178+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
59179+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
59180+ (tsk)->exec_file->f_vfsmnt) : "/")
59181+
59182+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
59183+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
59184+ (tsk)->real_parent->exec_file->f_vfsmnt) : "/")
59185+
59186+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
59187+
59188+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
59189+
59190+#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), (task)->comm, \
59191+ (task)->pid, (cred)->uid, \
59192+ (cred)->euid, (cred)->gid, (cred)->egid, \
59193+ gr_parent_task_fullpath(task), \
59194+ (task)->real_parent->comm, (task)->real_parent->pid, \
59195+ (pcred)->uid, (pcred)->euid, \
59196+ (pcred)->gid, (pcred)->egid
59197+
59198+#define GR_CHROOT_CAPS {{ \
59199+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
59200+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
59201+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
59202+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
59203+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
59204+ CAP_TO_MASK(CAP_IPC_OWNER) , 0 }}
59205+
59206+#define security_learn(normal_msg,args...) \
59207+({ \
59208+ read_lock(&grsec_exec_file_lock); \
59209+ gr_add_learn_entry(normal_msg "\n", ## args); \
59210+ read_unlock(&grsec_exec_file_lock); \
59211+})
59212+
59213+enum {
59214+ GR_DO_AUDIT,
59215+ GR_DONT_AUDIT,
59216+ GR_DONT_AUDIT_GOOD
59217+};
59218+
59219+enum {
59220+ GR_TTYSNIFF,
59221+ GR_RBAC,
59222+ GR_RBAC_STR,
59223+ GR_STR_RBAC,
59224+ GR_RBAC_MODE2,
59225+ GR_RBAC_MODE3,
59226+ GR_FILENAME,
59227+ GR_SYSCTL_HIDDEN,
59228+ GR_NOARGS,
59229+ GR_ONE_INT,
59230+ GR_ONE_INT_TWO_STR,
59231+ GR_ONE_STR,
59232+ GR_STR_INT,
59233+ GR_TWO_STR_INT,
59234+ GR_TWO_INT,
59235+ GR_TWO_U64,
59236+ GR_THREE_INT,
59237+ GR_FIVE_INT_TWO_STR,
59238+ GR_TWO_STR,
59239+ GR_THREE_STR,
59240+ GR_FOUR_STR,
59241+ GR_STR_FILENAME,
59242+ GR_FILENAME_STR,
59243+ GR_FILENAME_TWO_INT,
59244+ GR_FILENAME_TWO_INT_STR,
59245+ GR_TEXTREL,
59246+ GR_PTRACE,
59247+ GR_RESOURCE,
59248+ GR_CAP,
59249+ GR_SIG,
59250+ GR_SIG2,
59251+ GR_CRASH1,
59252+ GR_CRASH2,
59253+ GR_PSACCT,
59254+ GR_RWXMAP
59255+};
59256+
59257+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
59258+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
59259+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
59260+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
59261+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
59262+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
59263+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
59264+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
59265+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
59266+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
59267+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
59268+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
59269+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
59270+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
59271+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
59272+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
59273+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
59274+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
59275+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
59276+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
59277+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
59278+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
59279+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
59280+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
59281+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
59282+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
59283+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
59284+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
59285+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
59286+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
59287+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
59288+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
59289+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
59290+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
59291+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
59292+
59293+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
59294+
59295+#endif
59296+
59297+#endif
59298diff -urNp linux-2.6.32.43/include/linux/grmsg.h linux-2.6.32.43/include/linux/grmsg.h
59299--- linux-2.6.32.43/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
59300+++ linux-2.6.32.43/include/linux/grmsg.h 2011-04-17 15:56:46.000000000 -0400
59301@@ -0,0 +1,108 @@
59302+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
59303+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
59304+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
59305+#define GR_STOPMOD_MSG "denied modification of module state by "
59306+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
59307+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
59308+#define GR_IOPERM_MSG "denied use of ioperm() by "
59309+#define GR_IOPL_MSG "denied use of iopl() by "
59310+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
59311+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
59312+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
59313+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
59314+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
59315+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
59316+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
59317+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
59318+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
59319+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
59320+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
59321+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
59322+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
59323+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
59324+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
59325+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
59326+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
59327+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
59328+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
59329+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
59330+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
59331+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
59332+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
59333+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
59334+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
59335+#define GR_NPROC_MSG "denied overstep of process limit by "
59336+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
59337+#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by "
59338+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
59339+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
59340+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
59341+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
59342+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
59343+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
59344+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
59345+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
59346+#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by "
59347+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
59348+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
59349+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
59350+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
59351+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
59352+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
59353+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
59354+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
59355+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by "
59356+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
59357+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
59358+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
59359+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
59360+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
59361+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
59362+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
59363+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
59364+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
59365+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
59366+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
59367+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
59368+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
59369+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
59370+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
59371+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
59372+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
59373+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
59374+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
59375+#define GR_FAILFORK_MSG "failed fork with errno %s by "
59376+#define GR_NICE_CHROOT_MSG "denied priority change by "
59377+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
59378+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
59379+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
59380+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
59381+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
59382+#define GR_TIME_MSG "time set by "
59383+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
59384+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
59385+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
59386+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
59387+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
59388+#define GR_BIND_MSG "denied bind() by "
59389+#define GR_CONNECT_MSG "denied connect() by "
59390+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
59391+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
59392+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
59393+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
59394+#define GR_CAP_ACL_MSG "use of %s denied for "
59395+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
59396+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
59397+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
59398+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
59399+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
59400+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
59401+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
59402+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
59403+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
59404+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
59405+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
59406+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
59407+#define GR_VM86_MSG "denied use of vm86 by "
59408+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
59409+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
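
The macros above are pure format strings: composite messages such as GR_DUALSIGLOG_MSG splice DEFAULTSECMSG in through C's adjacent-string-literal concatenation, and the trailing "by " is completed by the logging code with details of the acting task. A minimal userspace sketch of that concatenation, using shortened stand-in macros (TASK_FMT and SIG_MSG are illustrative names, not taken from the patch):

#include <stdio.h>

/* Illustrative stand-ins: the real DEFAULTSECMSG/GR_*_MSG macros follow
 * the same pattern but carry many more fields. */
#define TASK_FMT "%.16s[%d] uid:%u"
#define SIG_MSG  "signal %d sent to " TASK_FMT " by "

int main(void)
{
	/* Adjacent string literals merge at compile time, so SIG_MSG is one
	 * format string; the trailing "by " is finished by whatever the
	 * logger appends about the acting task. */
	printf(SIG_MSG "%.16s[%d]\n", 9, "sshd", 4321, 0u, "bash", 1234);
	return 0;
}

The same trick is what lets GR_SEGVSTART_ACL_MSG and the other composite messages remain a single printk-style format despite being assembled from pieces.
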
59410diff -urNp linux-2.6.32.43/include/linux/grsecurity.h linux-2.6.32.43/include/linux/grsecurity.h
59411--- linux-2.6.32.43/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
59412+++ linux-2.6.32.43/include/linux/grsecurity.h 2011-08-05 19:53:46.000000000 -0400
59413@@ -0,0 +1,218 @@
59414+#ifndef GR_SECURITY_H
59415+#define GR_SECURITY_H
59416+#include <linux/fs.h>
59417+#include <linux/fs_struct.h>
59418+#include <linux/binfmts.h>
59419+#include <linux/gracl.h>
59420+#include <linux/compat.h>
59421+
59422+/* notify of brain-dead configs */
59423+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
59424+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
59425+#endif
59426+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
59427+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
59428+#endif
59429+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
59430+#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
59431+#endif
59432+#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS)
59433+#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled."
59434+#endif
59435+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
59436+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
59437+#endif
59438+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
59439+#error "CONFIG_PAX enabled, but no PaX options are enabled."
59440+#endif
59441+
59442+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags);
59443+void gr_handle_brute_check(void);
59444+void gr_handle_kernel_exploit(void);
59445+int gr_process_user_ban(void);
59446+
59447+char gr_roletype_to_char(void);
59448+
59449+int gr_acl_enable_at_secure(void);
59450+
59451+int gr_check_user_change(int real, int effective, int fs);
59452+int gr_check_group_change(int real, int effective, int fs);
59453+
59454+void gr_del_task_from_ip_table(struct task_struct *p);
59455+
59456+int gr_pid_is_chrooted(struct task_struct *p);
59457+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
59458+int gr_handle_chroot_nice(void);
59459+int gr_handle_chroot_sysctl(const int op);
59460+int gr_handle_chroot_setpriority(struct task_struct *p,
59461+ const int niceval);
59462+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
59463+int gr_handle_chroot_chroot(const struct dentry *dentry,
59464+ const struct vfsmount *mnt);
59465+int gr_handle_chroot_caps(struct path *path);
59466+void gr_handle_chroot_chdir(struct path *path);
59467+int gr_handle_chroot_chmod(const struct dentry *dentry,
59468+ const struct vfsmount *mnt, const int mode);
59469+int gr_handle_chroot_mknod(const struct dentry *dentry,
59470+ const struct vfsmount *mnt, const int mode);
59471+int gr_handle_chroot_mount(const struct dentry *dentry,
59472+ const struct vfsmount *mnt,
59473+ const char *dev_name);
59474+int gr_handle_chroot_pivot(void);
59475+int gr_handle_chroot_unix(const pid_t pid);
59476+
59477+int gr_handle_rawio(const struct inode *inode);
59478+int gr_handle_nproc(void);
59479+
59480+void gr_handle_ioperm(void);
59481+void gr_handle_iopl(void);
59482+
59483+int gr_tpe_allow(const struct file *file);
59484+
59485+void gr_set_chroot_entries(struct task_struct *task, struct path *path);
59486+void gr_clear_chroot_entries(struct task_struct *task);
59487+
59488+void gr_log_forkfail(const int retval);
59489+void gr_log_timechange(void);
59490+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
59491+void gr_log_chdir(const struct dentry *dentry,
59492+ const struct vfsmount *mnt);
59493+void gr_log_chroot_exec(const struct dentry *dentry,
59494+ const struct vfsmount *mnt);
59495+void gr_handle_exec_args(struct linux_binprm *bprm, const char __user *const __user *argv);
59496+#ifdef CONFIG_COMPAT
59497+void gr_handle_exec_args_compat(struct linux_binprm *bprm, compat_uptr_t __user *argv);
59498+#endif
59499+void gr_log_remount(const char *devname, const int retval);
59500+void gr_log_unmount(const char *devname, const int retval);
59501+void gr_log_mount(const char *from, const char *to, const int retval);
59502+void gr_log_textrel(struct vm_area_struct *vma);
59503+void gr_log_rwxmmap(struct file *file);
59504+void gr_log_rwxmprotect(struct file *file);
59505+
59506+int gr_handle_follow_link(const struct inode *parent,
59507+ const struct inode *inode,
59508+ const struct dentry *dentry,
59509+ const struct vfsmount *mnt);
59510+int gr_handle_fifo(const struct dentry *dentry,
59511+ const struct vfsmount *mnt,
59512+ const struct dentry *dir, const int flag,
59513+ const int acc_mode);
59514+int gr_handle_hardlink(const struct dentry *dentry,
59515+ const struct vfsmount *mnt,
59516+ struct inode *inode,
59517+ const int mode, const char *to);
59518+
59519+int gr_is_capable(const int cap);
59520+int gr_is_capable_nolog(const int cap);
59521+void gr_learn_resource(const struct task_struct *task, const int limit,
59522+ const unsigned long wanted, const int gt);
59523+void gr_copy_label(struct task_struct *tsk);
59524+void gr_handle_crash(struct task_struct *task, const int sig);
59525+int gr_handle_signal(const struct task_struct *p, const int sig);
59526+int gr_check_crash_uid(const uid_t uid);
59527+int gr_check_protected_task(const struct task_struct *task);
59528+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
59529+int gr_acl_handle_mmap(const struct file *file,
59530+ const unsigned long prot);
59531+int gr_acl_handle_mprotect(const struct file *file,
59532+ const unsigned long prot);
59533+int gr_check_hidden_task(const struct task_struct *tsk);
59534+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
59535+ const struct vfsmount *mnt);
59536+__u32 gr_acl_handle_utime(const struct dentry *dentry,
59537+ const struct vfsmount *mnt);
59538+__u32 gr_acl_handle_access(const struct dentry *dentry,
59539+ const struct vfsmount *mnt, const int fmode);
59540+__u32 gr_acl_handle_fchmod(const struct dentry *dentry,
59541+ const struct vfsmount *mnt, mode_t mode);
59542+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
59543+ const struct vfsmount *mnt, mode_t mode);
59544+__u32 gr_acl_handle_chown(const struct dentry *dentry,
59545+ const struct vfsmount *mnt);
59546+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
59547+ const struct vfsmount *mnt);
59548+int gr_handle_ptrace(struct task_struct *task, const long request);
59549+int gr_handle_proc_ptrace(struct task_struct *task);
59550+__u32 gr_acl_handle_execve(const struct dentry *dentry,
59551+ const struct vfsmount *mnt);
59552+int gr_check_crash_exec(const struct file *filp);
59553+int gr_acl_is_enabled(void);
59554+void gr_set_kernel_label(struct task_struct *task);
59555+void gr_set_role_label(struct task_struct *task, const uid_t uid,
59556+ const gid_t gid);
59557+int gr_set_proc_label(const struct dentry *dentry,
59558+ const struct vfsmount *mnt,
59559+ const int unsafe_share);
59560+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
59561+ const struct vfsmount *mnt);
59562+__u32 gr_acl_handle_open(const struct dentry *dentry,
59563+ const struct vfsmount *mnt, const int fmode);
59564+__u32 gr_acl_handle_creat(const struct dentry *dentry,
59565+ const struct dentry *p_dentry,
59566+ const struct vfsmount *p_mnt, const int fmode,
59567+ const int imode);
59568+void gr_handle_create(const struct dentry *dentry,
59569+ const struct vfsmount *mnt);
59570+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
59571+ const struct dentry *parent_dentry,
59572+ const struct vfsmount *parent_mnt,
59573+ const int mode);
59574+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
59575+ const struct dentry *parent_dentry,
59576+ const struct vfsmount *parent_mnt);
59577+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
59578+ const struct vfsmount *mnt);
59579+void gr_handle_delete(const ino_t ino, const dev_t dev);
59580+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
59581+ const struct vfsmount *mnt);
59582+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
59583+ const struct dentry *parent_dentry,
59584+ const struct vfsmount *parent_mnt,
59585+ const char *from);
59586+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
59587+ const struct dentry *parent_dentry,
59588+ const struct vfsmount *parent_mnt,
59589+ const struct dentry *old_dentry,
59590+ const struct vfsmount *old_mnt, const char *to);
59591+int gr_acl_handle_rename(struct dentry *new_dentry,
59592+ struct dentry *parent_dentry,
59593+ const struct vfsmount *parent_mnt,
59594+ struct dentry *old_dentry,
59595+ struct inode *old_parent_inode,
59596+ struct vfsmount *old_mnt, const char *newname);
59597+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
59598+ struct dentry *old_dentry,
59599+ struct dentry *new_dentry,
59600+ struct vfsmount *mnt, const __u8 replace);
59601+__u32 gr_check_link(const struct dentry *new_dentry,
59602+ const struct dentry *parent_dentry,
59603+ const struct vfsmount *parent_mnt,
59604+ const struct dentry *old_dentry,
59605+ const struct vfsmount *old_mnt);
59606+int gr_acl_handle_filldir(const struct file *file, const char *name,
59607+ const unsigned int namelen, const ino_t ino);
59608+
59609+__u32 gr_acl_handle_unix(const struct dentry *dentry,
59610+ const struct vfsmount *mnt);
59611+void gr_acl_handle_exit(void);
59612+void gr_acl_handle_psacct(struct task_struct *task, const long code);
59613+int gr_acl_handle_procpidmem(const struct task_struct *task);
59614+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
59615+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
59616+void gr_audit_ptrace(struct task_struct *task);
59617+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
59618+
59619+#ifdef CONFIG_GRKERNSEC
59620+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
59621+void gr_handle_vm86(void);
59622+void gr_handle_mem_readwrite(u64 from, u64 to);
59623+
59624+extern int grsec_enable_dmesg;
59625+extern int grsec_disable_privio;
59626+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
59627+extern int grsec_enable_chroot_findtask;
59628+#endif
59629+#endif
59630+
59631+#endif
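
grsecurity.h collects the hook prototypes that the rest of the patch calls from VFS, signal, chroot and socket paths, after first rejecting inconsistent PaX/grsecurity configurations with #error at build time. Most hooks act as gates the caller consults before proceeding. A hedged sketch of such a caller; the function, the error code and the assumption that a nonzero return from gr_tpe_allow() means "allowed" are illustrative, not lifted from the patch:

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/grsecurity.h>

/* Hypothetical caller: bail out of an exec-style path unless the TPE
 * (trusted path execution) hook approves the file. The "nonzero means
 * allowed" convention is assumed here for illustration. */
static int example_check_exec(struct file *file)
{
	if (!gr_tpe_allow(file))
		return -EACCES;
	return 0;
}
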
59632diff -urNp linux-2.6.32.43/include/linux/hdpu_features.h linux-2.6.32.43/include/linux/hdpu_features.h
59633--- linux-2.6.32.43/include/linux/hdpu_features.h 2011-03-27 14:31:47.000000000 -0400
59634+++ linux-2.6.32.43/include/linux/hdpu_features.h 2011-04-17 15:56:46.000000000 -0400
59635@@ -3,7 +3,7 @@
59636 struct cpustate_t {
59637 spinlock_t lock;
59638 int excl;
59639- int open_count;
59640+ atomic_t open_count;
59641 unsigned char cached_val;
59642 int inited;
59643 unsigned long *set_addr;
59644diff -urNp linux-2.6.32.43/include/linux/highmem.h linux-2.6.32.43/include/linux/highmem.h
59645--- linux-2.6.32.43/include/linux/highmem.h 2011-03-27 14:31:47.000000000 -0400
59646+++ linux-2.6.32.43/include/linux/highmem.h 2011-04-17 15:56:46.000000000 -0400
59647@@ -137,6 +137,18 @@ static inline void clear_highpage(struct
59648 kunmap_atomic(kaddr, KM_USER0);
59649 }
59650
59651+static inline void sanitize_highpage(struct page *page)
59652+{
59653+ void *kaddr;
59654+ unsigned long flags;
59655+
59656+ local_irq_save(flags);
59657+ kaddr = kmap_atomic(page, KM_CLEARPAGE);
59658+ clear_page(kaddr);
59659+ kunmap_atomic(kaddr, KM_CLEARPAGE);
59660+ local_irq_restore(flags);
59661+}
59662+
59663 static inline void zero_user_segments(struct page *page,
59664 unsigned start1, unsigned end1,
59665 unsigned start2, unsigned end2)
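
sanitize_highpage() added above zeroes a page through a short-lived atomic kmap in the KM_CLEARPAGE slot, with interrupts disabled so nothing on the same CPU can reuse that fixmap entry mid-clear. A minimal sketch of a caller; the batch-scrubbing helper is a hypothetical name, not from the patch:

#include <linux/highmem.h>

/* Hypothetical helper: scrub a batch of pages before handing them back,
 * so their previous contents cannot leak to the next user. */
static void example_scrub_pages(struct page **pages, unsigned int nr)
{
	unsigned int i;

	for (i = 0; i < nr; i++)
		sanitize_highpage(pages[i]);
}
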
59666diff -urNp linux-2.6.32.43/include/linux/i2o.h linux-2.6.32.43/include/linux/i2o.h
59667--- linux-2.6.32.43/include/linux/i2o.h 2011-03-27 14:31:47.000000000 -0400
59668+++ linux-2.6.32.43/include/linux/i2o.h 2011-05-04 17:56:28.000000000 -0400
59669@@ -564,7 +564,7 @@ struct i2o_controller {
59670 struct i2o_device *exec; /* Executive */
59671 #if BITS_PER_LONG == 64
59672 spinlock_t context_list_lock; /* lock for context_list */
59673- atomic_t context_list_counter; /* needed for unique contexts */
59674+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
59675 struct list_head context_list; /* list of context id's
59676 and pointers */
59677 #endif
59678diff -urNp linux-2.6.32.43/include/linux/init_task.h linux-2.6.32.43/include/linux/init_task.h
59679--- linux-2.6.32.43/include/linux/init_task.h 2011-03-27 14:31:47.000000000 -0400
59680+++ linux-2.6.32.43/include/linux/init_task.h 2011-05-18 20:44:59.000000000 -0400
59681@@ -83,6 +83,12 @@ extern struct group_info init_groups;
59682 #define INIT_IDS
59683 #endif
59684
59685+#ifdef CONFIG_X86
59686+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
59687+#else
59688+#define INIT_TASK_THREAD_INFO
59689+#endif
59690+
59691 #ifdef CONFIG_SECURITY_FILE_CAPABILITIES
59692 /*
59693 * Because of the reduced scope of CAP_SETPCAP when filesystem
59694@@ -156,6 +162,7 @@ extern struct cred init_cred;
59695 __MUTEX_INITIALIZER(tsk.cred_guard_mutex), \
59696 .comm = "swapper", \
59697 .thread = INIT_THREAD, \
59698+ INIT_TASK_THREAD_INFO \
59699 .fs = &init_fs, \
59700 .files = &init_files, \
59701 .signal = &init_signals, \
59702diff -urNp linux-2.6.32.43/include/linux/intel-iommu.h linux-2.6.32.43/include/linux/intel-iommu.h
59703--- linux-2.6.32.43/include/linux/intel-iommu.h 2011-03-27 14:31:47.000000000 -0400
59704+++ linux-2.6.32.43/include/linux/intel-iommu.h 2011-08-05 20:33:55.000000000 -0400
59705@@ -296,7 +296,7 @@ struct iommu_flush {
59706 u8 fm, u64 type);
59707 void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
59708 unsigned int size_order, u64 type);
59709-};
59710+} __no_const;
59711
59712 enum {
59713 SR_DMAR_FECTL_REG,
59714diff -urNp linux-2.6.32.43/include/linux/interrupt.h linux-2.6.32.43/include/linux/interrupt.h
59715--- linux-2.6.32.43/include/linux/interrupt.h 2011-06-25 12:55:35.000000000 -0400
59716+++ linux-2.6.32.43/include/linux/interrupt.h 2011-06-25 12:56:37.000000000 -0400
59717@@ -363,7 +363,7 @@ enum
59718 /* map softirq index to softirq name. update 'softirq_to_name' in
59719 * kernel/softirq.c when adding a new softirq.
59720 */
59721-extern char *softirq_to_name[NR_SOFTIRQS];
59722+extern const char * const softirq_to_name[NR_SOFTIRQS];
59723
59724 /* softirq mask and active fields moved to irq_cpustat_t in
59725 * asm/hardirq.h to get better cache usage. KAO
59726@@ -371,12 +371,12 @@ extern char *softirq_to_name[NR_SOFTIRQS
59727
59728 struct softirq_action
59729 {
59730- void (*action)(struct softirq_action *);
59731+ void (*action)(void);
59732 };
59733
59734 asmlinkage void do_softirq(void);
59735 asmlinkage void __do_softirq(void);
59736-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
59737+extern void open_softirq(int nr, void (*action)(void));
59738 extern void softirq_init(void);
59739 #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0)
59740 extern void raise_softirq_irqoff(unsigned int nr);
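
The interrupt.h change narrows softirq handlers from void (*)(struct softirq_action *) to void (*)(void); the argument carried nothing the handlers needed, so dropping it simplifies the function-pointer table. A sketch of what registration looks like under the new prototype (the handler is illustrative, and TASKLET_SOFTIRQ is reused here only as an example index):

#include <linux/interrupt.h>

/* Handlers take no argument under the patched prototype. */
static void example_softirq_handler(void)
{
	/* per-CPU deferred work would go here */
}

static void example_register(void)
{
	/* TASKLET_SOFTIRQ is reused purely as an example index; real code
	 * would add its own entry to the softirq enum. */
	open_softirq(TASKLET_SOFTIRQ, example_softirq_handler);
}
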
59741diff -urNp linux-2.6.32.43/include/linux/irq.h linux-2.6.32.43/include/linux/irq.h
59742--- linux-2.6.32.43/include/linux/irq.h 2011-03-27 14:31:47.000000000 -0400
59743+++ linux-2.6.32.43/include/linux/irq.h 2011-04-17 15:56:46.000000000 -0400
59744@@ -438,12 +438,12 @@ extern int set_irq_msi(unsigned int irq,
59745 static inline bool alloc_desc_masks(struct irq_desc *desc, int node,
59746 bool boot)
59747 {
59748+#ifdef CONFIG_CPUMASK_OFFSTACK
59749 gfp_t gfp = GFP_ATOMIC;
59750
59751 if (boot)
59752 gfp = GFP_NOWAIT;
59753
59754-#ifdef CONFIG_CPUMASK_OFFSTACK
59755 if (!alloc_cpumask_var_node(&desc->affinity, gfp, node))
59756 return false;
59757
59758diff -urNp linux-2.6.32.43/include/linux/kallsyms.h linux-2.6.32.43/include/linux/kallsyms.h
59759--- linux-2.6.32.43/include/linux/kallsyms.h 2011-03-27 14:31:47.000000000 -0400
59760+++ linux-2.6.32.43/include/linux/kallsyms.h 2011-04-17 15:56:46.000000000 -0400
59761@@ -15,7 +15,8 @@
59762
59763 struct module;
59764
59765-#ifdef CONFIG_KALLSYMS
59766+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
59767+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
59768 /* Lookup the address for a symbol. Returns 0 if not found. */
59769 unsigned long kallsyms_lookup_name(const char *name);
59770
59771@@ -92,6 +93,15 @@ static inline int lookup_symbol_attrs(un
59772 /* Stupid that this does nothing, but I didn't create this mess. */
59773 #define __print_symbol(fmt, addr)
59774 #endif /*CONFIG_KALLSYMS*/
59775+#else /* when included by kallsyms.c, vsnprintf.c, or
59776+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
59777+extern void __print_symbol(const char *fmt, unsigned long address);
59778+extern int sprint_symbol(char *buffer, unsigned long address);
59779+const char *kallsyms_lookup(unsigned long addr,
59780+ unsigned long *symbolsize,
59781+ unsigned long *offset,
59782+ char **modname, char *namebuf);
59783+#endif
59784
59785 /* This macro allows us to keep printk typechecking */
59786 static void __check_printsym_format(const char *fmt, ...)
59787diff -urNp linux-2.6.32.43/include/linux/kgdb.h linux-2.6.32.43/include/linux/kgdb.h
59788--- linux-2.6.32.43/include/linux/kgdb.h 2011-03-27 14:31:47.000000000 -0400
59789+++ linux-2.6.32.43/include/linux/kgdb.h 2011-08-05 20:33:55.000000000 -0400
59790@@ -74,8 +74,8 @@ void kgdb_breakpoint(void);
59791
59792 extern int kgdb_connected;
59793
59794-extern atomic_t kgdb_setting_breakpoint;
59795-extern atomic_t kgdb_cpu_doing_single_step;
59796+extern atomic_unchecked_t kgdb_setting_breakpoint;
59797+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
59798
59799 extern struct task_struct *kgdb_usethread;
59800 extern struct task_struct *kgdb_contthread;
59801@@ -226,8 +226,8 @@ extern int kgdb_arch_remove_breakpoint(u
59802 * hardware debug registers.
59803 */
59804 struct kgdb_arch {
59805- unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
59806- unsigned long flags;
59807+ const unsigned char gdb_bpt_instr[BREAK_INSTR_SIZE];
59808+ const unsigned long flags;
59809
59810 int (*set_breakpoint)(unsigned long, char *);
59811 int (*remove_breakpoint)(unsigned long, char *);
59812@@ -251,20 +251,20 @@ struct kgdb_arch {
59813 */
59814 struct kgdb_io {
59815 const char *name;
59816- int (*read_char) (void);
59817- void (*write_char) (u8);
59818- void (*flush) (void);
59819- int (*init) (void);
59820- void (*pre_exception) (void);
59821- void (*post_exception) (void);
59822+ int (* const read_char) (void);
59823+ void (* const write_char) (u8);
59824+ void (* const flush) (void);
59825+ int (* const init) (void);
59826+ void (* const pre_exception) (void);
59827+ void (* const post_exception) (void);
59828 };
59829
59830-extern struct kgdb_arch arch_kgdb_ops;
59831+extern const struct kgdb_arch arch_kgdb_ops;
59832
59833 extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs);
59834
59835-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops);
59836-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops);
59837+extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops);
59838+extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops);
59839
59840 extern int kgdb_hex2long(char **ptr, unsigned long *long_val);
59841 extern int kgdb_mem2hex(char *mem, char *buf, int count);
59842diff -urNp linux-2.6.32.43/include/linux/kmod.h linux-2.6.32.43/include/linux/kmod.h
59843--- linux-2.6.32.43/include/linux/kmod.h 2011-03-27 14:31:47.000000000 -0400
59844+++ linux-2.6.32.43/include/linux/kmod.h 2011-04-17 15:56:46.000000000 -0400
59845@@ -31,6 +31,8 @@
59846 * usually useless though. */
59847 extern int __request_module(bool wait, const char *name, ...) \
59848 __attribute__((format(printf, 2, 3)));
59849+extern int ___request_module(bool wait, char *param_name, const char *name, ...) \
59850+ __attribute__((format(printf, 3, 4)));
59851 #define request_module(mod...) __request_module(true, mod)
59852 #define request_module_nowait(mod...) __request_module(false, mod)
59853 #define try_then_request_module(x, mod...) \
59854diff -urNp linux-2.6.32.43/include/linux/kobject.h linux-2.6.32.43/include/linux/kobject.h
59855--- linux-2.6.32.43/include/linux/kobject.h 2011-03-27 14:31:47.000000000 -0400
59856+++ linux-2.6.32.43/include/linux/kobject.h 2011-04-17 15:56:46.000000000 -0400
59857@@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kob
59858
59859 struct kobj_type {
59860 void (*release)(struct kobject *kobj);
59861- struct sysfs_ops *sysfs_ops;
59862+ const struct sysfs_ops *sysfs_ops;
59863 struct attribute **default_attrs;
59864 };
59865
59866@@ -118,9 +118,9 @@ struct kobj_uevent_env {
59867 };
59868
59869 struct kset_uevent_ops {
59870- int (*filter)(struct kset *kset, struct kobject *kobj);
59871- const char *(*name)(struct kset *kset, struct kobject *kobj);
59872- int (*uevent)(struct kset *kset, struct kobject *kobj,
59873+ int (* const filter)(struct kset *kset, struct kobject *kobj);
59874+ const char *(* const name)(struct kset *kset, struct kobject *kobj);
59875+ int (* const uevent)(struct kset *kset, struct kobject *kobj,
59876 struct kobj_uevent_env *env);
59877 };
59878
59879@@ -132,7 +132,7 @@ struct kobj_attribute {
59880 const char *buf, size_t count);
59881 };
59882
59883-extern struct sysfs_ops kobj_sysfs_ops;
59884+extern const struct sysfs_ops kobj_sysfs_ops;
59885
59886 /**
59887 * struct kset - a set of kobjects of a specific type, belonging to a specific subsystem.
59888@@ -155,14 +155,14 @@ struct kset {
59889 struct list_head list;
59890 spinlock_t list_lock;
59891 struct kobject kobj;
59892- struct kset_uevent_ops *uevent_ops;
59893+ const struct kset_uevent_ops *uevent_ops;
59894 };
59895
59896 extern void kset_init(struct kset *kset);
59897 extern int __must_check kset_register(struct kset *kset);
59898 extern void kset_unregister(struct kset *kset);
59899 extern struct kset * __must_check kset_create_and_add(const char *name,
59900- struct kset_uevent_ops *u,
59901+ const struct kset_uevent_ops *u,
59902 struct kobject *parent_kobj);
59903
59904 static inline struct kset *to_kset(struct kobject *kobj)
59905diff -urNp linux-2.6.32.43/include/linux/kvm_host.h linux-2.6.32.43/include/linux/kvm_host.h
59906--- linux-2.6.32.43/include/linux/kvm_host.h 2011-03-27 14:31:47.000000000 -0400
59907+++ linux-2.6.32.43/include/linux/kvm_host.h 2011-04-17 15:56:46.000000000 -0400
59908@@ -210,7 +210,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc
59909 void vcpu_load(struct kvm_vcpu *vcpu);
59910 void vcpu_put(struct kvm_vcpu *vcpu);
59911
59912-int kvm_init(void *opaque, unsigned int vcpu_size,
59913+int kvm_init(const void *opaque, unsigned int vcpu_size,
59914 struct module *module);
59915 void kvm_exit(void);
59916
59917@@ -316,7 +316,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(
59918 struct kvm_guest_debug *dbg);
59919 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
59920
59921-int kvm_arch_init(void *opaque);
59922+int kvm_arch_init(const void *opaque);
59923 void kvm_arch_exit(void);
59924
59925 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
59926diff -urNp linux-2.6.32.43/include/linux/libata.h linux-2.6.32.43/include/linux/libata.h
59927--- linux-2.6.32.43/include/linux/libata.h 2011-03-27 14:31:47.000000000 -0400
59928+++ linux-2.6.32.43/include/linux/libata.h 2011-08-05 20:33:55.000000000 -0400
59929@@ -525,11 +525,11 @@ struct ata_ioports {
59930
59931 struct ata_host {
59932 spinlock_t lock;
59933- struct device *dev;
59934+ struct device *dev;
59935 void __iomem * const *iomap;
59936 unsigned int n_ports;
59937 void *private_data;
59938- struct ata_port_operations *ops;
59939+ const struct ata_port_operations *ops;
59940 unsigned long flags;
59941 #ifdef CONFIG_ATA_ACPI
59942 acpi_handle acpi_handle;
59943@@ -710,7 +710,7 @@ struct ata_link {
59944
59945 struct ata_port {
59946 struct Scsi_Host *scsi_host; /* our co-allocated scsi host */
59947- struct ata_port_operations *ops;
59948+ const struct ata_port_operations *ops;
59949 spinlock_t *lock;
59950 /* Flags owned by the EH context. Only EH should touch these once the
59951 port is active */
59952@@ -883,7 +883,7 @@ struct ata_port_operations {
59953 * ->inherits must be the last field and all the preceding
59954 * fields must be pointers.
59955 */
59956- const struct ata_port_operations *inherits;
59957+ const struct ata_port_operations * const inherits;
59958 };
59959
59960 struct ata_port_info {
59961@@ -892,7 +892,7 @@ struct ata_port_info {
59962 unsigned long pio_mask;
59963 unsigned long mwdma_mask;
59964 unsigned long udma_mask;
59965- struct ata_port_operations *port_ops;
59966+ const struct ata_port_operations *port_ops;
59967 void *private_data;
59968 };
59969
59970@@ -916,7 +916,7 @@ extern const unsigned long sata_deb_timi
59971 extern const unsigned long sata_deb_timing_hotplug[];
59972 extern const unsigned long sata_deb_timing_long[];
59973
59974-extern struct ata_port_operations ata_dummy_port_ops;
59975+extern const struct ata_port_operations ata_dummy_port_ops;
59976 extern const struct ata_port_info ata_dummy_port_info;
59977
59978 static inline const unsigned long *
59979@@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_
59980 struct scsi_host_template *sht);
59981 extern void ata_host_detach(struct ata_host *host);
59982 extern void ata_host_init(struct ata_host *, struct device *,
59983- unsigned long, struct ata_port_operations *);
59984+ unsigned long, const struct ata_port_operations *);
59985 extern int ata_scsi_detect(struct scsi_host_template *sht);
59986 extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg);
59987 extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *));
59988diff -urNp linux-2.6.32.43/include/linux/lockd/bind.h linux-2.6.32.43/include/linux/lockd/bind.h
59989--- linux-2.6.32.43/include/linux/lockd/bind.h 2011-03-27 14:31:47.000000000 -0400
59990+++ linux-2.6.32.43/include/linux/lockd/bind.h 2011-04-17 15:56:46.000000000 -0400
59991@@ -23,13 +23,13 @@ struct svc_rqst;
59992 * This is the set of functions for lockd->nfsd communication
59993 */
59994 struct nlmsvc_binding {
59995- __be32 (*fopen)(struct svc_rqst *,
59996+ __be32 (* const fopen)(struct svc_rqst *,
59997 struct nfs_fh *,
59998 struct file **);
59999- void (*fclose)(struct file *);
60000+ void (* const fclose)(struct file *);
60001 };
60002
60003-extern struct nlmsvc_binding * nlmsvc_ops;
60004+extern const struct nlmsvc_binding * nlmsvc_ops;
60005
60006 /*
60007 * Similar to nfs_client_initdata, but without the NFS-specific
60008diff -urNp linux-2.6.32.43/include/linux/mca.h linux-2.6.32.43/include/linux/mca.h
60009--- linux-2.6.32.43/include/linux/mca.h 2011-03-27 14:31:47.000000000 -0400
60010+++ linux-2.6.32.43/include/linux/mca.h 2011-08-05 20:33:55.000000000 -0400
60011@@ -80,7 +80,7 @@ struct mca_bus_accessor_functions {
60012 int region);
60013 void * (*mca_transform_memory)(struct mca_device *,
60014 void *memory);
60015-};
60016+} __no_const;
60017
60018 struct mca_bus {
60019 u64 default_dma_mask;
60020diff -urNp linux-2.6.32.43/include/linux/memory.h linux-2.6.32.43/include/linux/memory.h
60021--- linux-2.6.32.43/include/linux/memory.h 2011-03-27 14:31:47.000000000 -0400
60022+++ linux-2.6.32.43/include/linux/memory.h 2011-08-05 20:33:55.000000000 -0400
60023@@ -108,7 +108,7 @@ struct memory_accessor {
60024 size_t count);
60025 ssize_t (*write)(struct memory_accessor *, const char *buf,
60026 off_t offset, size_t count);
60027-};
60028+} __no_const;
60029
60030 /*
60031 * Kernel text modification mutex, used for code patching. Users of this lock
60032diff -urNp linux-2.6.32.43/include/linux/mm.h linux-2.6.32.43/include/linux/mm.h
60033--- linux-2.6.32.43/include/linux/mm.h 2011-03-27 14:31:47.000000000 -0400
60034+++ linux-2.6.32.43/include/linux/mm.h 2011-04-17 15:56:46.000000000 -0400
60035@@ -106,7 +106,14 @@ extern unsigned int kobjsize(const void
60036
60037 #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */
60038 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
60039+
60040+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
60041+#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */
60042+#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */
60043+#else
60044 #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */
60045+#endif
60046+
60047 #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */
60048 #define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */
60049
60050@@ -841,12 +848,6 @@ int set_page_dirty(struct page *page);
60051 int set_page_dirty_lock(struct page *page);
60052 int clear_page_dirty_for_io(struct page *page);
60053
60054-/* Is the vma a continuation of the stack vma above it? */
60055-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
60056-{
60057- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
60058-}
60059-
60060 extern unsigned long move_page_tables(struct vm_area_struct *vma,
60061 unsigned long old_addr, struct vm_area_struct *new_vma,
60062 unsigned long new_addr, unsigned long len);
60063@@ -890,6 +891,8 @@ struct shrinker {
60064 extern void register_shrinker(struct shrinker *);
60065 extern void unregister_shrinker(struct shrinker *);
60066
60067+pgprot_t vm_get_page_prot(unsigned long vm_flags);
60068+
60069 int vma_wants_writenotify(struct vm_area_struct *vma);
60070
60071 extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl);
60072@@ -1162,6 +1165,7 @@ out:
60073 }
60074
60075 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
60076+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
60077
60078 extern unsigned long do_brk(unsigned long, unsigned long);
60079
60080@@ -1218,6 +1222,10 @@ extern struct vm_area_struct * find_vma(
60081 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
60082 struct vm_area_struct **pprev);
60083
60084+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
60085+extern void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
60086+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
60087+
60088 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
60089 NULL if none. Assume start_addr < end_addr. */
60090 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
60091@@ -1234,7 +1242,6 @@ static inline unsigned long vma_pages(st
60092 return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
60093 }
60094
60095-pgprot_t vm_get_page_prot(unsigned long vm_flags);
60096 struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
60097 int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
60098 unsigned long pfn, unsigned long size, pgprot_t);
60099@@ -1332,7 +1339,13 @@ extern void memory_failure(unsigned long
60100 extern int __memory_failure(unsigned long pfn, int trapno, int ref);
60101 extern int sysctl_memory_failure_early_kill;
60102 extern int sysctl_memory_failure_recovery;
60103-extern atomic_long_t mce_bad_pages;
60104+extern atomic_long_unchecked_t mce_bad_pages;
60105+
60106+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
60107+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
60108+#else
60109+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
60110+#endif
60111
60112 #endif /* __KERNEL__ */
60113 #endif /* _LINUX_MM_H */
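
track_exec_limit() above uses the usual config-stub idiom: a real prototype when CONFIG_ARCH_TRACK_EXEC_LIMIT is set, an empty static inline otherwise, so call sites stay free of #ifdefs. A generic sketch of the same idiom with a made-up feature and hook name:

/* Illustrative idiom only; CONFIG_EXAMPLE_FEATURE and example_hook()
 * are made-up names, not part of the patch. */
#ifdef CONFIG_EXAMPLE_FEATURE
extern void example_hook(unsigned long start, unsigned long end);
#else
static inline void example_hook(unsigned long start, unsigned long end) {}
#endif

static void example_caller(unsigned long start, unsigned long end)
{
	/* Compiles to nothing when the feature is off, with no #ifdef here. */
	example_hook(start, end);
}
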
60114diff -urNp linux-2.6.32.43/include/linux/mm_types.h linux-2.6.32.43/include/linux/mm_types.h
60115--- linux-2.6.32.43/include/linux/mm_types.h 2011-03-27 14:31:47.000000000 -0400
60116+++ linux-2.6.32.43/include/linux/mm_types.h 2011-04-17 15:56:46.000000000 -0400
60117@@ -186,6 +186,8 @@ struct vm_area_struct {
60118 #ifdef CONFIG_NUMA
60119 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
60120 #endif
60121+
60122+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
60123 };
60124
60125 struct core_thread {
60126@@ -287,6 +289,24 @@ struct mm_struct {
60127 #ifdef CONFIG_MMU_NOTIFIER
60128 struct mmu_notifier_mm *mmu_notifier_mm;
60129 #endif
60130+
60131+#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60132+ unsigned long pax_flags;
60133+#endif
60134+
60135+#ifdef CONFIG_PAX_DLRESOLVE
60136+ unsigned long call_dl_resolve;
60137+#endif
60138+
60139+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
60140+ unsigned long call_syscall;
60141+#endif
60142+
60143+#ifdef CONFIG_PAX_ASLR
60144+ unsigned long delta_mmap; /* randomized offset */
60145+ unsigned long delta_stack; /* randomized offset */
60146+#endif
60147+
60148 };
60149
60150 /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */
60151diff -urNp linux-2.6.32.43/include/linux/mmu_notifier.h linux-2.6.32.43/include/linux/mmu_notifier.h
60152--- linux-2.6.32.43/include/linux/mmu_notifier.h 2011-03-27 14:31:47.000000000 -0400
60153+++ linux-2.6.32.43/include/linux/mmu_notifier.h 2011-04-17 15:56:46.000000000 -0400
60154@@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr
60155 */
60156 #define ptep_clear_flush_notify(__vma, __address, __ptep) \
60157 ({ \
60158- pte_t __pte; \
60159+ pte_t ___pte; \
60160 struct vm_area_struct *___vma = __vma; \
60161 unsigned long ___address = __address; \
60162- __pte = ptep_clear_flush(___vma, ___address, __ptep); \
60163+ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \
60164 mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \
60165- __pte; \
60166+ ___pte; \
60167 })
60168
60169 #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \
60170diff -urNp linux-2.6.32.43/include/linux/mmzone.h linux-2.6.32.43/include/linux/mmzone.h
60171--- linux-2.6.32.43/include/linux/mmzone.h 2011-03-27 14:31:47.000000000 -0400
60172+++ linux-2.6.32.43/include/linux/mmzone.h 2011-04-17 15:56:46.000000000 -0400
60173@@ -350,7 +350,7 @@ struct zone {
60174 unsigned long flags; /* zone flags, see below */
60175
60176 /* Zone statistics */
60177- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60178+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
60179
60180 /*
60181 * prev_priority holds the scanning priority for this zone. It is
60182diff -urNp linux-2.6.32.43/include/linux/mod_devicetable.h linux-2.6.32.43/include/linux/mod_devicetable.h
60183--- linux-2.6.32.43/include/linux/mod_devicetable.h 2011-03-27 14:31:47.000000000 -0400
60184+++ linux-2.6.32.43/include/linux/mod_devicetable.h 2011-04-17 15:56:46.000000000 -0400
60185@@ -12,7 +12,7 @@
60186 typedef unsigned long kernel_ulong_t;
60187 #endif
60188
60189-#define PCI_ANY_ID (~0)
60190+#define PCI_ANY_ID ((__u16)~0)
60191
60192 struct pci_device_id {
60193 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
60194@@ -131,7 +131,7 @@ struct usb_device_id {
60195 #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100
60196 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
60197
60198-#define HID_ANY_ID (~0)
60199+#define HID_ANY_ID (~0U)
60200
60201 struct hid_device_id {
60202 __u16 bus;
60203diff -urNp linux-2.6.32.43/include/linux/module.h linux-2.6.32.43/include/linux/module.h
60204--- linux-2.6.32.43/include/linux/module.h 2011-03-27 14:31:47.000000000 -0400
60205+++ linux-2.6.32.43/include/linux/module.h 2011-08-05 20:33:55.000000000 -0400
60206@@ -16,6 +16,7 @@
60207 #include <linux/kobject.h>
60208 #include <linux/moduleparam.h>
60209 #include <linux/tracepoint.h>
60210+#include <linux/fs.h>
60211
60212 #include <asm/local.h>
60213 #include <asm/module.h>
60214@@ -287,16 +288,16 @@ struct module
60215 int (*init)(void);
60216
60217 /* If this is non-NULL, vfree after init() returns */
60218- void *module_init;
60219+ void *module_init_rx, *module_init_rw;
60220
60221 /* Here is the actual code + data, vfree'd on unload. */
60222- void *module_core;
60223+ void *module_core_rx, *module_core_rw;
60224
60225 /* Here are the sizes of the init and core sections */
60226- unsigned int init_size, core_size;
60227+ unsigned int init_size_rw, core_size_rw;
60228
60229 /* The size of the executable code in each section. */
60230- unsigned int init_text_size, core_text_size;
60231+ unsigned int init_size_rx, core_size_rx;
60232
60233 /* Arch-specific module values */
60234 struct mod_arch_specific arch;
60235@@ -345,6 +346,10 @@ struct module
60236 #ifdef CONFIG_EVENT_TRACING
60237 struct ftrace_event_call *trace_events;
60238 unsigned int num_trace_events;
60239+ struct file_operations trace_id;
60240+ struct file_operations trace_enable;
60241+ struct file_operations trace_format;
60242+ struct file_operations trace_filter;
60243 #endif
60244 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
60245 unsigned long *ftrace_callsites;
60246@@ -393,16 +398,46 @@ struct module *__module_address(unsigned
60247 bool is_module_address(unsigned long addr);
60248 bool is_module_text_address(unsigned long addr);
60249
60250+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
60251+{
60252+
60253+#ifdef CONFIG_PAX_KERNEXEC
60254+ if (ktla_ktva(addr) >= (unsigned long)start &&
60255+ ktla_ktva(addr) < (unsigned long)start + size)
60256+ return 1;
60257+#endif
60258+
60259+ return ((void *)addr >= start && (void *)addr < start + size);
60260+}
60261+
60262+static inline int within_module_core_rx(unsigned long addr, struct module *mod)
60263+{
60264+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
60265+}
60266+
60267+static inline int within_module_core_rw(unsigned long addr, struct module *mod)
60268+{
60269+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
60270+}
60271+
60272+static inline int within_module_init_rx(unsigned long addr, struct module *mod)
60273+{
60274+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
60275+}
60276+
60277+static inline int within_module_init_rw(unsigned long addr, struct module *mod)
60278+{
60279+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
60280+}
60281+
60282 static inline int within_module_core(unsigned long addr, struct module *mod)
60283 {
60284- return (unsigned long)mod->module_core <= addr &&
60285- addr < (unsigned long)mod->module_core + mod->core_size;
60286+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
60287 }
60288
60289 static inline int within_module_init(unsigned long addr, struct module *mod)
60290 {
60291- return (unsigned long)mod->module_init <= addr &&
60292- addr < (unsigned long)mod->module_init + mod->init_size;
60293+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
60294 }
60295
60296 /* Search for module by name: must hold module_mutex. */
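
The module.h hunk splits each module's init and core allocations into separate RX and RW regions with their own sizes, and within_module_core()/within_module_init() become OR-compositions over the two; within_module_range() additionally matches through ktla_ktva() so KERNEXEC's alternate mapping of the text is covered. Callers that only care whether an address belongs to a module at all keep working unchanged, for example (hypothetical consumer):

#include <linux/module.h>

/* Hypothetical consumer: does this address fall anywhere inside the
 * module, in either the RX or RW half of its core/init regions? */
static int example_addr_in_module(unsigned long addr, struct module *mod)
{
	return within_module_core(addr, mod) || within_module_init(addr, mod);
}
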
60297diff -urNp linux-2.6.32.43/include/linux/moduleloader.h linux-2.6.32.43/include/linux/moduleloader.h
60298--- linux-2.6.32.43/include/linux/moduleloader.h 2011-03-27 14:31:47.000000000 -0400
60299+++ linux-2.6.32.43/include/linux/moduleloader.h 2011-04-17 15:56:46.000000000 -0400
60300@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st
60301 sections. Returns NULL on failure. */
60302 void *module_alloc(unsigned long size);
60303
60304+#ifdef CONFIG_PAX_KERNEXEC
60305+void *module_alloc_exec(unsigned long size);
60306+#else
60307+#define module_alloc_exec(x) module_alloc(x)
60308+#endif
60309+
60310 /* Free memory returned from module_alloc. */
60311 void module_free(struct module *mod, void *module_region);
60312
60313+#ifdef CONFIG_PAX_KERNEXEC
60314+void module_free_exec(struct module *mod, void *module_region);
60315+#else
60316+#define module_free_exec(x, y) module_free((x), (y))
60317+#endif
60318+
60319 /* Apply the given relocation to the (simplified) ELF. Return -error
60320 or 0. */
60321 int apply_relocate(Elf_Shdr *sechdrs,
60322diff -urNp linux-2.6.32.43/include/linux/moduleparam.h linux-2.6.32.43/include/linux/moduleparam.h
60323--- linux-2.6.32.43/include/linux/moduleparam.h 2011-03-27 14:31:47.000000000 -0400
60324+++ linux-2.6.32.43/include/linux/moduleparam.h 2011-04-17 15:56:46.000000000 -0400
60325@@ -132,7 +132,7 @@ struct kparam_array
60326
60327 /* Actually copy string: maxlen param is usually sizeof(string). */
60328 #define module_param_string(name, string, len, perm) \
60329- static const struct kparam_string __param_string_##name \
60330+ static const struct kparam_string __param_string_##name __used \
60331 = { len, string }; \
60332 __module_param_call(MODULE_PARAM_PREFIX, name, \
60333 param_set_copystring, param_get_string, \
60334@@ -211,7 +211,7 @@ extern int param_get_invbool(char *buffe
60335
60336 /* Comma-separated array: *nump is set to number they actually specified. */
60337 #define module_param_array_named(name, array, type, nump, perm) \
60338- static const struct kparam_array __param_arr_##name \
60339+ static const struct kparam_array __param_arr_##name __used \
60340 = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type,\
60341 sizeof(array[0]), array }; \
60342 __module_param_call(MODULE_PARAM_PREFIX, name, \
60343diff -urNp linux-2.6.32.43/include/linux/mutex.h linux-2.6.32.43/include/linux/mutex.h
60344--- linux-2.6.32.43/include/linux/mutex.h 2011-03-27 14:31:47.000000000 -0400
60345+++ linux-2.6.32.43/include/linux/mutex.h 2011-04-17 15:56:46.000000000 -0400
60346@@ -51,7 +51,7 @@ struct mutex {
60347 spinlock_t wait_lock;
60348 struct list_head wait_list;
60349 #if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP)
60350- struct thread_info *owner;
60351+ struct task_struct *owner;
60352 #endif
60353 #ifdef CONFIG_DEBUG_MUTEXES
60354 const char *name;
60355diff -urNp linux-2.6.32.43/include/linux/namei.h linux-2.6.32.43/include/linux/namei.h
60356--- linux-2.6.32.43/include/linux/namei.h 2011-03-27 14:31:47.000000000 -0400
60357+++ linux-2.6.32.43/include/linux/namei.h 2011-04-17 15:56:46.000000000 -0400
60358@@ -22,7 +22,7 @@ struct nameidata {
60359 unsigned int flags;
60360 int last_type;
60361 unsigned depth;
60362- char *saved_names[MAX_NESTED_LINKS + 1];
60363+ const char *saved_names[MAX_NESTED_LINKS + 1];
60364
60365 /* Intent data */
60366 union {
60367@@ -84,12 +84,12 @@ extern int follow_up(struct path *);
60368 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
60369 extern void unlock_rename(struct dentry *, struct dentry *);
60370
60371-static inline void nd_set_link(struct nameidata *nd, char *path)
60372+static inline void nd_set_link(struct nameidata *nd, const char *path)
60373 {
60374 nd->saved_names[nd->depth] = path;
60375 }
60376
60377-static inline char *nd_get_link(struct nameidata *nd)
60378+static inline const char *nd_get_link(const struct nameidata *nd)
60379 {
60380 return nd->saved_names[nd->depth];
60381 }
60382diff -urNp linux-2.6.32.43/include/linux/netfilter/xt_gradm.h linux-2.6.32.43/include/linux/netfilter/xt_gradm.h
60383--- linux-2.6.32.43/include/linux/netfilter/xt_gradm.h 1969-12-31 19:00:00.000000000 -0500
60384+++ linux-2.6.32.43/include/linux/netfilter/xt_gradm.h 2011-04-17 15:56:46.000000000 -0400
60385@@ -0,0 +1,9 @@
60386+#ifndef _LINUX_NETFILTER_XT_GRADM_H
60387+#define _LINUX_NETFILTER_XT_GRADM_H 1
60388+
60389+struct xt_gradm_mtinfo {
60390+ __u16 flags;
60391+ __u16 invflags;
60392+};
60393+
60394+#endif
60395diff -urNp linux-2.6.32.43/include/linux/nodemask.h linux-2.6.32.43/include/linux/nodemask.h
60396--- linux-2.6.32.43/include/linux/nodemask.h 2011-03-27 14:31:47.000000000 -0400
60397+++ linux-2.6.32.43/include/linux/nodemask.h 2011-04-17 15:56:46.000000000 -0400
60398@@ -464,11 +464,11 @@ static inline int num_node_state(enum no
60399
60400 #define any_online_node(mask) \
60401 ({ \
60402- int node; \
60403- for_each_node_mask(node, (mask)) \
60404- if (node_online(node)) \
60405+ int __node; \
60406+ for_each_node_mask(__node, (mask)) \
60407+ if (node_online(__node)) \
60408 break; \
60409- node; \
60410+ __node; \
60411 })
60412
60413 #define num_online_nodes() num_node_state(N_ONLINE)
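
Both the mmu_notifier.h and nodemask.h changes rename locals inside GCC statement-expression macros (__pte to ___pte, node to __node) to avoid name capture: if the caller's argument mentions a variable spelled like the macro's internal local, the inner declaration shadows it and the macro silently computes the wrong thing. A small userspace sketch of the failure mode the renames avoid; the FIRST_SET_BIT macros are stand-ins (using GCC statement expressions, as the kernel does), not kernel code:

#include <stdio.h>

/* Broken variant: the internal local is also called "n", so a caller
 * argument that refers to the caller's own "n" is shadowed by the loop
 * counter; the test becomes (1u << counter) & (1u << counter), which is
 * true immediately, and the macro wrongly yields 0. */
#define FIRST_SET_BIT_BAD(mask) \
	({ int n; for (n = 0; n < 32; n++) if ((mask) & (1u << n)) break; n; })

/* Fixed, mirroring the patch: give the local a reserved-style name that
 * callers will not plausibly use. */
#define FIRST_SET_BIT(mask) \
	({ int __n; for (__n = 0; __n < 32; __n++) if ((mask) & (1u << __n)) break; __n; })

int main(void)
{
	int n = 4;					/* caller's own variable */

	printf("%d\n", FIRST_SET_BIT(1u << n));		/* prints 4 */
	printf("%d\n", FIRST_SET_BIT_BAD(1u << n));	/* prints 0: shadowed */
	return 0;
}
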
60414diff -urNp linux-2.6.32.43/include/linux/oprofile.h linux-2.6.32.43/include/linux/oprofile.h
60415--- linux-2.6.32.43/include/linux/oprofile.h 2011-03-27 14:31:47.000000000 -0400
60416+++ linux-2.6.32.43/include/linux/oprofile.h 2011-04-17 15:56:46.000000000 -0400
60417@@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super
60418 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
60419 char const * name, ulong * val);
60420
60421-/** Create a file for read-only access to an atomic_t. */
60422+/** Create a file for read-only access to an atomic_unchecked_t. */
60423 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
60424- char const * name, atomic_t * val);
60425+ char const * name, atomic_unchecked_t * val);
60426
60427 /** create a directory */
60428 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
60429diff -urNp linux-2.6.32.43/include/linux/perf_event.h linux-2.6.32.43/include/linux/perf_event.h
60430--- linux-2.6.32.43/include/linux/perf_event.h 2011-03-27 14:31:47.000000000 -0400
60431+++ linux-2.6.32.43/include/linux/perf_event.h 2011-05-04 17:56:28.000000000 -0400
60432@@ -476,7 +476,7 @@ struct hw_perf_event {
60433 struct hrtimer hrtimer;
60434 };
60435 };
60436- atomic64_t prev_count;
60437+ atomic64_unchecked_t prev_count;
60438 u64 sample_period;
60439 u64 last_period;
60440 atomic64_t period_left;
60441@@ -557,7 +557,7 @@ struct perf_event {
60442 const struct pmu *pmu;
60443
60444 enum perf_event_active_state state;
60445- atomic64_t count;
60446+ atomic64_unchecked_t count;
60447
60448 /*
60449 * These are the total time in nanoseconds that the event
60450@@ -595,8 +595,8 @@ struct perf_event {
60451 * These accumulate total time (in nanoseconds) that children
60452 * events have been enabled and running, respectively.
60453 */
60454- atomic64_t child_total_time_enabled;
60455- atomic64_t child_total_time_running;
60456+ atomic64_unchecked_t child_total_time_enabled;
60457+ atomic64_unchecked_t child_total_time_running;
60458
60459 /*
60460 * Protect attach/detach and child_list:
60461diff -urNp linux-2.6.32.43/include/linux/pipe_fs_i.h linux-2.6.32.43/include/linux/pipe_fs_i.h
60462--- linux-2.6.32.43/include/linux/pipe_fs_i.h 2011-03-27 14:31:47.000000000 -0400
60463+++ linux-2.6.32.43/include/linux/pipe_fs_i.h 2011-04-17 15:56:46.000000000 -0400
60464@@ -46,9 +46,9 @@ struct pipe_inode_info {
60465 wait_queue_head_t wait;
60466 unsigned int nrbufs, curbuf;
60467 struct page *tmp_page;
60468- unsigned int readers;
60469- unsigned int writers;
60470- unsigned int waiting_writers;
60471+ atomic_t readers;
60472+ atomic_t writers;
60473+ atomic_t waiting_writers;
60474 unsigned int r_counter;
60475 unsigned int w_counter;
60476 struct fasync_struct *fasync_readers;
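
Turning the pipe reader/writer counts into atomic_t is part of the patch's reference-counter hardening: every update in fs/pipe.c must then go through the atomic API, which the overflow-checking machinery can instrument. A sketch of what such a call-site conversion looks like in principle; the structure and helpers are illustrative, not the actual fs/pipe.c changes:

#include <asm/atomic.h>

/* Illustrative structure and helpers, not the actual fs/pipe.c change. */
struct example_pipe_state {
	atomic_t readers;		/* was: unsigned int readers; */
};

static void example_open_reader(struct example_pipe_state *p)
{
	atomic_inc(&p->readers);	/* was: p->readers++; */
}

static int example_has_readers(struct example_pipe_state *p)
{
	return atomic_read(&p->readers) != 0;	/* was: p->readers != 0 */
}
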
60477diff -urNp linux-2.6.32.43/include/linux/poison.h linux-2.6.32.43/include/linux/poison.h
60478--- linux-2.6.32.43/include/linux/poison.h 2011-03-27 14:31:47.000000000 -0400
60479+++ linux-2.6.32.43/include/linux/poison.h 2011-04-17 15:56:46.000000000 -0400
60480@@ -19,8 +19,8 @@
60481 * under normal circumstances, used to verify that nobody uses
60482 * non-initialized list entries.
60483 */
60484-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
60485-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
60486+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
60487+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
60488
60489 /********** include/linux/timer.h **********/
60490 /*
60491diff -urNp linux-2.6.32.43/include/linux/posix-timers.h linux-2.6.32.43/include/linux/posix-timers.h
60492--- linux-2.6.32.43/include/linux/posix-timers.h 2011-03-27 14:31:47.000000000 -0400
60493+++ linux-2.6.32.43/include/linux/posix-timers.h 2011-08-05 20:33:55.000000000 -0400
60494@@ -67,7 +67,7 @@ struct k_itimer {
60495 };
60496
60497 struct k_clock {
60498- int res; /* in nanoseconds */
60499+ const int res; /* in nanoseconds */
60500 int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
60501 int (*clock_set) (const clockid_t which_clock, struct timespec * tp);
60502 int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
60503diff -urNp linux-2.6.32.43/include/linux/preempt.h linux-2.6.32.43/include/linux/preempt.h
60504--- linux-2.6.32.43/include/linux/preempt.h 2011-03-27 14:31:47.000000000 -0400
60505+++ linux-2.6.32.43/include/linux/preempt.h 2011-08-05 20:33:55.000000000 -0400
60506@@ -110,7 +110,7 @@ struct preempt_ops {
60507 void (*sched_in)(struct preempt_notifier *notifier, int cpu);
60508 void (*sched_out)(struct preempt_notifier *notifier,
60509 struct task_struct *next);
60510-};
60511+} __no_const;
60512
60513 /**
60514 * preempt_notifier - key for installing preemption notifiers
60515diff -urNp linux-2.6.32.43/include/linux/proc_fs.h linux-2.6.32.43/include/linux/proc_fs.h
60516--- linux-2.6.32.43/include/linux/proc_fs.h 2011-03-27 14:31:47.000000000 -0400
60517+++ linux-2.6.32.43/include/linux/proc_fs.h 2011-08-05 20:33:55.000000000 -0400
60518@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro
60519 return proc_create_data(name, mode, parent, proc_fops, NULL);
60520 }
60521
60522+static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode,
60523+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
60524+{
60525+#ifdef CONFIG_GRKERNSEC_PROC_USER
60526+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
60527+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
60528+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
60529+#else
60530+ return proc_create_data(name, mode, parent, proc_fops, NULL);
60531+#endif
60532+}
60533+
60534+
60535 static inline struct proc_dir_entry *create_proc_read_entry(const char *name,
60536 mode_t mode, struct proc_dir_entry *base,
60537 read_proc_t *read_proc, void * data)
60538@@ -256,7 +269,7 @@ union proc_op {
60539 int (*proc_show)(struct seq_file *m,
60540 struct pid_namespace *ns, struct pid *pid,
60541 struct task_struct *task);
60542-};
60543+} __no_const;
60544
60545 struct ctl_table_header;
60546 struct ctl_table;
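
proc_create_grsec() above deliberately ignores the caller's requested mode when the proc-restriction options are enabled: entries become 0400 under CONFIG_GRKERNSEC_PROC_USER, 0440 under CONFIG_GRKERNSEC_PROC_USERGROUP, and keep the requested mode otherwise. A hedged usage sketch; the entry name and file_operations are placeholders:

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/proc_fs.h>

static const struct file_operations example_proc_fops;	/* placeholder */

static int __init example_proc_init(void)
{
	/* The requested 0444 is silently tightened to 0400 or 0440 when the
	 * corresponding GRKERNSEC proc options are enabled. */
	if (!proc_create_grsec("example_stats", 0444, NULL, &example_proc_fops))
		return -ENOMEM;
	return 0;
}
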
60547diff -urNp linux-2.6.32.43/include/linux/ptrace.h linux-2.6.32.43/include/linux/ptrace.h
60548--- linux-2.6.32.43/include/linux/ptrace.h 2011-03-27 14:31:47.000000000 -0400
60549+++ linux-2.6.32.43/include/linux/ptrace.h 2011-04-17 15:56:46.000000000 -0400
60550@@ -96,10 +96,10 @@ extern void __ptrace_unlink(struct task_
60551 extern void exit_ptrace(struct task_struct *tracer);
60552 #define PTRACE_MODE_READ 1
60553 #define PTRACE_MODE_ATTACH 2
60554-/* Returns 0 on success, -errno on denial. */
60555-extern int __ptrace_may_access(struct task_struct *task, unsigned int mode);
60556 /* Returns true on success, false on denial. */
60557 extern bool ptrace_may_access(struct task_struct *task, unsigned int mode);
60558+/* Returns true on success, false on denial. */
60559+extern bool ptrace_may_access_log(struct task_struct *task, unsigned int mode);
60560
60561 static inline int ptrace_reparented(struct task_struct *child)
60562 {
60563diff -urNp linux-2.6.32.43/include/linux/random.h linux-2.6.32.43/include/linux/random.h
60564--- linux-2.6.32.43/include/linux/random.h 2011-03-27 14:31:47.000000000 -0400
60565+++ linux-2.6.32.43/include/linux/random.h 2011-08-07 19:48:09.000000000 -0400
60566@@ -53,17 +53,6 @@ extern void add_interrupt_randomness(int
60567 extern void get_random_bytes(void *buf, int nbytes);
60568 void generate_random_uuid(unsigned char uuid_out[16]);
60569
60570-extern __u32 secure_ip_id(__be32 daddr);
60571-extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
60572-extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
60573- __be16 dport);
60574-extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
60575- __be16 sport, __be16 dport);
60576-extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
60577- __be16 sport, __be16 dport);
60578-extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
60579- __be16 sport, __be16 dport);
60580-
60581 #ifndef MODULE
60582 extern const struct file_operations random_fops, urandom_fops;
60583 #endif
60584@@ -74,6 +63,11 @@ unsigned long randomize_range(unsigned l
60585 u32 random32(void);
60586 void srandom32(u32 seed);
60587
60588+static inline unsigned long pax_get_random_long(void)
60589+{
60590+ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0);
60591+}
60592+
60593 #endif /* __KERNEL___ */
60594
60595 #endif /* _LINUX_RANDOM_H */
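pax_get_random_long() above composes a full-width random value from random32(): the first draw fills the low 32 bits and, when sizeof(long) > 4, a second draw is shifted into the upper half, so 64-bit kernels get 64 bits of randomness for randomization uses. A minimal userspace sketch of the same composition, with rand32() as an assumed stand-in for the kernel's random32() (the quality of the source is beside the point here):

#include <stdint.h>
#include <stdlib.h>

static uint32_t rand32(void)			/* stand-in for the kernel's random32() */
{
	return ((uint32_t)rand() << 16) ^ (uint32_t)rand();
}

static unsigned long get_random_long(void)
{
	unsigned long r = rand32();

	if (sizeof(long) > 4) {			/* widen only on 64-bit targets */
		r <<= 16;
		r <<= 16;			/* two half-width shifts avoid an undefined 32-bit shift */
		r |= rand32();
	}
	return r;
}
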
60596diff -urNp linux-2.6.32.43/include/linux/reboot.h linux-2.6.32.43/include/linux/reboot.h
60597--- linux-2.6.32.43/include/linux/reboot.h 2011-03-27 14:31:47.000000000 -0400
60598+++ linux-2.6.32.43/include/linux/reboot.h 2011-05-22 23:02:06.000000000 -0400
60599@@ -47,9 +47,9 @@ extern int unregister_reboot_notifier(st
60600 * Architecture-specific implementations of sys_reboot commands.
60601 */
60602
60603-extern void machine_restart(char *cmd);
60604-extern void machine_halt(void);
60605-extern void machine_power_off(void);
60606+extern void machine_restart(char *cmd) __noreturn;
60607+extern void machine_halt(void) __noreturn;
60608+extern void machine_power_off(void) __noreturn;
60609
60610 extern void machine_shutdown(void);
60611 struct pt_regs;
60612@@ -60,9 +60,9 @@ extern void machine_crash_shutdown(struc
60613 */
60614
60615 extern void kernel_restart_prepare(char *cmd);
60616-extern void kernel_restart(char *cmd);
60617-extern void kernel_halt(void);
60618-extern void kernel_power_off(void);
60619+extern void kernel_restart(char *cmd) __noreturn;
60620+extern void kernel_halt(void) __noreturn;
60621+extern void kernel_power_off(void) __noreturn;
60622
60623 void ctrl_alt_del(void);
60624
60625@@ -75,7 +75,7 @@ extern int orderly_poweroff(bool force);
60626 * Emergency restart, callable from an interrupt handler.
60627 */
60628
60629-extern void emergency_restart(void);
60630+extern void emergency_restart(void) __noreturn;
60631 #include <asm/emergency-restart.h>
60632
60633 #endif
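Annotating machine_restart(), kernel_halt(), emergency_restart() and friends with __noreturn records that control never comes back from them, which lets the compiler drop dead return paths in callers and lets static checkers prove code after such a call unreachable. A small userspace illustration of the attribute; die() and parse_level() are illustrative, not kernel functions:

#include <stdio.h>
#include <stdlib.h>

__attribute__((noreturn))
static void die(const char *msg)
{
	fprintf(stderr, "fatal: %s\n", msg);
	exit(EXIT_FAILURE);
}

static int parse_level(const char *arg)
{
	int level = atoi(arg);

	if (level < 0)
		die("negative level");	/* the compiler knows no path continues past this call */
	return level;
}
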
60634diff -urNp linux-2.6.32.43/include/linux/reiserfs_fs.h linux-2.6.32.43/include/linux/reiserfs_fs.h
60635--- linux-2.6.32.43/include/linux/reiserfs_fs.h 2011-03-27 14:31:47.000000000 -0400
60636+++ linux-2.6.32.43/include/linux/reiserfs_fs.h 2011-04-17 15:56:46.000000000 -0400
60637@@ -1326,7 +1326,7 @@ static inline loff_t max_reiserfs_offset
60638 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
60639
60640 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
60641-#define get_generation(s) atomic_read (&fs_generation(s))
60642+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
60643 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
60644 #define __fs_changed(gen,s) (gen != get_generation (s))
60645 #define fs_changed(gen,s) ({cond_resched(); __fs_changed(gen, s);})
60646@@ -1534,24 +1534,24 @@ static inline struct super_block *sb_fro
60647 */
60648
60649 struct item_operations {
60650- int (*bytes_number) (struct item_head * ih, int block_size);
60651- void (*decrement_key) (struct cpu_key *);
60652- int (*is_left_mergeable) (struct reiserfs_key * ih,
60653+ int (* const bytes_number) (struct item_head * ih, int block_size);
60654+ void (* const decrement_key) (struct cpu_key *);
60655+ int (* const is_left_mergeable) (struct reiserfs_key * ih,
60656 unsigned long bsize);
60657- void (*print_item) (struct item_head *, char *item);
60658- void (*check_item) (struct item_head *, char *item);
60659+ void (* const print_item) (struct item_head *, char *item);
60660+ void (* const check_item) (struct item_head *, char *item);
60661
60662- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi,
60663+ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi,
60664 int is_affected, int insert_size);
60665- int (*check_left) (struct virtual_item * vi, int free,
60666+ int (* const check_left) (struct virtual_item * vi, int free,
60667 int start_skip, int end_skip);
60668- int (*check_right) (struct virtual_item * vi, int free);
60669- int (*part_size) (struct virtual_item * vi, int from, int to);
60670- int (*unit_num) (struct virtual_item * vi);
60671- void (*print_vi) (struct virtual_item * vi);
60672+ int (* const check_right) (struct virtual_item * vi, int free);
60673+ int (* const part_size) (struct virtual_item * vi, int from, int to);
60674+ int (* const unit_num) (struct virtual_item * vi);
60675+ void (* const print_vi) (struct virtual_item * vi);
60676 };
60677
60678-extern struct item_operations *item_ops[TYPE_ANY + 1];
60679+extern const struct item_operations * const item_ops[TYPE_ANY + 1];
60680
60681 #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize)
60682 #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize)
60683diff -urNp linux-2.6.32.43/include/linux/reiserfs_fs_sb.h linux-2.6.32.43/include/linux/reiserfs_fs_sb.h
60684--- linux-2.6.32.43/include/linux/reiserfs_fs_sb.h 2011-03-27 14:31:47.000000000 -0400
60685+++ linux-2.6.32.43/include/linux/reiserfs_fs_sb.h 2011-04-17 15:56:46.000000000 -0400
60686@@ -377,7 +377,7 @@ struct reiserfs_sb_info {
60687 /* Comment? -Hans */
60688 wait_queue_head_t s_wait;
60689 /* To be obsoleted soon by per buffer seals.. -Hans */
60690- atomic_t s_generation_counter; // increased by one every time the
60691+ atomic_unchecked_t s_generation_counter; // increased by one every time the
60692 // tree gets re-balanced
60693 unsigned long s_properties; /* File system properties. Currently holds
60694 on-disk FS format */
60695diff -urNp linux-2.6.32.43/include/linux/relay.h linux-2.6.32.43/include/linux/relay.h
60696--- linux-2.6.32.43/include/linux/relay.h 2011-03-27 14:31:47.000000000 -0400
60697+++ linux-2.6.32.43/include/linux/relay.h 2011-08-05 20:33:55.000000000 -0400
60698@@ -159,7 +159,7 @@ struct rchan_callbacks
60699 * The callback should return 0 if successful, negative if not.
60700 */
60701 int (*remove_buf_file)(struct dentry *dentry);
60702-};
60703+} __no_const;
60704
60705 /*
60706 * CONFIG_RELAY kernel API, kernel/relay.c
60707diff -urNp linux-2.6.32.43/include/linux/sched.h linux-2.6.32.43/include/linux/sched.h
60708--- linux-2.6.32.43/include/linux/sched.h 2011-03-27 14:31:47.000000000 -0400
60709+++ linux-2.6.32.43/include/linux/sched.h 2011-08-05 20:33:55.000000000 -0400
60710@@ -101,6 +101,7 @@ struct bio;
60711 struct fs_struct;
60712 struct bts_context;
60713 struct perf_event_context;
60714+struct linux_binprm;
60715
60716 /*
60717 * List of flags we want to share for kernel threads,
60718@@ -350,7 +351,7 @@ extern signed long schedule_timeout_kill
60719 extern signed long schedule_timeout_uninterruptible(signed long timeout);
60720 asmlinkage void __schedule(void);
60721 asmlinkage void schedule(void);
60722-extern int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner);
60723+extern int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner);
60724
60725 struct nsproxy;
60726 struct user_namespace;
60727@@ -371,9 +372,12 @@ struct user_namespace;
60728 #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
60729
60730 extern int sysctl_max_map_count;
60731+extern unsigned long sysctl_heap_stack_gap;
60732
60733 #include <linux/aio.h>
60734
60735+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len);
60736+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len);
60737 extern unsigned long
60738 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
60739 unsigned long, unsigned long);
60740@@ -666,6 +670,16 @@ struct signal_struct {
60741 struct tty_audit_buf *tty_audit_buf;
60742 #endif
60743
60744+#ifdef CONFIG_GRKERNSEC
60745+ u32 curr_ip;
60746+ u32 saved_ip;
60747+ u32 gr_saddr;
60748+ u32 gr_daddr;
60749+ u16 gr_sport;
60750+ u16 gr_dport;
60751+ u8 used_accept:1;
60752+#endif
60753+
60754 int oom_adj; /* OOM kill score adjustment (bit shift) */
60755 };
60756
60757@@ -723,6 +737,11 @@ struct user_struct {
60758 struct key *session_keyring; /* UID's default session keyring */
60759 #endif
60760
60761+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT) || defined(CONFIG_GRKERNSEC_BRUTE)
60762+ unsigned int banned;
60763+ unsigned long ban_expires;
60764+#endif
60765+
60766 /* Hash table maintenance information */
60767 struct hlist_node uidhash_node;
60768 uid_t uid;
60769@@ -1328,8 +1347,8 @@ struct task_struct {
60770 struct list_head thread_group;
60771
60772 struct completion *vfork_done; /* for vfork() */
60773- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
60774- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60775+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
60776+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
60777
60778 cputime_t utime, stime, utimescaled, stimescaled;
60779 cputime_t gtime;
60780@@ -1343,16 +1362,6 @@ struct task_struct {
60781 struct task_cputime cputime_expires;
60782 struct list_head cpu_timers[3];
60783
60784-/* process credentials */
60785- const struct cred *real_cred; /* objective and real subjective task
60786- * credentials (COW) */
60787- const struct cred *cred; /* effective (overridable) subjective task
60788- * credentials (COW) */
60789- struct mutex cred_guard_mutex; /* guard against foreign influences on
60790- * credential calculations
60791- * (notably. ptrace) */
60792- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60793-
60794 char comm[TASK_COMM_LEN]; /* executable name excluding path
60795 - access with [gs]et_task_comm (which lock
60796 it with task_lock())
60797@@ -1369,6 +1378,10 @@ struct task_struct {
60798 #endif
60799 /* CPU-specific state of this task */
60800 struct thread_struct thread;
60801+/* thread_info moved to task_struct */
60802+#ifdef CONFIG_X86
60803+ struct thread_info tinfo;
60804+#endif
60805 /* filesystem information */
60806 struct fs_struct *fs;
60807 /* open file information */
60808@@ -1436,6 +1449,15 @@ struct task_struct {
60809 int hardirq_context;
60810 int softirq_context;
60811 #endif
60812+
60813+/* process credentials */
60814+ const struct cred *real_cred; /* objective and real subjective task
60815+ * credentials (COW) */
60816+ struct mutex cred_guard_mutex; /* guard against foreign influences on
60817+ * credential calculations
60818+ * (notably. ptrace) */
60819+ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
60820+
60821 #ifdef CONFIG_LOCKDEP
60822 # define MAX_LOCK_DEPTH 48UL
60823 u64 curr_chain_key;
60824@@ -1456,6 +1478,9 @@ struct task_struct {
60825
60826 struct backing_dev_info *backing_dev_info;
60827
60828+ const struct cred *cred; /* effective (overridable) subjective task
60829+ * credentials (COW) */
60830+
60831 struct io_context *io_context;
60832
60833 unsigned long ptrace_message;
60834@@ -1519,6 +1544,21 @@ struct task_struct {
60835 unsigned long default_timer_slack_ns;
60836
60837 struct list_head *scm_work_list;
60838+
60839+#ifdef CONFIG_GRKERNSEC
60840+ /* grsecurity */
60841+ struct dentry *gr_chroot_dentry;
60842+ struct acl_subject_label *acl;
60843+ struct acl_role_label *role;
60844+ struct file *exec_file;
60845+ u16 acl_role_id;
60846+ /* is this the task that authenticated to the special role */
60847+ u8 acl_sp_role;
60848+ u8 is_writable;
60849+ u8 brute;
60850+ u8 gr_is_chrooted;
60851+#endif
60852+
60853 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
60854 /* Index of current stored adress in ret_stack */
60855 int curr_ret_stack;
60856@@ -1542,6 +1582,57 @@ struct task_struct {
60857 #endif /* CONFIG_TRACING */
60858 };
60859
60860+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
60861+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
60862+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
60863+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
60864+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
60865+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
60866+
60867+#ifdef CONFIG_PAX_SOFTMODE
60868+extern int pax_softmode;
60869+#endif
60870+
60871+extern int pax_check_flags(unsigned long *);
60872+
60873+/* if tsk != current then task_lock must be held on it */
60874+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
60875+static inline unsigned long pax_get_flags(struct task_struct *tsk)
60876+{
60877+ if (likely(tsk->mm))
60878+ return tsk->mm->pax_flags;
60879+ else
60880+ return 0UL;
60881+}
60882+
60883+/* if tsk != current then task_lock must be held on it */
60884+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
60885+{
60886+ if (likely(tsk->mm)) {
60887+ tsk->mm->pax_flags = flags;
60888+ return 0;
60889+ }
60890+ return -EINVAL;
60891+}
60892+#endif
60893+
60894+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
60895+extern void pax_set_initial_flags(struct linux_binprm *bprm);
60896+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
60897+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
60898+#endif
60899+
60900+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
60901+extern void pax_report_insns(void *pc, void *sp);
60902+extern void pax_report_refcount_overflow(struct pt_regs *regs);
60903+extern NORET_TYPE void pax_report_usercopy(const void *ptr, unsigned long len, bool to, const char *type) ATTRIB_NORET;
60904+
60905+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
60906+extern void pax_track_stack(void);
60907+#else
60908+static inline void pax_track_stack(void) {}
60909+#endif
60910+
60911 /* Future-safe accessor for struct task_struct's cpus_allowed. */
60912 #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed)
60913
60914@@ -1978,7 +2069,9 @@ void yield(void);
60915 extern struct exec_domain default_exec_domain;
60916
60917 union thread_union {
60918+#ifndef CONFIG_X86
60919 struct thread_info thread_info;
60920+#endif
60921 unsigned long stack[THREAD_SIZE/sizeof(long)];
60922 };
60923
60924@@ -2011,6 +2104,7 @@ extern struct pid_namespace init_pid_ns;
60925 */
60926
60927 extern struct task_struct *find_task_by_vpid(pid_t nr);
60928+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
60929 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
60930 struct pid_namespace *ns);
60931
60932@@ -2155,7 +2249,7 @@ extern void __cleanup_sighand(struct sig
60933 extern void exit_itimers(struct signal_struct *);
60934 extern void flush_itimer_signals(void);
60935
60936-extern NORET_TYPE void do_group_exit(int);
60937+extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET;
60938
60939 extern void daemonize(const char *, ...);
60940 extern int allow_signal(int);
60941@@ -2284,13 +2378,17 @@ static inline unsigned long *end_of_stac
60942
60943 #endif
60944
60945-static inline int object_is_on_stack(void *obj)
60946+static inline int object_starts_on_stack(void *obj)
60947 {
60948- void *stack = task_stack_page(current);
60949+ const void *stack = task_stack_page(current);
60950
60951 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
60952 }
60953
60954+#ifdef CONFIG_PAX_USERCOPY
60955+extern int object_is_on_stack(const void *obj, unsigned long len);
60956+#endif
60957+
60958 extern void thread_info_cache_init(void);
60959
60960 #ifdef CONFIG_DEBUG_STACK_USAGE
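The rename at the end of this hunk is deliberate: the inline helper only checks whether an object starts inside the current stack, so it is now called object_starts_on_stack(), while the PAX_USERCOPY object_is_on_stack(obj, len) declared alongside it also takes the copy length, so it can reject an object that begins on the stack but runs past its end. A userspace sketch of the two predicates, with THREAD_SIZE and the stack base simulated (this is not the kernel implementation):

#include <stdbool.h>
#include <stddef.h>

#define THREAD_SIZE 8192UL		/* assumed stack size for the sketch */

static bool starts_on_stack(const void *stack, const void *obj)
{
	const char *base = stack, *p = obj;

	return p >= base && p < base + THREAD_SIZE;
}

static bool object_fits_on_stack(const void *stack, const void *obj, size_t len)
{
	const char *base = stack, *p = obj;

	/* additionally reject copies that would run past the end of the stack */
	return p >= base && len <= THREAD_SIZE && p + len <= base + THREAD_SIZE;
}
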
60961diff -urNp linux-2.6.32.43/include/linux/screen_info.h linux-2.6.32.43/include/linux/screen_info.h
60962--- linux-2.6.32.43/include/linux/screen_info.h 2011-03-27 14:31:47.000000000 -0400
60963+++ linux-2.6.32.43/include/linux/screen_info.h 2011-04-17 15:56:46.000000000 -0400
60964@@ -42,7 +42,8 @@ struct screen_info {
60965 __u16 pages; /* 0x32 */
60966 __u16 vesa_attributes; /* 0x34 */
60967 __u32 capabilities; /* 0x36 */
60968- __u8 _reserved[6]; /* 0x3a */
60969+ __u16 vesapm_size; /* 0x3a */
60970+ __u8 _reserved[4]; /* 0x3c */
60971 } __attribute__((packed));
60972
60973 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
60974diff -urNp linux-2.6.32.43/include/linux/security.h linux-2.6.32.43/include/linux/security.h
60975--- linux-2.6.32.43/include/linux/security.h 2011-03-27 14:31:47.000000000 -0400
60976+++ linux-2.6.32.43/include/linux/security.h 2011-04-17 15:56:46.000000000 -0400
60977@@ -34,6 +34,7 @@
60978 #include <linux/key.h>
60979 #include <linux/xfrm.h>
60980 #include <linux/gfp.h>
60981+#include <linux/grsecurity.h>
60982 #include <net/flow.h>
60983
60984 /* Maximum number of letters for an LSM name string */
60985diff -urNp linux-2.6.32.43/include/linux/shm.h linux-2.6.32.43/include/linux/shm.h
60986--- linux-2.6.32.43/include/linux/shm.h 2011-03-27 14:31:47.000000000 -0400
60987+++ linux-2.6.32.43/include/linux/shm.h 2011-04-17 15:56:46.000000000 -0400
60988@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke
60989 pid_t shm_cprid;
60990 pid_t shm_lprid;
60991 struct user_struct *mlock_user;
60992+#ifdef CONFIG_GRKERNSEC
60993+ time_t shm_createtime;
60994+ pid_t shm_lapid;
60995+#endif
60996 };
60997
60998 /* shm_mode upper byte flags */
60999diff -urNp linux-2.6.32.43/include/linux/skbuff.h linux-2.6.32.43/include/linux/skbuff.h
61000--- linux-2.6.32.43/include/linux/skbuff.h 2011-03-27 14:31:47.000000000 -0400
61001+++ linux-2.6.32.43/include/linux/skbuff.h 2011-07-06 19:53:33.000000000 -0400
61002@@ -544,7 +544,7 @@ static inline union skb_shared_tx *skb_t
61003 */
61004 static inline int skb_queue_empty(const struct sk_buff_head *list)
61005 {
61006- return list->next == (struct sk_buff *)list;
61007+ return list->next == (const struct sk_buff *)list;
61008 }
61009
61010 /**
61011@@ -557,7 +557,7 @@ static inline int skb_queue_empty(const
61012 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
61013 const struct sk_buff *skb)
61014 {
61015- return (skb->next == (struct sk_buff *) list);
61016+ return (skb->next == (const struct sk_buff *) list);
61017 }
61018
61019 /**
61020@@ -570,7 +570,7 @@ static inline bool skb_queue_is_last(con
61021 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
61022 const struct sk_buff *skb)
61023 {
61024- return (skb->prev == (struct sk_buff *) list);
61025+ return (skb->prev == (const struct sk_buff *) list);
61026 }
61027
61028 /**
61029@@ -1367,7 +1367,7 @@ static inline int skb_network_offset(con
61030 * headroom, you should not reduce this.
61031 */
61032 #ifndef NET_SKB_PAD
61033-#define NET_SKB_PAD 32
61034+#define NET_SKB_PAD (_AC(32,UL))
61035 #endif
61036
61037 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
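NET_SKB_PAD is rewritten with the _AC() macro from <linux/const.h>, which pastes the UL suffix on when compiling C but leaves the bare number for the assembler, so the padding constant gets unsigned long type without breaking any non-C users; presumably the point is to keep later size arithmetic and comparisons against it unsigned. For context, _AC() in the mainline header looks roughly like this:

#ifdef __ASSEMBLY__
#define _AC(X, Y)	X		/* assembler: no C suffixes */
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)	/* C: paste the type suffix onto the literal */
#endif
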
61038diff -urNp linux-2.6.32.43/include/linux/slab_def.h linux-2.6.32.43/include/linux/slab_def.h
61039--- linux-2.6.32.43/include/linux/slab_def.h 2011-03-27 14:31:47.000000000 -0400
61040+++ linux-2.6.32.43/include/linux/slab_def.h 2011-05-04 17:56:28.000000000 -0400
61041@@ -69,10 +69,10 @@ struct kmem_cache {
61042 unsigned long node_allocs;
61043 unsigned long node_frees;
61044 unsigned long node_overflow;
61045- atomic_t allochit;
61046- atomic_t allocmiss;
61047- atomic_t freehit;
61048- atomic_t freemiss;
61049+ atomic_unchecked_t allochit;
61050+ atomic_unchecked_t allocmiss;
61051+ atomic_unchecked_t freehit;
61052+ atomic_unchecked_t freemiss;
61053
61054 /*
61055 * If debugging is enabled, then the allocator can add additional
61056diff -urNp linux-2.6.32.43/include/linux/slab.h linux-2.6.32.43/include/linux/slab.h
61057--- linux-2.6.32.43/include/linux/slab.h 2011-03-27 14:31:47.000000000 -0400
61058+++ linux-2.6.32.43/include/linux/slab.h 2011-04-17 15:56:46.000000000 -0400
61059@@ -11,12 +11,20 @@
61060
61061 #include <linux/gfp.h>
61062 #include <linux/types.h>
61063+#include <linux/err.h>
61064
61065 /*
61066 * Flags to pass to kmem_cache_create().
61067 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
61068 */
61069 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
61070+
61071+#ifdef CONFIG_PAX_USERCOPY
61072+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
61073+#else
61074+#define SLAB_USERCOPY 0x00000000UL
61075+#endif
61076+
61077 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
61078 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
61079 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
61080@@ -82,10 +90,13 @@
61081 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
61082 * Both make kfree a no-op.
61083 */
61084-#define ZERO_SIZE_PTR ((void *)16)
61085+#define ZERO_SIZE_PTR \
61086+({ \
61087+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
61088+ (void *)(-MAX_ERRNO-1L); \
61089+})
61090
61091-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
61092- (unsigned long)ZERO_SIZE_PTR)
61093+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
61094
61095 /*
61096 * struct kmem_cache related prototypes
61097@@ -138,6 +149,7 @@ void * __must_check krealloc(const void
61098 void kfree(const void *);
61099 void kzfree(const void *);
61100 size_t ksize(const void *);
61101+void check_object_size(const void *ptr, unsigned long n, bool to);
61102
61103 /*
61104 * Allocator specific definitions. These are mainly used to establish optimized
61105@@ -328,4 +340,37 @@ static inline void *kzalloc_node(size_t
61106
61107 void __init kmem_cache_init_late(void);
61108
61109+#define kmalloc(x, y) \
61110+({ \
61111+ void *___retval; \
61112+ intoverflow_t ___x = (intoverflow_t)x; \
61113+ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\
61114+ ___retval = NULL; \
61115+ else \
61116+ ___retval = kmalloc((size_t)___x, (y)); \
61117+ ___retval; \
61118+})
61119+
61120+#define kmalloc_node(x, y, z) \
61121+({ \
61122+ void *___retval; \
61123+ intoverflow_t ___x = (intoverflow_t)x; \
61124+ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\
61125+ ___retval = NULL; \
61126+ else \
61127+ ___retval = kmalloc_node((size_t)___x, (y), (z));\
61128+ ___retval; \
61129+})
61130+
61131+#define kzalloc(x, y) \
61132+({ \
61133+ void *___retval; \
61134+ intoverflow_t ___x = (intoverflow_t)x; \
61135+ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\
61136+ ___retval = NULL; \
61137+ else \
61138+ ___retval = kzalloc((size_t)___x, (y)); \
61139+ ___retval; \
61140+})
61141+
61142 #endif /* _LINUX_SLAB_H */
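Two related changes here: the kmalloc()/kzalloc()/kmalloc_node() wrapper macros guard against wrapped size computations (the same pattern is applied to vmalloc() later in the patch, where a runnable sketch is given), and ZERO_SIZE_PTR moves from address 16 to just below the error-pointer range so that ZERO_OR_NULL_PTR() becomes a single unsigned comparison: subtracting 1 wraps NULL around to the top of the address space, and everything from ZERO_SIZE_PTR upward is caught by the same test. A userspace sketch of that comparison, with uintptr_t standing in for the kernel's unsigned long casts:

#include <stdbool.h>
#include <stdint.h>

#define MAX_ERRNO	4095UL
#define ZERO_SIZE_PTR	((void *)(-MAX_ERRNO - 1L))

static bool zero_or_null_ptr(const void *x)
{
	/* true for NULL, for ZERO_SIZE_PTR and for the error-pointer range above it */
	return (uintptr_t)x - 1 >= (uintptr_t)ZERO_SIZE_PTR - 1;
}
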
61143diff -urNp linux-2.6.32.43/include/linux/slub_def.h linux-2.6.32.43/include/linux/slub_def.h
61144--- linux-2.6.32.43/include/linux/slub_def.h 2011-03-27 14:31:47.000000000 -0400
61145+++ linux-2.6.32.43/include/linux/slub_def.h 2011-08-05 20:33:55.000000000 -0400
61146@@ -86,7 +86,7 @@ struct kmem_cache {
61147 struct kmem_cache_order_objects max;
61148 struct kmem_cache_order_objects min;
61149 gfp_t allocflags; /* gfp flags to use on each alloc */
61150- int refcount; /* Refcount for slab cache destroy */
61151+ atomic_t refcount; /* Refcount for slab cache destroy */
61152 void (*ctor)(void *);
61153 int inuse; /* Offset to metadata */
61154 int align; /* Alignment */
61155@@ -215,7 +215,7 @@ static __always_inline struct kmem_cache
61156 #endif
61157
61158 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
61159-void *__kmalloc(size_t size, gfp_t flags);
61160+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1);
61161
61162 #ifdef CONFIG_KMEMTRACE
61163 extern void *kmem_cache_alloc_notrace(struct kmem_cache *s, gfp_t gfpflags);
61164diff -urNp linux-2.6.32.43/include/linux/sonet.h linux-2.6.32.43/include/linux/sonet.h
61165--- linux-2.6.32.43/include/linux/sonet.h 2011-03-27 14:31:47.000000000 -0400
61166+++ linux-2.6.32.43/include/linux/sonet.h 2011-04-17 15:56:46.000000000 -0400
61167@@ -61,7 +61,7 @@ struct sonet_stats {
61168 #include <asm/atomic.h>
61169
61170 struct k_sonet_stats {
61171-#define __HANDLE_ITEM(i) atomic_t i
61172+#define __HANDLE_ITEM(i) atomic_unchecked_t i
61173 __SONET_ITEMS
61174 #undef __HANDLE_ITEM
61175 };
61176diff -urNp linux-2.6.32.43/include/linux/sunrpc/cache.h linux-2.6.32.43/include/linux/sunrpc/cache.h
61177--- linux-2.6.32.43/include/linux/sunrpc/cache.h 2011-03-27 14:31:47.000000000 -0400
61178+++ linux-2.6.32.43/include/linux/sunrpc/cache.h 2011-08-05 20:33:55.000000000 -0400
61179@@ -125,7 +125,7 @@ struct cache_detail {
61180 */
61181 struct cache_req {
61182 struct cache_deferred_req *(*defer)(struct cache_req *req);
61183-};
61184+} __no_const;
61185 /* this must be embedded in a deferred_request that is being
61186 * delayed awaiting cache-fill
61187 */
61188diff -urNp linux-2.6.32.43/include/linux/sunrpc/clnt.h linux-2.6.32.43/include/linux/sunrpc/clnt.h
61189--- linux-2.6.32.43/include/linux/sunrpc/clnt.h 2011-03-27 14:31:47.000000000 -0400
61190+++ linux-2.6.32.43/include/linux/sunrpc/clnt.h 2011-04-17 15:56:46.000000000 -0400
61191@@ -167,9 +167,9 @@ static inline unsigned short rpc_get_por
61192 {
61193 switch (sap->sa_family) {
61194 case AF_INET:
61195- return ntohs(((struct sockaddr_in *)sap)->sin_port);
61196+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
61197 case AF_INET6:
61198- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
61199+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
61200 }
61201 return 0;
61202 }
61203@@ -202,7 +202,7 @@ static inline bool __rpc_cmp_addr4(const
61204 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
61205 const struct sockaddr *src)
61206 {
61207- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
61208+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
61209 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
61210
61211 dsin->sin_family = ssin->sin_family;
61212@@ -299,7 +299,7 @@ static inline u32 rpc_get_scope_id(const
61213 if (sa->sa_family != AF_INET6)
61214 return 0;
61215
61216- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
61217+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
61218 }
61219
61220 #endif /* __KERNEL__ */
61221diff -urNp linux-2.6.32.43/include/linux/sunrpc/svc_rdma.h linux-2.6.32.43/include/linux/sunrpc/svc_rdma.h
61222--- linux-2.6.32.43/include/linux/sunrpc/svc_rdma.h 2011-03-27 14:31:47.000000000 -0400
61223+++ linux-2.6.32.43/include/linux/sunrpc/svc_rdma.h 2011-05-04 17:56:28.000000000 -0400
61224@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
61225 extern unsigned int svcrdma_max_requests;
61226 extern unsigned int svcrdma_max_req_size;
61227
61228-extern atomic_t rdma_stat_recv;
61229-extern atomic_t rdma_stat_read;
61230-extern atomic_t rdma_stat_write;
61231-extern atomic_t rdma_stat_sq_starve;
61232-extern atomic_t rdma_stat_rq_starve;
61233-extern atomic_t rdma_stat_rq_poll;
61234-extern atomic_t rdma_stat_rq_prod;
61235-extern atomic_t rdma_stat_sq_poll;
61236-extern atomic_t rdma_stat_sq_prod;
61237+extern atomic_unchecked_t rdma_stat_recv;
61238+extern atomic_unchecked_t rdma_stat_read;
61239+extern atomic_unchecked_t rdma_stat_write;
61240+extern atomic_unchecked_t rdma_stat_sq_starve;
61241+extern atomic_unchecked_t rdma_stat_rq_starve;
61242+extern atomic_unchecked_t rdma_stat_rq_poll;
61243+extern atomic_unchecked_t rdma_stat_rq_prod;
61244+extern atomic_unchecked_t rdma_stat_sq_poll;
61245+extern atomic_unchecked_t rdma_stat_sq_prod;
61246
61247 #define RPCRDMA_VERSION 1
61248
61249diff -urNp linux-2.6.32.43/include/linux/suspend.h linux-2.6.32.43/include/linux/suspend.h
61250--- linux-2.6.32.43/include/linux/suspend.h 2011-03-27 14:31:47.000000000 -0400
61251+++ linux-2.6.32.43/include/linux/suspend.h 2011-04-17 15:56:46.000000000 -0400
61252@@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t;
61253 * which require special recovery actions in that situation.
61254 */
61255 struct platform_suspend_ops {
61256- int (*valid)(suspend_state_t state);
61257- int (*begin)(suspend_state_t state);
61258- int (*prepare)(void);
61259- int (*prepare_late)(void);
61260- int (*enter)(suspend_state_t state);
61261- void (*wake)(void);
61262- void (*finish)(void);
61263- void (*end)(void);
61264- void (*recover)(void);
61265+ int (* const valid)(suspend_state_t state);
61266+ int (* const begin)(suspend_state_t state);
61267+ int (* const prepare)(void);
61268+ int (* const prepare_late)(void);
61269+ int (* const enter)(suspend_state_t state);
61270+ void (* const wake)(void);
61271+ void (* const finish)(void);
61272+ void (* const end)(void);
61273+ void (* const recover)(void);
61274 };
61275
61276 #ifdef CONFIG_SUSPEND
61277@@ -120,7 +120,7 @@ struct platform_suspend_ops {
61278 * suspend_set_ops - set platform dependent suspend operations
61279 * @ops: The new suspend operations to set.
61280 */
61281-extern void suspend_set_ops(struct platform_suspend_ops *ops);
61282+extern void suspend_set_ops(const struct platform_suspend_ops *ops);
61283 extern int suspend_valid_only_mem(suspend_state_t state);
61284
61285 /**
61286@@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st
61287 #else /* !CONFIG_SUSPEND */
61288 #define suspend_valid_only_mem NULL
61289
61290-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {}
61291+static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {}
61292 static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; }
61293 #endif /* !CONFIG_SUSPEND */
61294
61295@@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone
61296 * platforms which require special recovery actions in that situation.
61297 */
61298 struct platform_hibernation_ops {
61299- int (*begin)(void);
61300- void (*end)(void);
61301- int (*pre_snapshot)(void);
61302- void (*finish)(void);
61303- int (*prepare)(void);
61304- int (*enter)(void);
61305- void (*leave)(void);
61306- int (*pre_restore)(void);
61307- void (*restore_cleanup)(void);
61308- void (*recover)(void);
61309+ int (* const begin)(void);
61310+ void (* const end)(void);
61311+ int (* const pre_snapshot)(void);
61312+ void (* const finish)(void);
61313+ int (* const prepare)(void);
61314+ int (* const enter)(void);
61315+ void (* const leave)(void);
61316+ int (* const pre_restore)(void);
61317+ void (* const restore_cleanup)(void);
61318+ void (* const recover)(void);
61319 };
61320
61321 #ifdef CONFIG_HIBERNATION
61322@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct
61323 extern void swsusp_unset_page_free(struct page *);
61324 extern unsigned long get_safe_page(gfp_t gfp_mask);
61325
61326-extern void hibernation_set_ops(struct platform_hibernation_ops *ops);
61327+extern void hibernation_set_ops(const struct platform_hibernation_ops *ops);
61328 extern int hibernate(void);
61329 extern bool system_entering_hibernation(void);
61330 #else /* CONFIG_HIBERNATION */
61331@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd
61332 static inline void swsusp_set_page_free(struct page *p) {}
61333 static inline void swsusp_unset_page_free(struct page *p) {}
61334
61335-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {}
61336+static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {}
61337 static inline int hibernate(void) { return -ENOSYS; }
61338 static inline bool system_entering_hibernation(void) { return false; }
61339 #endif /* CONFIG_HIBERNATION */
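Constifying the platform_suspend_ops and platform_hibernation_ops callbacks (and taking const pointers in the suspend_set_ops()/hibernation_set_ops() setters) means the function pointers can only be established by a static initializer and can no longer be retargeted at runtime, removing a convenient kernel write target; the converse __no_const annotation seen elsewhere in the patch opts a structure out of constification when its members genuinely must stay writable. A minimal illustration of the const-member pattern; example_ops is illustrative, not a kernel structure:

struct example_ops {
	int  (* const prepare)(void);
	void (* const finish)(void);
};

static int  ex_prepare(void) { return 0; }
static void ex_finish(void)  { }

static const struct example_ops example_ops = {
	.prepare = ex_prepare,
	.finish  = ex_finish,
};

static int example_run(void)
{
	int ret = example_ops.prepare();

	example_ops.finish();
	/* example_ops.prepare = other_fn;   -- rejected at compile time */
	return ret;
}
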
61340diff -urNp linux-2.6.32.43/include/linux/sysctl.h linux-2.6.32.43/include/linux/sysctl.h
61341--- linux-2.6.32.43/include/linux/sysctl.h 2011-03-27 14:31:47.000000000 -0400
61342+++ linux-2.6.32.43/include/linux/sysctl.h 2011-04-17 15:56:46.000000000 -0400
61343@@ -164,7 +164,11 @@ enum
61344 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
61345 };
61346
61347-
61348+#ifdef CONFIG_PAX_SOFTMODE
61349+enum {
61350+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
61351+};
61352+#endif
61353
61354 /* CTL_VM names: */
61355 enum
61356@@ -982,6 +986,8 @@ typedef int proc_handler (struct ctl_tab
61357
61358 extern int proc_dostring(struct ctl_table *, int,
61359 void __user *, size_t *, loff_t *);
61360+extern int proc_dostring_modpriv(struct ctl_table *, int,
61361+ void __user *, size_t *, loff_t *);
61362 extern int proc_dointvec(struct ctl_table *, int,
61363 void __user *, size_t *, loff_t *);
61364 extern int proc_dointvec_minmax(struct ctl_table *, int,
61365@@ -1003,6 +1009,7 @@ extern int do_sysctl (int __user *name,
61366
61367 extern ctl_handler sysctl_data;
61368 extern ctl_handler sysctl_string;
61369+extern ctl_handler sysctl_string_modpriv;
61370 extern ctl_handler sysctl_intvec;
61371 extern ctl_handler sysctl_jiffies;
61372 extern ctl_handler sysctl_ms_jiffies;
61373diff -urNp linux-2.6.32.43/include/linux/sysfs.h linux-2.6.32.43/include/linux/sysfs.h
61374--- linux-2.6.32.43/include/linux/sysfs.h 2011-03-27 14:31:47.000000000 -0400
61375+++ linux-2.6.32.43/include/linux/sysfs.h 2011-04-17 15:56:46.000000000 -0400
61376@@ -75,8 +75,8 @@ struct bin_attribute {
61377 };
61378
61379 struct sysfs_ops {
61380- ssize_t (*show)(struct kobject *, struct attribute *,char *);
61381- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t);
61382+ ssize_t (* const show)(struct kobject *, struct attribute *,char *);
61383+ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t);
61384 };
61385
61386 struct sysfs_dirent;
61387diff -urNp linux-2.6.32.43/include/linux/thread_info.h linux-2.6.32.43/include/linux/thread_info.h
61388--- linux-2.6.32.43/include/linux/thread_info.h 2011-03-27 14:31:47.000000000 -0400
61389+++ linux-2.6.32.43/include/linux/thread_info.h 2011-04-17 15:56:46.000000000 -0400
61390@@ -23,7 +23,7 @@ struct restart_block {
61391 };
61392 /* For futex_wait and futex_wait_requeue_pi */
61393 struct {
61394- u32 *uaddr;
61395+ u32 __user *uaddr;
61396 u32 val;
61397 u32 flags;
61398 u32 bitset;
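The futex restart block's uaddr really is a user-space address, so it gains the __user annotation; under sparse (__CHECKER__) that expands to an address-space attribute, and dereferencing it directly or mixing it with kernel pointers gets flagged, while ordinary compilation sees an empty macro. Roughly, mirroring the mainline compiler.h definition:

#ifdef __CHECKER__
# define __user	__attribute__((noderef, address_space(1)))
#else
# define __user
#endif

typedef unsigned int u32;

struct futex_restart {			/* illustrative, not the kernel's struct restart_block */
	u32 __user *uaddr;		/* points into the user address space */
	u32 val;
};
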
61399diff -urNp linux-2.6.32.43/include/linux/tty.h linux-2.6.32.43/include/linux/tty.h
61400--- linux-2.6.32.43/include/linux/tty.h 2011-03-27 14:31:47.000000000 -0400
61401+++ linux-2.6.32.43/include/linux/tty.h 2011-08-05 20:33:55.000000000 -0400
61402@@ -493,7 +493,6 @@ extern void tty_ldisc_begin(void);
61403 /* This last one is just for the tty layer internals and shouldn't be used elsewhere */
61404 extern void tty_ldisc_enable(struct tty_struct *tty);
61405
61406-
61407 /* n_tty.c */
61408 extern struct tty_ldisc_ops tty_ldisc_N_TTY;
61409
61410diff -urNp linux-2.6.32.43/include/linux/tty_ldisc.h linux-2.6.32.43/include/linux/tty_ldisc.h
61411--- linux-2.6.32.43/include/linux/tty_ldisc.h 2011-03-27 14:31:47.000000000 -0400
61412+++ linux-2.6.32.43/include/linux/tty_ldisc.h 2011-04-17 15:56:46.000000000 -0400
61413@@ -139,7 +139,7 @@ struct tty_ldisc_ops {
61414
61415 struct module *owner;
61416
61417- int refcount;
61418+ atomic_t refcount;
61419 };
61420
61421 struct tty_ldisc {
61422diff -urNp linux-2.6.32.43/include/linux/types.h linux-2.6.32.43/include/linux/types.h
61423--- linux-2.6.32.43/include/linux/types.h 2011-03-27 14:31:47.000000000 -0400
61424+++ linux-2.6.32.43/include/linux/types.h 2011-04-17 15:56:46.000000000 -0400
61425@@ -191,10 +191,26 @@ typedef struct {
61426 volatile int counter;
61427 } atomic_t;
61428
61429+#ifdef CONFIG_PAX_REFCOUNT
61430+typedef struct {
61431+ volatile int counter;
61432+} atomic_unchecked_t;
61433+#else
61434+typedef atomic_t atomic_unchecked_t;
61435+#endif
61436+
61437 #ifdef CONFIG_64BIT
61438 typedef struct {
61439 volatile long counter;
61440 } atomic64_t;
61441+
61442+#ifdef CONFIG_PAX_REFCOUNT
61443+typedef struct {
61444+ volatile long counter;
61445+} atomic64_unchecked_t;
61446+#else
61447+typedef atomic64_t atomic64_unchecked_t;
61448+#endif
61449 #endif
61450
61451 struct ustat {
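These typedefs are the core of the PAX_REFCOUNT split used throughout the patch: plain atomic_t operations get instrumented to detect overflow (a wrapped reference count is a classic use-after-free primitive), while counters that may legitimately wrap, such as statistics, are converted to atomic_unchecked_t and keep the ordinary semantics. The real check is architecture-specific and traps on overflow; the portable userspace sketch below only illustrates the behavioural difference and is not the PaX implementation:

#include <limits.h>
#include <stdbool.h>

typedef struct { volatile int counter; } atomic_t;
typedef struct { volatile int counter; } atomic_unchecked_t;

/* checked flavour: detect the wrap and report it instead of rolling over */
static bool atomic_inc_checked(atomic_t *v)
{
	int old = __atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);

	if (old == INT_MAX) {
		/* undo the wrap; a real implementation would also log the event */
		__atomic_fetch_sub(&v->counter, 1, __ATOMIC_RELAXED);
		return false;
	}
	return true;
}

/* unchecked flavour: an ordinary wrapping counter, fine for statistics */
static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	__atomic_fetch_add(&v->counter, 1, __ATOMIC_RELAXED);
}
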
61452diff -urNp linux-2.6.32.43/include/linux/uaccess.h linux-2.6.32.43/include/linux/uaccess.h
61453--- linux-2.6.32.43/include/linux/uaccess.h 2011-03-27 14:31:47.000000000 -0400
61454+++ linux-2.6.32.43/include/linux/uaccess.h 2011-04-17 15:56:46.000000000 -0400
61455@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_
61456 long ret; \
61457 mm_segment_t old_fs = get_fs(); \
61458 \
61459- set_fs(KERNEL_DS); \
61460 pagefault_disable(); \
61461+ set_fs(KERNEL_DS); \
61462 ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
61463- pagefault_enable(); \
61464 set_fs(old_fs); \
61465+ pagefault_enable(); \
61466 ret; \
61467 })
61468
61469@@ -93,7 +93,7 @@ static inline unsigned long __copy_from_
61470 * Safely read from address @src to the buffer at @dst. If a kernel fault
61471 * happens, handle that and return -EFAULT.
61472 */
61473-extern long probe_kernel_read(void *dst, void *src, size_t size);
61474+extern long probe_kernel_read(void *dst, const void *src, size_t size);
61475
61476 /*
61477 * probe_kernel_write(): safely attempt to write to a location
61478@@ -104,6 +104,6 @@ extern long probe_kernel_read(void *dst,
61479 * Safely write to address @dst from the buffer at @src. If a kernel fault
61480 * happens, handle that and return -EFAULT.
61481 */
61482-extern long probe_kernel_write(void *dst, void *src, size_t size);
61483+extern long probe_kernel_write(void *dst, const void *src, size_t size);
61484
61485 #endif /* __LINUX_UACCESS_H__ */
61486diff -urNp linux-2.6.32.43/include/linux/unaligned/access_ok.h linux-2.6.32.43/include/linux/unaligned/access_ok.h
61487--- linux-2.6.32.43/include/linux/unaligned/access_ok.h 2011-03-27 14:31:47.000000000 -0400
61488+++ linux-2.6.32.43/include/linux/unaligned/access_ok.h 2011-04-17 15:56:46.000000000 -0400
61489@@ -6,32 +6,32 @@
61490
61491 static inline u16 get_unaligned_le16(const void *p)
61492 {
61493- return le16_to_cpup((__le16 *)p);
61494+ return le16_to_cpup((const __le16 *)p);
61495 }
61496
61497 static inline u32 get_unaligned_le32(const void *p)
61498 {
61499- return le32_to_cpup((__le32 *)p);
61500+ return le32_to_cpup((const __le32 *)p);
61501 }
61502
61503 static inline u64 get_unaligned_le64(const void *p)
61504 {
61505- return le64_to_cpup((__le64 *)p);
61506+ return le64_to_cpup((const __le64 *)p);
61507 }
61508
61509 static inline u16 get_unaligned_be16(const void *p)
61510 {
61511- return be16_to_cpup((__be16 *)p);
61512+ return be16_to_cpup((const __be16 *)p);
61513 }
61514
61515 static inline u32 get_unaligned_be32(const void *p)
61516 {
61517- return be32_to_cpup((__be32 *)p);
61518+ return be32_to_cpup((const __be32 *)p);
61519 }
61520
61521 static inline u64 get_unaligned_be64(const void *p)
61522 {
61523- return be64_to_cpup((__be64 *)p);
61524+ return be64_to_cpup((const __be64 *)p);
61525 }
61526
61527 static inline void put_unaligned_le16(u16 val, void *p)
61528diff -urNp linux-2.6.32.43/include/linux/vmalloc.h linux-2.6.32.43/include/linux/vmalloc.h
61529--- linux-2.6.32.43/include/linux/vmalloc.h 2011-03-27 14:31:47.000000000 -0400
61530+++ linux-2.6.32.43/include/linux/vmalloc.h 2011-04-17 15:56:46.000000000 -0400
61531@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining
61532 #define VM_MAP 0x00000004 /* vmap()ed pages */
61533 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
61534 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
61535+
61536+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
61537+#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */
61538+#endif
61539+
61540 /* bits [20..32] reserved for arch specific ioremap internals */
61541
61542 /*
61543@@ -123,4 +128,81 @@ struct vm_struct **pcpu_get_vm_areas(con
61544
61545 void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
61546
61547+#define vmalloc(x) \
61548+({ \
61549+ void *___retval; \
61550+ intoverflow_t ___x = (intoverflow_t)x; \
61551+ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \
61552+ ___retval = NULL; \
61553+ else \
61554+ ___retval = vmalloc((unsigned long)___x); \
61555+ ___retval; \
61556+})
61557+
61558+#define __vmalloc(x, y, z) \
61559+({ \
61560+ void *___retval; \
61561+ intoverflow_t ___x = (intoverflow_t)x; \
61562+ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\
61563+ ___retval = NULL; \
61564+ else \
61565+ ___retval = __vmalloc((unsigned long)___x, (y), (z));\
61566+ ___retval; \
61567+})
61568+
61569+#define vmalloc_user(x) \
61570+({ \
61571+ void *___retval; \
61572+ intoverflow_t ___x = (intoverflow_t)x; \
61573+ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\
61574+ ___retval = NULL; \
61575+ else \
61576+ ___retval = vmalloc_user((unsigned long)___x); \
61577+ ___retval; \
61578+})
61579+
61580+#define vmalloc_exec(x) \
61581+({ \
61582+ void *___retval; \
61583+ intoverflow_t ___x = (intoverflow_t)x; \
61584+ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\
61585+ ___retval = NULL; \
61586+ else \
61587+ ___retval = vmalloc_exec((unsigned long)___x); \
61588+ ___retval; \
61589+})
61590+
61591+#define vmalloc_node(x, y) \
61592+({ \
61593+ void *___retval; \
61594+ intoverflow_t ___x = (intoverflow_t)x; \
61595+ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\
61596+ ___retval = NULL; \
61597+ else \
61598+ ___retval = vmalloc_node((unsigned long)___x, (y));\
61599+ ___retval; \
61600+})
61601+
61602+#define vmalloc_32(x) \
61603+({ \
61604+ void *___retval; \
61605+ intoverflow_t ___x = (intoverflow_t)x; \
61606+ if (WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\
61607+ ___retval = NULL; \
61608+ else \
61609+ ___retval = vmalloc_32((unsigned long)___x); \
61610+ ___retval; \
61611+})
61612+
61613+#define vmalloc_32_user(x) \
61614+({ \
61615+ void *___retval; \
61616+ intoverflow_t ___x = (intoverflow_t)x; \
61617+ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\
61618+ ___retval = NULL; \
61619+ else \
61620+ ___retval = vmalloc_32_user((unsigned long)___x);\
61621+ ___retval; \
61622+})
61623+
61624 #endif /* _LINUX_VMALLOC_H */
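The vmalloc() family wrappers above follow the same pattern as the kmalloc() ones in slab.h: the requested size is first widened to intoverflow_t (wider than unsigned long on 32-bit builds), checked against what the underlying allocator can represent, and only then narrowed and forwarded, so an arithmetic wrap produces a WARN and a NULL return rather than a too-small allocation. A runnable userspace sketch of the pattern, with a 32-bit raw_alloc() standing in for a 32-bit kernel's vmalloc() and checked_alloc() for the macro front end:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

static void *raw_alloc(uint32_t size)		/* the underlying allocator's real interface */
{
	return malloc(size);
}

static void *checked_alloc(uint64_t size)	/* widened-size front end */
{
	if (size > UINT32_MAX) {
		fprintf(stderr, "alloc size overflow (%" PRIu64 " bytes)\n", size);
		return NULL;
	}
	return raw_alloc((uint32_t)size);
}

int main(void)
{
	uint64_t nmemb = 1u << 20, elem = 8192;	/* 8 GiB: would wrap a 32-bit size */
	void *p = checked_alloc(nmemb * elem);	/* caught: warns and returns NULL */

	free(p);
	return 0;
}
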
61625diff -urNp linux-2.6.32.43/include/linux/vmstat.h linux-2.6.32.43/include/linux/vmstat.h
61626--- linux-2.6.32.43/include/linux/vmstat.h 2011-03-27 14:31:47.000000000 -0400
61627+++ linux-2.6.32.43/include/linux/vmstat.h 2011-04-17 15:56:46.000000000 -0400
61628@@ -136,18 +136,18 @@ static inline void vm_events_fold_cpu(in
61629 /*
61630 * Zone based page accounting with per cpu differentials.
61631 */
61632-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61633+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
61634
61635 static inline void zone_page_state_add(long x, struct zone *zone,
61636 enum zone_stat_item item)
61637 {
61638- atomic_long_add(x, &zone->vm_stat[item]);
61639- atomic_long_add(x, &vm_stat[item]);
61640+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
61641+ atomic_long_add_unchecked(x, &vm_stat[item]);
61642 }
61643
61644 static inline unsigned long global_page_state(enum zone_stat_item item)
61645 {
61646- long x = atomic_long_read(&vm_stat[item]);
61647+ long x = atomic_long_read_unchecked(&vm_stat[item]);
61648 #ifdef CONFIG_SMP
61649 if (x < 0)
61650 x = 0;
61651@@ -158,7 +158,7 @@ static inline unsigned long global_page_
61652 static inline unsigned long zone_page_state(struct zone *zone,
61653 enum zone_stat_item item)
61654 {
61655- long x = atomic_long_read(&zone->vm_stat[item]);
61656+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61657 #ifdef CONFIG_SMP
61658 if (x < 0)
61659 x = 0;
61660@@ -175,7 +175,7 @@ static inline unsigned long zone_page_st
61661 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
61662 enum zone_stat_item item)
61663 {
61664- long x = atomic_long_read(&zone->vm_stat[item]);
61665+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
61666
61667 #ifdef CONFIG_SMP
61668 int cpu;
61669@@ -264,8 +264,8 @@ static inline void __mod_zone_page_state
61670
61671 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
61672 {
61673- atomic_long_inc(&zone->vm_stat[item]);
61674- atomic_long_inc(&vm_stat[item]);
61675+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
61676+ atomic_long_inc_unchecked(&vm_stat[item]);
61677 }
61678
61679 static inline void __inc_zone_page_state(struct page *page,
61680@@ -276,8 +276,8 @@ static inline void __inc_zone_page_state
61681
61682 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
61683 {
61684- atomic_long_dec(&zone->vm_stat[item]);
61685- atomic_long_dec(&vm_stat[item]);
61686+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
61687+ atomic_long_dec_unchecked(&vm_stat[item]);
61688 }
61689
61690 static inline void __dec_zone_page_state(struct page *page,
61691diff -urNp linux-2.6.32.43/include/media/v4l2-dev.h linux-2.6.32.43/include/media/v4l2-dev.h
61692--- linux-2.6.32.43/include/media/v4l2-dev.h 2011-03-27 14:31:47.000000000 -0400
61693+++ linux-2.6.32.43/include/media/v4l2-dev.h 2011-08-05 20:33:55.000000000 -0400
61694@@ -34,7 +34,7 @@ struct v4l2_device;
61695 #define V4L2_FL_UNREGISTERED (0)
61696
61697 struct v4l2_file_operations {
61698- struct module *owner;
61699+ struct module * const owner;
61700 ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
61701 ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
61702 unsigned int (*poll) (struct file *, struct poll_table_struct *);
61703diff -urNp linux-2.6.32.43/include/media/v4l2-device.h linux-2.6.32.43/include/media/v4l2-device.h
61704--- linux-2.6.32.43/include/media/v4l2-device.h 2011-03-27 14:31:47.000000000 -0400
61705+++ linux-2.6.32.43/include/media/v4l2-device.h 2011-05-04 17:56:28.000000000 -0400
61706@@ -71,7 +71,7 @@ int __must_check v4l2_device_register(st
61707 this function returns 0. If the name ends with a digit (e.g. cx18),
61708 then the name will be set to cx18-0 since cx180 looks really odd. */
61709 int v4l2_device_set_name(struct v4l2_device *v4l2_dev, const char *basename,
61710- atomic_t *instance);
61711+ atomic_unchecked_t *instance);
61712
61713 /* Set v4l2_dev->dev to NULL. Call when the USB parent disconnects.
61714 Since the parent disappears this ensures that v4l2_dev doesn't have an
61715diff -urNp linux-2.6.32.43/include/net/flow.h linux-2.6.32.43/include/net/flow.h
61716--- linux-2.6.32.43/include/net/flow.h 2011-03-27 14:31:47.000000000 -0400
61717+++ linux-2.6.32.43/include/net/flow.h 2011-05-04 17:56:28.000000000 -0400
61718@@ -92,7 +92,7 @@ typedef int (*flow_resolve_t)(struct net
61719 extern void *flow_cache_lookup(struct net *net, struct flowi *key, u16 family,
61720 u8 dir, flow_resolve_t resolver);
61721 extern void flow_cache_flush(void);
61722-extern atomic_t flow_cache_genid;
61723+extern atomic_unchecked_t flow_cache_genid;
61724
61725 static inline int flow_cache_uli_match(struct flowi *fl1, struct flowi *fl2)
61726 {
61727diff -urNp linux-2.6.32.43/include/net/inetpeer.h linux-2.6.32.43/include/net/inetpeer.h
61728--- linux-2.6.32.43/include/net/inetpeer.h 2011-03-27 14:31:47.000000000 -0400
61729+++ linux-2.6.32.43/include/net/inetpeer.h 2011-04-17 15:56:46.000000000 -0400
61730@@ -24,7 +24,7 @@ struct inet_peer
61731 __u32 dtime; /* the time of last use of not
61732 * referenced entries */
61733 atomic_t refcnt;
61734- atomic_t rid; /* Frag reception counter */
61735+ atomic_unchecked_t rid; /* Frag reception counter */
61736 __u32 tcp_ts;
61737 unsigned long tcp_ts_stamp;
61738 };
61739diff -urNp linux-2.6.32.43/include/net/ip_vs.h linux-2.6.32.43/include/net/ip_vs.h
61740--- linux-2.6.32.43/include/net/ip_vs.h 2011-03-27 14:31:47.000000000 -0400
61741+++ linux-2.6.32.43/include/net/ip_vs.h 2011-05-04 17:56:28.000000000 -0400
61742@@ -365,7 +365,7 @@ struct ip_vs_conn {
61743 struct ip_vs_conn *control; /* Master control connection */
61744 atomic_t n_control; /* Number of controlled ones */
61745 struct ip_vs_dest *dest; /* real server */
61746- atomic_t in_pkts; /* incoming packet counter */
61747+ atomic_unchecked_t in_pkts; /* incoming packet counter */
61748
61749 /* packet transmitter for different forwarding methods. If it
61750 mangles the packet, it must return NF_DROP or better NF_STOLEN,
61751@@ -466,7 +466,7 @@ struct ip_vs_dest {
61752 union nf_inet_addr addr; /* IP address of the server */
61753 __be16 port; /* port number of the server */
61754 volatile unsigned flags; /* dest status flags */
61755- atomic_t conn_flags; /* flags to copy to conn */
61756+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
61757 atomic_t weight; /* server weight */
61758
61759 atomic_t refcnt; /* reference counter */
61760diff -urNp linux-2.6.32.43/include/net/irda/ircomm_core.h linux-2.6.32.43/include/net/irda/ircomm_core.h
61761--- linux-2.6.32.43/include/net/irda/ircomm_core.h 2011-03-27 14:31:47.000000000 -0400
61762+++ linux-2.6.32.43/include/net/irda/ircomm_core.h 2011-08-05 20:33:55.000000000 -0400
61763@@ -51,7 +51,7 @@ typedef struct {
61764 int (*connect_response)(struct ircomm_cb *, struct sk_buff *);
61765 int (*disconnect_request)(struct ircomm_cb *, struct sk_buff *,
61766 struct ircomm_info *);
61767-} call_t;
61768+} __no_const call_t;
61769
61770 struct ircomm_cb {
61771 irda_queue_t queue;
61772diff -urNp linux-2.6.32.43/include/net/irda/ircomm_tty.h linux-2.6.32.43/include/net/irda/ircomm_tty.h
61773--- linux-2.6.32.43/include/net/irda/ircomm_tty.h 2011-03-27 14:31:47.000000000 -0400
61774+++ linux-2.6.32.43/include/net/irda/ircomm_tty.h 2011-04-17 15:56:46.000000000 -0400
61775@@ -35,6 +35,7 @@
61776 #include <linux/termios.h>
61777 #include <linux/timer.h>
61778 #include <linux/tty.h> /* struct tty_struct */
61779+#include <asm/local.h>
61780
61781 #include <net/irda/irias_object.h>
61782 #include <net/irda/ircomm_core.h>
61783@@ -105,8 +106,8 @@ struct ircomm_tty_cb {
61784 unsigned short close_delay;
61785 unsigned short closing_wait; /* time to wait before closing */
61786
61787- int open_count;
61788- int blocked_open; /* # of blocked opens */
61789+ local_t open_count;
61790+ local_t blocked_open; /* # of blocked opens */
61791
61792 /* Protect concurent access to :
61793 * o self->open_count
61794diff -urNp linux-2.6.32.43/include/net/iucv/af_iucv.h linux-2.6.32.43/include/net/iucv/af_iucv.h
61795--- linux-2.6.32.43/include/net/iucv/af_iucv.h 2011-03-27 14:31:47.000000000 -0400
61796+++ linux-2.6.32.43/include/net/iucv/af_iucv.h 2011-05-04 17:56:28.000000000 -0400
61797@@ -87,7 +87,7 @@ struct iucv_sock {
61798 struct iucv_sock_list {
61799 struct hlist_head head;
61800 rwlock_t lock;
61801- atomic_t autobind_name;
61802+ atomic_unchecked_t autobind_name;
61803 };
61804
61805 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
61806diff -urNp linux-2.6.32.43/include/net/lapb.h linux-2.6.32.43/include/net/lapb.h
61807--- linux-2.6.32.43/include/net/lapb.h 2011-03-27 14:31:47.000000000 -0400
61808+++ linux-2.6.32.43/include/net/lapb.h 2011-08-05 20:33:55.000000000 -0400
61809@@ -95,7 +95,7 @@ struct lapb_cb {
61810 struct sk_buff_head write_queue;
61811 struct sk_buff_head ack_queue;
61812 unsigned char window;
61813- struct lapb_register_struct callbacks;
61814+ struct lapb_register_struct *callbacks;
61815
61816 /* FRMR control information */
61817 struct lapb_frame frmr_data;
61818diff -urNp linux-2.6.32.43/include/net/neighbour.h linux-2.6.32.43/include/net/neighbour.h
61819--- linux-2.6.32.43/include/net/neighbour.h 2011-03-27 14:31:47.000000000 -0400
61820+++ linux-2.6.32.43/include/net/neighbour.h 2011-04-17 15:56:46.000000000 -0400
61821@@ -125,12 +125,12 @@ struct neighbour
61822 struct neigh_ops
61823 {
61824 int family;
61825- void (*solicit)(struct neighbour *, struct sk_buff*);
61826- void (*error_report)(struct neighbour *, struct sk_buff*);
61827- int (*output)(struct sk_buff*);
61828- int (*connected_output)(struct sk_buff*);
61829- int (*hh_output)(struct sk_buff*);
61830- int (*queue_xmit)(struct sk_buff*);
61831+ void (* const solicit)(struct neighbour *, struct sk_buff*);
61832+ void (* const error_report)(struct neighbour *, struct sk_buff*);
61833+ int (* const output)(struct sk_buff*);
61834+ int (* const connected_output)(struct sk_buff*);
61835+ int (* const hh_output)(struct sk_buff*);
61836+ int (* const queue_xmit)(struct sk_buff*);
61837 };
61838
61839 struct pneigh_entry
61840diff -urNp linux-2.6.32.43/include/net/netlink.h linux-2.6.32.43/include/net/netlink.h
61841--- linux-2.6.32.43/include/net/netlink.h 2011-07-13 17:23:04.000000000 -0400
61842+++ linux-2.6.32.43/include/net/netlink.h 2011-07-13 17:23:19.000000000 -0400
61843@@ -558,7 +558,7 @@ static inline void *nlmsg_get_pos(struct
61844 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
61845 {
61846 if (mark)
61847- skb_trim(skb, (unsigned char *) mark - skb->data);
61848+ skb_trim(skb, (const unsigned char *) mark - skb->data);
61849 }
61850
61851 /**
61852diff -urNp linux-2.6.32.43/include/net/netns/ipv4.h linux-2.6.32.43/include/net/netns/ipv4.h
61853--- linux-2.6.32.43/include/net/netns/ipv4.h 2011-03-27 14:31:47.000000000 -0400
61854+++ linux-2.6.32.43/include/net/netns/ipv4.h 2011-05-04 17:56:28.000000000 -0400
61855@@ -54,7 +54,7 @@ struct netns_ipv4 {
61856 int current_rt_cache_rebuild_count;
61857
61858 struct timer_list rt_secret_timer;
61859- atomic_t rt_genid;
61860+ atomic_unchecked_t rt_genid;
61861
61862 #ifdef CONFIG_IP_MROUTE
61863 struct sock *mroute_sk;
61864diff -urNp linux-2.6.32.43/include/net/sctp/sctp.h linux-2.6.32.43/include/net/sctp/sctp.h
61865--- linux-2.6.32.43/include/net/sctp/sctp.h 2011-03-27 14:31:47.000000000 -0400
61866+++ linux-2.6.32.43/include/net/sctp/sctp.h 2011-04-17 15:56:46.000000000 -0400
61867@@ -305,8 +305,8 @@ extern int sctp_debug_flag;
61868
61869 #else /* SCTP_DEBUG */
61870
61871-#define SCTP_DEBUG_PRINTK(whatever...)
61872-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
61873+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
61874+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
61875 #define SCTP_ENABLE_DEBUG
61876 #define SCTP_DISABLE_DEBUG
61877 #define SCTP_ASSERT(expr, str, func)
61878diff -urNp linux-2.6.32.43/include/net/secure_seq.h linux-2.6.32.43/include/net/secure_seq.h
61879--- linux-2.6.32.43/include/net/secure_seq.h 1969-12-31 19:00:00.000000000 -0500
61880+++ linux-2.6.32.43/include/net/secure_seq.h 2011-08-07 19:48:09.000000000 -0400
61881@@ -0,0 +1,20 @@
61882+#ifndef _NET_SECURE_SEQ
61883+#define _NET_SECURE_SEQ
61884+
61885+#include <linux/types.h>
61886+
61887+extern __u32 secure_ip_id(__be32 daddr);
61888+extern __u32 secure_ipv6_id(const __be32 daddr[4]);
61889+extern u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
61890+extern u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
61891+ __be16 dport);
61892+extern __u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
61893+ __be16 sport, __be16 dport);
61894+extern __u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
61895+ __be16 sport, __be16 dport);
61896+extern u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
61897+ __be16 sport, __be16 dport);
61898+extern u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
61899+ __be16 sport, __be16 dport);
61900+
61901+#endif /* _NET_SECURE_SEQ */
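This new <net/secure_seq.h> header is the destination for the declarations dropped from <linux/random.h> earlier in the patch, plus IPv6 and DCCPv6 variants, so the secure sequence-number and ephemeral-port helpers get a dedicated header instead of riding along with the RNG interface. A hedged sketch of how a caller that previously relied on linux/random.h would pick them up (the wrapper below is only an illustration, not code from the patch):

#include <net/secure_seq.h>

static __u32 example_initial_seq(__be32 saddr, __be32 daddr,
				 __be16 sport, __be16 dport)
{
	return secure_tcp_sequence_number(saddr, daddr, sport, dport);
}
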
61902diff -urNp linux-2.6.32.43/include/net/sock.h linux-2.6.32.43/include/net/sock.h
61903--- linux-2.6.32.43/include/net/sock.h 2011-03-27 14:31:47.000000000 -0400
61904+++ linux-2.6.32.43/include/net/sock.h 2011-05-04 17:56:28.000000000 -0400
61905@@ -272,7 +272,7 @@ struct sock {
61906 rwlock_t sk_callback_lock;
61907 int sk_err,
61908 sk_err_soft;
61909- atomic_t sk_drops;
61910+ atomic_unchecked_t sk_drops;
61911 unsigned short sk_ack_backlog;
61912 unsigned short sk_max_ack_backlog;
61913 __u32 sk_priority;
61914diff -urNp linux-2.6.32.43/include/net/tcp.h linux-2.6.32.43/include/net/tcp.h
61915--- linux-2.6.32.43/include/net/tcp.h 2011-03-27 14:31:47.000000000 -0400
61916+++ linux-2.6.32.43/include/net/tcp.h 2011-04-17 15:56:46.000000000 -0400
61917@@ -1444,6 +1444,7 @@ enum tcp_seq_states {
61918 struct tcp_seq_afinfo {
61919 char *name;
61920 sa_family_t family;
61921+ /* cannot be const */
61922 struct file_operations seq_fops;
61923 struct seq_operations seq_ops;
61924 };
61925diff -urNp linux-2.6.32.43/include/net/udp.h linux-2.6.32.43/include/net/udp.h
61926--- linux-2.6.32.43/include/net/udp.h 2011-03-27 14:31:47.000000000 -0400
61927+++ linux-2.6.32.43/include/net/udp.h 2011-04-17 15:56:46.000000000 -0400
61928@@ -187,6 +187,7 @@ struct udp_seq_afinfo {
61929 char *name;
61930 sa_family_t family;
61931 struct udp_table *udp_table;
61932+ /* cannot be const */
61933 struct file_operations seq_fops;
61934 struct seq_operations seq_ops;
61935 };
61936diff -urNp linux-2.6.32.43/include/rdma/iw_cm.h linux-2.6.32.43/include/rdma/iw_cm.h
61937--- linux-2.6.32.43/include/rdma/iw_cm.h 2011-03-27 14:31:47.000000000 -0400
61938+++ linux-2.6.32.43/include/rdma/iw_cm.h 2011-08-05 20:33:55.000000000 -0400
61939@@ -129,7 +129,7 @@ struct iw_cm_verbs {
61940 int backlog);
61941
61942 int (*destroy_listen)(struct iw_cm_id *cm_id);
61943-};
61944+} __no_const;
61945
61946 /**
61947 * iw_create_cm_id - Create an IW CM identifier.
61948diff -urNp linux-2.6.32.43/include/scsi/scsi_device.h linux-2.6.32.43/include/scsi/scsi_device.h
61949--- linux-2.6.32.43/include/scsi/scsi_device.h 2011-04-17 17:00:52.000000000 -0400
61950+++ linux-2.6.32.43/include/scsi/scsi_device.h 2011-05-04 17:56:28.000000000 -0400
61951@@ -156,9 +156,9 @@ struct scsi_device {
61952 unsigned int max_device_blocked; /* what device_blocked counts down from */
61953 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
61954
61955- atomic_t iorequest_cnt;
61956- atomic_t iodone_cnt;
61957- atomic_t ioerr_cnt;
61958+ atomic_unchecked_t iorequest_cnt;
61959+ atomic_unchecked_t iodone_cnt;
61960+ atomic_unchecked_t ioerr_cnt;
61961
61962 struct device sdev_gendev,
61963 sdev_dev;
61964diff -urNp linux-2.6.32.43/include/scsi/scsi_transport_fc.h linux-2.6.32.43/include/scsi/scsi_transport_fc.h
61965--- linux-2.6.32.43/include/scsi/scsi_transport_fc.h 2011-03-27 14:31:47.000000000 -0400
61966+++ linux-2.6.32.43/include/scsi/scsi_transport_fc.h 2011-08-05 20:33:55.000000000 -0400
61967@@ -663,9 +663,9 @@ struct fc_function_template {
61968 int (*bsg_timeout)(struct fc_bsg_job *);
61969
61970 /* allocation lengths for host-specific data */
61971- u32 dd_fcrport_size;
61972- u32 dd_fcvport_size;
61973- u32 dd_bsg_size;
61974+ const u32 dd_fcrport_size;
61975+ const u32 dd_fcvport_size;
61976+ const u32 dd_bsg_size;
61977
61978 /*
61979 * The driver sets these to tell the transport class it
61980@@ -675,39 +675,39 @@ struct fc_function_template {
61981 */
61982
61983 /* remote port fixed attributes */
61984- unsigned long show_rport_maxframe_size:1;
61985- unsigned long show_rport_supported_classes:1;
61986- unsigned long show_rport_dev_loss_tmo:1;
61987+ const unsigned long show_rport_maxframe_size:1;
61988+ const unsigned long show_rport_supported_classes:1;
61989+ const unsigned long show_rport_dev_loss_tmo:1;
61990
61991 /*
61992 * target dynamic attributes
61993 * These should all be "1" if the driver uses the remote port
61994 * add/delete functions (so attributes reflect rport values).
61995 */
61996- unsigned long show_starget_node_name:1;
61997- unsigned long show_starget_port_name:1;
61998- unsigned long show_starget_port_id:1;
61999+ const unsigned long show_starget_node_name:1;
62000+ const unsigned long show_starget_port_name:1;
62001+ const unsigned long show_starget_port_id:1;
62002
62003 /* host fixed attributes */
62004- unsigned long show_host_node_name:1;
62005- unsigned long show_host_port_name:1;
62006- unsigned long show_host_permanent_port_name:1;
62007- unsigned long show_host_supported_classes:1;
62008- unsigned long show_host_supported_fc4s:1;
62009- unsigned long show_host_supported_speeds:1;
62010- unsigned long show_host_maxframe_size:1;
62011- unsigned long show_host_serial_number:1;
62012+ const unsigned long show_host_node_name:1;
62013+ const unsigned long show_host_port_name:1;
62014+ const unsigned long show_host_permanent_port_name:1;
62015+ const unsigned long show_host_supported_classes:1;
62016+ const unsigned long show_host_supported_fc4s:1;
62017+ const unsigned long show_host_supported_speeds:1;
62018+ const unsigned long show_host_maxframe_size:1;
62019+ const unsigned long show_host_serial_number:1;
62020 /* host dynamic attributes */
62021- unsigned long show_host_port_id:1;
62022- unsigned long show_host_port_type:1;
62023- unsigned long show_host_port_state:1;
62024- unsigned long show_host_active_fc4s:1;
62025- unsigned long show_host_speed:1;
62026- unsigned long show_host_fabric_name:1;
62027- unsigned long show_host_symbolic_name:1;
62028- unsigned long show_host_system_hostname:1;
62029+ const unsigned long show_host_port_id:1;
62030+ const unsigned long show_host_port_type:1;
62031+ const unsigned long show_host_port_state:1;
62032+ const unsigned long show_host_active_fc4s:1;
62033+ const unsigned long show_host_speed:1;
62034+ const unsigned long show_host_fabric_name:1;
62035+ const unsigned long show_host_symbolic_name:1;
62036+ const unsigned long show_host_system_hostname:1;
62037
62038- unsigned long disable_target_scan:1;
62039+ const unsigned long disable_target_scan:1;
62040 };
62041
62042
62043diff -urNp linux-2.6.32.43/include/sound/ac97_codec.h linux-2.6.32.43/include/sound/ac97_codec.h
62044--- linux-2.6.32.43/include/sound/ac97_codec.h 2011-03-27 14:31:47.000000000 -0400
62045+++ linux-2.6.32.43/include/sound/ac97_codec.h 2011-04-17 15:56:46.000000000 -0400
62046@@ -419,15 +419,15 @@
62047 struct snd_ac97;
62048
62049 struct snd_ac97_build_ops {
62050- int (*build_3d) (struct snd_ac97 *ac97);
62051- int (*build_specific) (struct snd_ac97 *ac97);
62052- int (*build_spdif) (struct snd_ac97 *ac97);
62053- int (*build_post_spdif) (struct snd_ac97 *ac97);
62054+ int (* const build_3d) (struct snd_ac97 *ac97);
62055+ int (* const build_specific) (struct snd_ac97 *ac97);
62056+ int (* const build_spdif) (struct snd_ac97 *ac97);
62057+ int (* const build_post_spdif) (struct snd_ac97 *ac97);
62058 #ifdef CONFIG_PM
62059- void (*suspend) (struct snd_ac97 *ac97);
62060- void (*resume) (struct snd_ac97 *ac97);
62061+ void (* const suspend) (struct snd_ac97 *ac97);
62062+ void (* const resume) (struct snd_ac97 *ac97);
62063 #endif
62064- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
62065+ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */
62066 };
62067
62068 struct snd_ac97_bus_ops {
62069@@ -477,7 +477,7 @@ struct snd_ac97_template {
62070
62071 struct snd_ac97 {
62072 /* -- lowlevel (hardware) driver specific -- */
62073- struct snd_ac97_build_ops * build_ops;
62074+ const struct snd_ac97_build_ops * build_ops;
62075 void *private_data;
62076 void (*private_free) (struct snd_ac97 *ac97);
62077 /* --- */
62078diff -urNp linux-2.6.32.43/include/sound/ak4xxx-adda.h linux-2.6.32.43/include/sound/ak4xxx-adda.h
62079--- linux-2.6.32.43/include/sound/ak4xxx-adda.h 2011-03-27 14:31:47.000000000 -0400
62080+++ linux-2.6.32.43/include/sound/ak4xxx-adda.h 2011-08-05 20:33:55.000000000 -0400
62081@@ -35,7 +35,7 @@ struct snd_ak4xxx_ops {
62082 void (*write)(struct snd_akm4xxx *ak, int chip, unsigned char reg,
62083 unsigned char val);
62084 void (*set_rate_val)(struct snd_akm4xxx *ak, unsigned int rate);
62085-};
62086+} __no_const;
62087
62088 #define AK4XXX_IMAGE_SIZE (AK4XXX_MAX_CHIPS * 16) /* 64 bytes */
62089
62090diff -urNp linux-2.6.32.43/include/sound/hwdep.h linux-2.6.32.43/include/sound/hwdep.h
62091--- linux-2.6.32.43/include/sound/hwdep.h 2011-03-27 14:31:47.000000000 -0400
62092+++ linux-2.6.32.43/include/sound/hwdep.h 2011-08-05 20:33:55.000000000 -0400
62093@@ -49,7 +49,7 @@ struct snd_hwdep_ops {
62094 struct snd_hwdep_dsp_status *status);
62095 int (*dsp_load)(struct snd_hwdep *hw,
62096 struct snd_hwdep_dsp_image *image);
62097-};
62098+} __no_const;
62099
62100 struct snd_hwdep {
62101 struct snd_card *card;
62102diff -urNp linux-2.6.32.43/include/sound/info.h linux-2.6.32.43/include/sound/info.h
62103--- linux-2.6.32.43/include/sound/info.h 2011-03-27 14:31:47.000000000 -0400
62104+++ linux-2.6.32.43/include/sound/info.h 2011-08-05 20:33:55.000000000 -0400
62105@@ -44,7 +44,7 @@ struct snd_info_entry_text {
62106 struct snd_info_buffer *buffer);
62107 void (*write)(struct snd_info_entry *entry,
62108 struct snd_info_buffer *buffer);
62109-};
62110+} __no_const;
62111
62112 struct snd_info_entry_ops {
62113 int (*open)(struct snd_info_entry *entry,
62114diff -urNp linux-2.6.32.43/include/sound/sb16_csp.h linux-2.6.32.43/include/sound/sb16_csp.h
62115--- linux-2.6.32.43/include/sound/sb16_csp.h 2011-03-27 14:31:47.000000000 -0400
62116+++ linux-2.6.32.43/include/sound/sb16_csp.h 2011-08-05 20:33:55.000000000 -0400
62117@@ -139,7 +139,7 @@ struct snd_sb_csp_ops {
62118 int (*csp_start) (struct snd_sb_csp * p, int sample_width, int channels);
62119 int (*csp_stop) (struct snd_sb_csp * p);
62120 int (*csp_qsound_transfer) (struct snd_sb_csp * p);
62121-};
62122+} __no_const;
62123
62124 /*
62125 * CSP private data
62126diff -urNp linux-2.6.32.43/include/sound/ymfpci.h linux-2.6.32.43/include/sound/ymfpci.h
62127--- linux-2.6.32.43/include/sound/ymfpci.h 2011-03-27 14:31:47.000000000 -0400
62128+++ linux-2.6.32.43/include/sound/ymfpci.h 2011-05-04 17:56:28.000000000 -0400
62129@@ -358,7 +358,7 @@ struct snd_ymfpci {
62130 spinlock_t reg_lock;
62131 spinlock_t voice_lock;
62132 wait_queue_head_t interrupt_sleep;
62133- atomic_t interrupt_sleep_count;
62134+ atomic_unchecked_t interrupt_sleep_count;
62135 struct snd_info_entry *proc_entry;
62136 const struct firmware *dsp_microcode;
62137 const struct firmware *controller_microcode;
62138diff -urNp linux-2.6.32.43/include/trace/events/irq.h linux-2.6.32.43/include/trace/events/irq.h
62139--- linux-2.6.32.43/include/trace/events/irq.h 2011-03-27 14:31:47.000000000 -0400
62140+++ linux-2.6.32.43/include/trace/events/irq.h 2011-04-17 15:56:46.000000000 -0400
62141@@ -34,7 +34,7 @@
62142 */
62143 TRACE_EVENT(irq_handler_entry,
62144
62145- TP_PROTO(int irq, struct irqaction *action),
62146+ TP_PROTO(int irq, const struct irqaction *action),
62147
62148 TP_ARGS(irq, action),
62149
62150@@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry,
62151 */
62152 TRACE_EVENT(irq_handler_exit,
62153
62154- TP_PROTO(int irq, struct irqaction *action, int ret),
62155+ TP_PROTO(int irq, const struct irqaction *action, int ret),
62156
62157 TP_ARGS(irq, action, ret),
62158
62159@@ -95,7 +95,7 @@ TRACE_EVENT(irq_handler_exit,
62160 */
62161 TRACE_EVENT(softirq_entry,
62162
62163- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
62164+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
62165
62166 TP_ARGS(h, vec),
62167
62168@@ -124,7 +124,7 @@ TRACE_EVENT(softirq_entry,
62169 */
62170 TRACE_EVENT(softirq_exit,
62171
62172- TP_PROTO(struct softirq_action *h, struct softirq_action *vec),
62173+ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec),
62174
62175 TP_ARGS(h, vec),
62176
62177diff -urNp linux-2.6.32.43/include/video/uvesafb.h linux-2.6.32.43/include/video/uvesafb.h
62178--- linux-2.6.32.43/include/video/uvesafb.h 2011-03-27 14:31:47.000000000 -0400
62179+++ linux-2.6.32.43/include/video/uvesafb.h 2011-04-17 15:56:46.000000000 -0400
62180@@ -177,6 +177,7 @@ struct uvesafb_par {
62181 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
62182 u8 pmi_setpal; /* PMI for palette changes */
62183 u16 *pmi_base; /* protected mode interface location */
62184+ u8 *pmi_code; /* protected mode code location */
62185 void *pmi_start;
62186 void *pmi_pal;
62187 u8 *vbe_state_orig; /*
62188diff -urNp linux-2.6.32.43/init/do_mounts.c linux-2.6.32.43/init/do_mounts.c
62189--- linux-2.6.32.43/init/do_mounts.c 2011-03-27 14:31:47.000000000 -0400
62190+++ linux-2.6.32.43/init/do_mounts.c 2011-04-17 15:56:46.000000000 -0400
62191@@ -216,11 +216,11 @@ static void __init get_fs_names(char *pa
62192
62193 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
62194 {
62195- int err = sys_mount(name, "/root", fs, flags, data);
62196+ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data);
62197 if (err)
62198 return err;
62199
62200- sys_chdir("/root");
62201+ sys_chdir((__force const char __user *)"/root");
62202 ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev;
62203 printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n",
62204 current->fs->pwd.mnt->mnt_sb->s_type->name,
62205@@ -311,18 +311,18 @@ void __init change_floppy(char *fmt, ...
62206 va_start(args, fmt);
62207 vsprintf(buf, fmt, args);
62208 va_end(args);
62209- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
62210+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
62211 if (fd >= 0) {
62212 sys_ioctl(fd, FDEJECT, 0);
62213 sys_close(fd);
62214 }
62215 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
62216- fd = sys_open("/dev/console", O_RDWR, 0);
62217+ fd = sys_open((char __user *)"/dev/console", O_RDWR, 0);
62218 if (fd >= 0) {
62219 sys_ioctl(fd, TCGETS, (long)&termios);
62220 termios.c_lflag &= ~ICANON;
62221 sys_ioctl(fd, TCSETSF, (long)&termios);
62222- sys_read(fd, &c, 1);
62223+ sys_read(fd, (char __user *)&c, 1);
62224 termios.c_lflag |= ICANON;
62225 sys_ioctl(fd, TCSETSF, (long)&termios);
62226 sys_close(fd);
62227@@ -416,6 +416,6 @@ void __init prepare_namespace(void)
62228 mount_root();
62229 out:
62230 devtmpfs_mount("dev");
62231- sys_mount(".", "/", NULL, MS_MOVE, NULL);
62232- sys_chroot(".");
62233+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
62234+ sys_chroot((__force char __user *)".");
62235 }
62236diff -urNp linux-2.6.32.43/init/do_mounts.h linux-2.6.32.43/init/do_mounts.h
62237--- linux-2.6.32.43/init/do_mounts.h 2011-03-27 14:31:47.000000000 -0400
62238+++ linux-2.6.32.43/init/do_mounts.h 2011-04-17 15:56:46.000000000 -0400
62239@@ -15,15 +15,15 @@ extern int root_mountflags;
62240
62241 static inline int create_dev(char *name, dev_t dev)
62242 {
62243- sys_unlink(name);
62244- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
62245+ sys_unlink((__force char __user *)name);
62246+ return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev));
62247 }
62248
62249 #if BITS_PER_LONG == 32
62250 static inline u32 bstat(char *name)
62251 {
62252 struct stat64 stat;
62253- if (sys_stat64(name, &stat) != 0)
62254+ if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0)
62255 return 0;
62256 if (!S_ISBLK(stat.st_mode))
62257 return 0;
62258diff -urNp linux-2.6.32.43/init/do_mounts_initrd.c linux-2.6.32.43/init/do_mounts_initrd.c
62259--- linux-2.6.32.43/init/do_mounts_initrd.c 2011-03-27 14:31:47.000000000 -0400
62260+++ linux-2.6.32.43/init/do_mounts_initrd.c 2011-04-17 15:56:46.000000000 -0400
62261@@ -32,7 +32,7 @@ static int __init do_linuxrc(void * shel
62262 sys_close(old_fd);sys_close(root_fd);
62263 sys_close(0);sys_close(1);sys_close(2);
62264 sys_setsid();
62265- (void) sys_open("/dev/console",O_RDWR,0);
62266+ (void) sys_open((__force const char __user *)"/dev/console",O_RDWR,0);
62267 (void) sys_dup(0);
62268 (void) sys_dup(0);
62269 return kernel_execve(shell, argv, envp_init);
62270@@ -47,13 +47,13 @@ static void __init handle_initrd(void)
62271 create_dev("/dev/root.old", Root_RAM0);
62272 /* mount initrd on rootfs' /root */
62273 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
62274- sys_mkdir("/old", 0700);
62275- root_fd = sys_open("/", 0, 0);
62276- old_fd = sys_open("/old", 0, 0);
62277+ sys_mkdir((__force const char __user *)"/old", 0700);
62278+ root_fd = sys_open((__force const char __user *)"/", 0, 0);
62279+ old_fd = sys_open((__force const char __user *)"/old", 0, 0);
62280 /* move initrd over / and chdir/chroot in initrd root */
62281- sys_chdir("/root");
62282- sys_mount(".", "/", NULL, MS_MOVE, NULL);
62283- sys_chroot(".");
62284+ sys_chdir((__force const char __user *)"/root");
62285+ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL);
62286+ sys_chroot((__force const char __user *)".");
62287
62288 /*
62289 * In case that a resume from disk is carried out by linuxrc or one of
62290@@ -70,15 +70,15 @@ static void __init handle_initrd(void)
62291
62292 /* move initrd to rootfs' /old */
62293 sys_fchdir(old_fd);
62294- sys_mount("/", ".", NULL, MS_MOVE, NULL);
62295+ sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL);
62296 /* switch root and cwd back to / of rootfs */
62297 sys_fchdir(root_fd);
62298- sys_chroot(".");
62299+ sys_chroot((__force const char __user *)".");
62300 sys_close(old_fd);
62301 sys_close(root_fd);
62302
62303 if (new_decode_dev(real_root_dev) == Root_RAM0) {
62304- sys_chdir("/old");
62305+ sys_chdir((__force const char __user *)"/old");
62306 return;
62307 }
62308
62309@@ -86,17 +86,17 @@ static void __init handle_initrd(void)
62310 mount_root();
62311
62312 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
62313- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
62314+ error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL);
62315 if (!error)
62316 printk("okay\n");
62317 else {
62318- int fd = sys_open("/dev/root.old", O_RDWR, 0);
62319+ int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0);
62320 if (error == -ENOENT)
62321 printk("/initrd does not exist. Ignored.\n");
62322 else
62323 printk("failed\n");
62324 printk(KERN_NOTICE "Unmounting old root\n");
62325- sys_umount("/old", MNT_DETACH);
62326+ sys_umount((__force char __user *)"/old", MNT_DETACH);
62327 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
62328 if (fd < 0) {
62329 error = fd;
62330@@ -119,11 +119,11 @@ int __init initrd_load(void)
62331 * mounted in the normal path.
62332 */
62333 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
62334- sys_unlink("/initrd.image");
62335+ sys_unlink((__force const char __user *)"/initrd.image");
62336 handle_initrd();
62337 return 1;
62338 }
62339 }
62340- sys_unlink("/initrd.image");
62341+ sys_unlink((__force const char __user *)"/initrd.image");
62342 return 0;
62343 }
62344diff -urNp linux-2.6.32.43/init/do_mounts_md.c linux-2.6.32.43/init/do_mounts_md.c
62345--- linux-2.6.32.43/init/do_mounts_md.c 2011-03-27 14:31:47.000000000 -0400
62346+++ linux-2.6.32.43/init/do_mounts_md.c 2011-04-17 15:56:46.000000000 -0400
62347@@ -170,7 +170,7 @@ static void __init md_setup_drive(void)
62348 partitioned ? "_d" : "", minor,
62349 md_setup_args[ent].device_names);
62350
62351- fd = sys_open(name, 0, 0);
62352+ fd = sys_open((__force char __user *)name, 0, 0);
62353 if (fd < 0) {
62354 printk(KERN_ERR "md: open failed - cannot start "
62355 "array %s\n", name);
62356@@ -233,7 +233,7 @@ static void __init md_setup_drive(void)
62357 * array without it
62358 */
62359 sys_close(fd);
62360- fd = sys_open(name, 0, 0);
62361+ fd = sys_open((__force char __user *)name, 0, 0);
62362 sys_ioctl(fd, BLKRRPART, 0);
62363 }
62364 sys_close(fd);
62365@@ -283,7 +283,7 @@ static void __init autodetect_raid(void)
62366
62367 wait_for_device_probe();
62368
62369- fd = sys_open("/dev/md0", 0, 0);
62370+ fd = sys_open((__force char __user *)"/dev/md0", 0, 0);
62371 if (fd >= 0) {
62372 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
62373 sys_close(fd);
62374diff -urNp linux-2.6.32.43/init/initramfs.c linux-2.6.32.43/init/initramfs.c
62375--- linux-2.6.32.43/init/initramfs.c 2011-03-27 14:31:47.000000000 -0400
62376+++ linux-2.6.32.43/init/initramfs.c 2011-04-17 15:56:46.000000000 -0400
62377@@ -74,7 +74,7 @@ static void __init free_hash(void)
62378 }
62379 }
62380
62381-static long __init do_utime(char __user *filename, time_t mtime)
62382+static long __init do_utime(__force char __user *filename, time_t mtime)
62383 {
62384 struct timespec t[2];
62385
62386@@ -109,7 +109,7 @@ static void __init dir_utime(void)
62387 struct dir_entry *de, *tmp;
62388 list_for_each_entry_safe(de, tmp, &dir_list, list) {
62389 list_del(&de->list);
62390- do_utime(de->name, de->mtime);
62391+ do_utime((__force char __user *)de->name, de->mtime);
62392 kfree(de->name);
62393 kfree(de);
62394 }
62395@@ -271,7 +271,7 @@ static int __init maybe_link(void)
62396 if (nlink >= 2) {
62397 char *old = find_link(major, minor, ino, mode, collected);
62398 if (old)
62399- return (sys_link(old, collected) < 0) ? -1 : 1;
62400+ return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? -1 : 1;
62401 }
62402 return 0;
62403 }
62404@@ -280,11 +280,11 @@ static void __init clean_path(char *path
62405 {
62406 struct stat st;
62407
62408- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
62409+ if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) {
62410 if (S_ISDIR(st.st_mode))
62411- sys_rmdir(path);
62412+ sys_rmdir((__force char __user *)path);
62413 else
62414- sys_unlink(path);
62415+ sys_unlink((__force char __user *)path);
62416 }
62417 }
62418
62419@@ -305,7 +305,7 @@ static int __init do_name(void)
62420 int openflags = O_WRONLY|O_CREAT;
62421 if (ml != 1)
62422 openflags |= O_TRUNC;
62423- wfd = sys_open(collected, openflags, mode);
62424+ wfd = sys_open((__force char __user *)collected, openflags, mode);
62425
62426 if (wfd >= 0) {
62427 sys_fchown(wfd, uid, gid);
62428@@ -317,17 +317,17 @@ static int __init do_name(void)
62429 }
62430 }
62431 } else if (S_ISDIR(mode)) {
62432- sys_mkdir(collected, mode);
62433- sys_chown(collected, uid, gid);
62434- sys_chmod(collected, mode);
62435+ sys_mkdir((__force char __user *)collected, mode);
62436+ sys_chown((__force char __user *)collected, uid, gid);
62437+ sys_chmod((__force char __user *)collected, mode);
62438 dir_add(collected, mtime);
62439 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
62440 S_ISFIFO(mode) || S_ISSOCK(mode)) {
62441 if (maybe_link() == 0) {
62442- sys_mknod(collected, mode, rdev);
62443- sys_chown(collected, uid, gid);
62444- sys_chmod(collected, mode);
62445- do_utime(collected, mtime);
62446+ sys_mknod((__force char __user *)collected, mode, rdev);
62447+ sys_chown((__force char __user *)collected, uid, gid);
62448+ sys_chmod((__force char __user *)collected, mode);
62449+ do_utime((__force char __user *)collected, mtime);
62450 }
62451 }
62452 return 0;
62453@@ -336,15 +336,15 @@ static int __init do_name(void)
62454 static int __init do_copy(void)
62455 {
62456 if (count >= body_len) {
62457- sys_write(wfd, victim, body_len);
62458+ sys_write(wfd, (__force char __user *)victim, body_len);
62459 sys_close(wfd);
62460- do_utime(vcollected, mtime);
62461+ do_utime((__force char __user *)vcollected, mtime);
62462 kfree(vcollected);
62463 eat(body_len);
62464 state = SkipIt;
62465 return 0;
62466 } else {
62467- sys_write(wfd, victim, count);
62468+ sys_write(wfd, (__force char __user *)victim, count);
62469 body_len -= count;
62470 eat(count);
62471 return 1;
62472@@ -355,9 +355,9 @@ static int __init do_symlink(void)
62473 {
62474 collected[N_ALIGN(name_len) + body_len] = '\0';
62475 clean_path(collected, 0);
62476- sys_symlink(collected + N_ALIGN(name_len), collected);
62477- sys_lchown(collected, uid, gid);
62478- do_utime(collected, mtime);
62479+ sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected);
62480+ sys_lchown((__force char __user *)collected, uid, gid);
62481+ do_utime((__force char __user *)collected, mtime);
62482 state = SkipIt;
62483 next_state = Reset;
62484 return 0;
62485diff -urNp linux-2.6.32.43/init/Kconfig linux-2.6.32.43/init/Kconfig
62486--- linux-2.6.32.43/init/Kconfig 2011-05-10 22:12:01.000000000 -0400
62487+++ linux-2.6.32.43/init/Kconfig 2011-05-10 22:12:34.000000000 -0400
62488@@ -1004,7 +1004,7 @@ config SLUB_DEBUG
62489
62490 config COMPAT_BRK
62491 bool "Disable heap randomization"
62492- default y
62493+ default n
62494 help
62495 Randomizing heap placement makes heap exploits harder, but it
62496 also breaks ancient binaries (including anything libc5 based).
62497diff -urNp linux-2.6.32.43/init/main.c linux-2.6.32.43/init/main.c
62498--- linux-2.6.32.43/init/main.c 2011-05-10 22:12:01.000000000 -0400
62499+++ linux-2.6.32.43/init/main.c 2011-08-05 20:33:55.000000000 -0400
62500@@ -97,6 +97,7 @@ static inline void mark_rodata_ro(void)
62501 #ifdef CONFIG_TC
62502 extern void tc_init(void);
62503 #endif
62504+extern void grsecurity_init(void);
62505
62506 enum system_states system_state __read_mostly;
62507 EXPORT_SYMBOL(system_state);
62508@@ -183,6 +184,49 @@ static int __init set_reset_devices(char
62509
62510 __setup("reset_devices", set_reset_devices);
62511
62512+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
62513+extern char pax_enter_kernel_user[];
62514+extern char pax_exit_kernel_user[];
62515+extern pgdval_t clone_pgd_mask;
62516+#endif
62517+
62518+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
62519+static int __init setup_pax_nouderef(char *str)
62520+{
62521+#ifdef CONFIG_X86_32
62522+ unsigned int cpu;
62523+ struct desc_struct *gdt;
62524+
62525+ for (cpu = 0; cpu < NR_CPUS; cpu++) {
62526+ gdt = get_cpu_gdt_table(cpu);
62527+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
62528+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
62529+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
62530+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
62531+ }
62532+ asm("mov %0, %%ds; mov %0, %%es; mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory");
62533+#else
62534+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
62535+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
62536+ clone_pgd_mask = ~(pgdval_t)0UL;
62537+#endif
62538+
62539+ return 0;
62540+}
62541+early_param("pax_nouderef", setup_pax_nouderef);
62542+#endif
62543+
62544+#ifdef CONFIG_PAX_SOFTMODE
62545+int pax_softmode;
62546+
62547+static int __init setup_pax_softmode(char *str)
62548+{
62549+ get_option(&str, &pax_softmode);
62550+ return 1;
62551+}
62552+__setup("pax_softmode=", setup_pax_softmode);
62553+#endif
62554+
62555 static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
62556 char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
62557 static const char *panic_later, *panic_param;
62558@@ -705,52 +749,53 @@ int initcall_debug;
62559 core_param(initcall_debug, initcall_debug, bool, 0644);
62560
62561 static char msgbuf[64];
62562-static struct boot_trace_call call;
62563-static struct boot_trace_ret ret;
62564+static struct boot_trace_call trace_call;
62565+static struct boot_trace_ret trace_ret;
62566
62567 int do_one_initcall(initcall_t fn)
62568 {
62569 int count = preempt_count();
62570 ktime_t calltime, delta, rettime;
62571+ const char *msg1 = "", *msg2 = "";
62572
62573 if (initcall_debug) {
62574- call.caller = task_pid_nr(current);
62575- printk("calling %pF @ %i\n", fn, call.caller);
62576+ trace_call.caller = task_pid_nr(current);
62577+ printk("calling %pF @ %i\n", fn, trace_call.caller);
62578 calltime = ktime_get();
62579- trace_boot_call(&call, fn);
62580+ trace_boot_call(&trace_call, fn);
62581 enable_boot_trace();
62582 }
62583
62584- ret.result = fn();
62585+ trace_ret.result = fn();
62586
62587 if (initcall_debug) {
62588 disable_boot_trace();
62589 rettime = ktime_get();
62590 delta = ktime_sub(rettime, calltime);
62591- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
62592- trace_boot_ret(&ret, fn);
62593+ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10;
62594+ trace_boot_ret(&trace_ret, fn);
62595 printk("initcall %pF returned %d after %Ld usecs\n", fn,
62596- ret.result, ret.duration);
62597+ trace_ret.result, trace_ret.duration);
62598 }
62599
62600 msgbuf[0] = 0;
62601
62602- if (ret.result && ret.result != -ENODEV && initcall_debug)
62603- sprintf(msgbuf, "error code %d ", ret.result);
62604+ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug)
62605+ sprintf(msgbuf, "error code %d ", trace_ret.result);
62606
62607 if (preempt_count() != count) {
62608- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf));
62609+ msg1 = " preemption imbalance";
62610 preempt_count() = count;
62611 }
62612 if (irqs_disabled()) {
62613- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
62614+ msg2 = " disabled interrupts";
62615 local_irq_enable();
62616 }
62617- if (msgbuf[0]) {
62618- printk("initcall %pF returned with %s\n", fn, msgbuf);
62619+ if (msgbuf[0] || *msg1 || *msg2) {
62620+ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2);
62621 }
62622
62623- return ret.result;
62624+ return trace_ret.result;
62625 }
62626
62627
62628@@ -893,11 +938,13 @@ static int __init kernel_init(void * unu
62629 if (!ramdisk_execute_command)
62630 ramdisk_execute_command = "/init";
62631
62632- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
62633+ if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) {
62634 ramdisk_execute_command = NULL;
62635 prepare_namespace();
62636 }
62637
62638+ grsecurity_init();
62639+
62640 /*
62641 * Ok, we have completed the initial bootup, and
62642 * we're essentially up and running. Get rid of the
62643diff -urNp linux-2.6.32.43/init/noinitramfs.c linux-2.6.32.43/init/noinitramfs.c
62644--- linux-2.6.32.43/init/noinitramfs.c 2011-03-27 14:31:47.000000000 -0400
62645+++ linux-2.6.32.43/init/noinitramfs.c 2011-04-17 15:56:46.000000000 -0400
62646@@ -29,7 +29,7 @@ static int __init default_rootfs(void)
62647 {
62648 int err;
62649
62650- err = sys_mkdir("/dev", 0755);
62651+ err = sys_mkdir((const char __user *)"/dev", 0755);
62652 if (err < 0)
62653 goto out;
62654
62655@@ -39,7 +39,7 @@ static int __init default_rootfs(void)
62656 if (err < 0)
62657 goto out;
62658
62659- err = sys_mkdir("/root", 0700);
62660+ err = sys_mkdir((const char __user *)"/root", 0700);
62661 if (err < 0)
62662 goto out;
62663
62664diff -urNp linux-2.6.32.43/ipc/mqueue.c linux-2.6.32.43/ipc/mqueue.c
62665--- linux-2.6.32.43/ipc/mqueue.c 2011-03-27 14:31:47.000000000 -0400
62666+++ linux-2.6.32.43/ipc/mqueue.c 2011-04-17 15:56:46.000000000 -0400
62667@@ -150,6 +150,7 @@ static struct inode *mqueue_get_inode(st
62668 mq_bytes = (mq_msg_tblsz +
62669 (info->attr.mq_maxmsg * info->attr.mq_msgsize));
62670
62671+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
62672 spin_lock(&mq_lock);
62673 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
62674 u->mq_bytes + mq_bytes >
62675diff -urNp linux-2.6.32.43/ipc/msg.c linux-2.6.32.43/ipc/msg.c
62676--- linux-2.6.32.43/ipc/msg.c 2011-03-27 14:31:47.000000000 -0400
62677+++ linux-2.6.32.43/ipc/msg.c 2011-08-05 20:33:55.000000000 -0400
62678@@ -310,18 +310,19 @@ static inline int msg_security(struct ke
62679 return security_msg_queue_associate(msq, msgflg);
62680 }
62681
62682+static struct ipc_ops msg_ops = {
62683+ .getnew = newque,
62684+ .associate = msg_security,
62685+ .more_checks = NULL
62686+};
62687+
62688 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
62689 {
62690 struct ipc_namespace *ns;
62691- struct ipc_ops msg_ops;
62692 struct ipc_params msg_params;
62693
62694 ns = current->nsproxy->ipc_ns;
62695
62696- msg_ops.getnew = newque;
62697- msg_ops.associate = msg_security;
62698- msg_ops.more_checks = NULL;
62699-
62700 msg_params.key = key;
62701 msg_params.flg = msgflg;
62702
62703diff -urNp linux-2.6.32.43/ipc/sem.c linux-2.6.32.43/ipc/sem.c
62704--- linux-2.6.32.43/ipc/sem.c 2011-03-27 14:31:47.000000000 -0400
62705+++ linux-2.6.32.43/ipc/sem.c 2011-08-05 20:33:55.000000000 -0400
62706@@ -309,10 +309,15 @@ static inline int sem_more_checks(struct
62707 return 0;
62708 }
62709
62710+static struct ipc_ops sem_ops = {
62711+ .getnew = newary,
62712+ .associate = sem_security,
62713+ .more_checks = sem_more_checks
62714+};
62715+
62716 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
62717 {
62718 struct ipc_namespace *ns;
62719- struct ipc_ops sem_ops;
62720 struct ipc_params sem_params;
62721
62722 ns = current->nsproxy->ipc_ns;
62723@@ -320,10 +325,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int,
62724 if (nsems < 0 || nsems > ns->sc_semmsl)
62725 return -EINVAL;
62726
62727- sem_ops.getnew = newary;
62728- sem_ops.associate = sem_security;
62729- sem_ops.more_checks = sem_more_checks;
62730-
62731 sem_params.key = key;
62732 sem_params.flg = semflg;
62733 sem_params.u.nsems = nsems;
62734@@ -671,6 +672,8 @@ static int semctl_main(struct ipc_namesp
62735 ushort* sem_io = fast_sem_io;
62736 int nsems;
62737
62738+ pax_track_stack();
62739+
62740 sma = sem_lock_check(ns, semid);
62741 if (IS_ERR(sma))
62742 return PTR_ERR(sma);
62743@@ -1071,6 +1074,8 @@ SYSCALL_DEFINE4(semtimedop, int, semid,
62744 unsigned long jiffies_left = 0;
62745 struct ipc_namespace *ns;
62746
62747+ pax_track_stack();
62748+
62749 ns = current->nsproxy->ipc_ns;
62750
62751 if (nsops < 1 || semid < 0)
62752diff -urNp linux-2.6.32.43/ipc/shm.c linux-2.6.32.43/ipc/shm.c
62753--- linux-2.6.32.43/ipc/shm.c 2011-03-27 14:31:47.000000000 -0400
62754+++ linux-2.6.32.43/ipc/shm.c 2011-08-05 20:33:55.000000000 -0400
62755@@ -70,6 +70,14 @@ static void shm_destroy (struct ipc_name
62756 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
62757 #endif
62758
62759+#ifdef CONFIG_GRKERNSEC
62760+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62761+ const time_t shm_createtime, const uid_t cuid,
62762+ const int shmid);
62763+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
62764+ const time_t shm_createtime);
62765+#endif
62766+
62767 void shm_init_ns(struct ipc_namespace *ns)
62768 {
62769 ns->shm_ctlmax = SHMMAX;
62770@@ -396,6 +404,14 @@ static int newseg(struct ipc_namespace *
62771 shp->shm_lprid = 0;
62772 shp->shm_atim = shp->shm_dtim = 0;
62773 shp->shm_ctim = get_seconds();
62774+#ifdef CONFIG_GRKERNSEC
62775+ {
62776+ struct timespec timeval;
62777+ do_posix_clock_monotonic_gettime(&timeval);
62778+
62779+ shp->shm_createtime = timeval.tv_sec;
62780+ }
62781+#endif
62782 shp->shm_segsz = size;
62783 shp->shm_nattch = 0;
62784 shp->shm_file = file;
62785@@ -446,18 +462,19 @@ static inline int shm_more_checks(struct
62786 return 0;
62787 }
62788
62789+static struct ipc_ops shm_ops = {
62790+ .getnew = newseg,
62791+ .associate = shm_security,
62792+ .more_checks = shm_more_checks
62793+};
62794+
62795 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
62796 {
62797 struct ipc_namespace *ns;
62798- struct ipc_ops shm_ops;
62799 struct ipc_params shm_params;
62800
62801 ns = current->nsproxy->ipc_ns;
62802
62803- shm_ops.getnew = newseg;
62804- shm_ops.associate = shm_security;
62805- shm_ops.more_checks = shm_more_checks;
62806-
62807 shm_params.key = key;
62808 shm_params.flg = shmflg;
62809 shm_params.u.size = size;
62810@@ -880,9 +897,21 @@ long do_shmat(int shmid, char __user *sh
62811 if (err)
62812 goto out_unlock;
62813
62814+#ifdef CONFIG_GRKERNSEC
62815+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
62816+ shp->shm_perm.cuid, shmid) ||
62817+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
62818+ err = -EACCES;
62819+ goto out_unlock;
62820+ }
62821+#endif
62822+
62823 path.dentry = dget(shp->shm_file->f_path.dentry);
62824 path.mnt = shp->shm_file->f_path.mnt;
62825 shp->shm_nattch++;
62826+#ifdef CONFIG_GRKERNSEC
62827+ shp->shm_lapid = current->pid;
62828+#endif
62829 size = i_size_read(path.dentry->d_inode);
62830 shm_unlock(shp);
62831
62832diff -urNp linux-2.6.32.43/kernel/acct.c linux-2.6.32.43/kernel/acct.c
62833--- linux-2.6.32.43/kernel/acct.c 2011-03-27 14:31:47.000000000 -0400
62834+++ linux-2.6.32.43/kernel/acct.c 2011-04-17 15:56:46.000000000 -0400
62835@@ -579,7 +579,7 @@ static void do_acct_process(struct bsd_a
62836 */
62837 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
62838 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
62839- file->f_op->write(file, (char *)&ac,
62840+ file->f_op->write(file, (__force char __user *)&ac,
62841 sizeof(acct_t), &file->f_pos);
62842 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
62843 set_fs(fs);
62844diff -urNp linux-2.6.32.43/kernel/audit.c linux-2.6.32.43/kernel/audit.c
62845--- linux-2.6.32.43/kernel/audit.c 2011-03-27 14:31:47.000000000 -0400
62846+++ linux-2.6.32.43/kernel/audit.c 2011-05-04 17:56:28.000000000 -0400
62847@@ -110,7 +110,7 @@ u32 audit_sig_sid = 0;
62848 3) suppressed due to audit_rate_limit
62849 4) suppressed due to audit_backlog_limit
62850 */
62851-static atomic_t audit_lost = ATOMIC_INIT(0);
62852+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
62853
62854 /* The netlink socket. */
62855 static struct sock *audit_sock;
62856@@ -232,7 +232,7 @@ void audit_log_lost(const char *message)
62857 unsigned long now;
62858 int print;
62859
62860- atomic_inc(&audit_lost);
62861+ atomic_inc_unchecked(&audit_lost);
62862
62863 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
62864
62865@@ -251,7 +251,7 @@ void audit_log_lost(const char *message)
62866 printk(KERN_WARNING
62867 "audit: audit_lost=%d audit_rate_limit=%d "
62868 "audit_backlog_limit=%d\n",
62869- atomic_read(&audit_lost),
62870+ atomic_read_unchecked(&audit_lost),
62871 audit_rate_limit,
62872 audit_backlog_limit);
62873 audit_panic(message);
62874@@ -691,7 +691,7 @@ static int audit_receive_msg(struct sk_b
62875 status_set.pid = audit_pid;
62876 status_set.rate_limit = audit_rate_limit;
62877 status_set.backlog_limit = audit_backlog_limit;
62878- status_set.lost = atomic_read(&audit_lost);
62879+ status_set.lost = atomic_read_unchecked(&audit_lost);
62880 status_set.backlog = skb_queue_len(&audit_skb_queue);
62881 audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_GET, 0, 0,
62882 &status_set, sizeof(status_set));
62883@@ -891,8 +891,10 @@ static int audit_receive_msg(struct sk_b
62884 spin_unlock_irq(&tsk->sighand->siglock);
62885 }
62886 read_unlock(&tasklist_lock);
62887- audit_send_reply(NETLINK_CB(skb).pid, seq, AUDIT_TTY_GET, 0, 0,
62888- &s, sizeof(s));
62889+
62890+ if (!err)
62891+ audit_send_reply(NETLINK_CB(skb).pid, seq,
62892+ AUDIT_TTY_GET, 0, 0, &s, sizeof(s));
62893 break;
62894 }
62895 case AUDIT_TTY_SET: {
62896diff -urNp linux-2.6.32.43/kernel/auditsc.c linux-2.6.32.43/kernel/auditsc.c
62897--- linux-2.6.32.43/kernel/auditsc.c 2011-03-27 14:31:47.000000000 -0400
62898+++ linux-2.6.32.43/kernel/auditsc.c 2011-05-04 17:56:28.000000000 -0400
62899@@ -2113,7 +2113,7 @@ int auditsc_get_stamp(struct audit_conte
62900 }
62901
62902 /* global counter which is incremented every time something logs in */
62903-static atomic_t session_id = ATOMIC_INIT(0);
62904+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
62905
62906 /**
62907 * audit_set_loginuid - set a task's audit_context loginuid
62908@@ -2126,7 +2126,7 @@ static atomic_t session_id = ATOMIC_INIT
62909 */
62910 int audit_set_loginuid(struct task_struct *task, uid_t loginuid)
62911 {
62912- unsigned int sessionid = atomic_inc_return(&session_id);
62913+ unsigned int sessionid = atomic_inc_return_unchecked(&session_id);
62914 struct audit_context *context = task->audit_context;
62915
62916 if (context && context->in_syscall) {
62917diff -urNp linux-2.6.32.43/kernel/capability.c linux-2.6.32.43/kernel/capability.c
62918--- linux-2.6.32.43/kernel/capability.c 2011-03-27 14:31:47.000000000 -0400
62919+++ linux-2.6.32.43/kernel/capability.c 2011-04-17 15:56:46.000000000 -0400
62920@@ -305,10 +305,26 @@ int capable(int cap)
62921 BUG();
62922 }
62923
62924- if (security_capable(cap) == 0) {
62925+ if (security_capable(cap) == 0 && gr_is_capable(cap)) {
62926 current->flags |= PF_SUPERPRIV;
62927 return 1;
62928 }
62929 return 0;
62930 }
62931+
62932+int capable_nolog(int cap)
62933+{
62934+ if (unlikely(!cap_valid(cap))) {
62935+ printk(KERN_CRIT "capable() called with invalid cap=%u\n", cap);
62936+ BUG();
62937+ }
62938+
62939+ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) {
62940+ current->flags |= PF_SUPERPRIV;
62941+ return 1;
62942+ }
62943+ return 0;
62944+}
62945+
62946 EXPORT_SYMBOL(capable);
62947+EXPORT_SYMBOL(capable_nolog);
62948diff -urNp linux-2.6.32.43/kernel/cgroup.c linux-2.6.32.43/kernel/cgroup.c
62949--- linux-2.6.32.43/kernel/cgroup.c 2011-03-27 14:31:47.000000000 -0400
62950+++ linux-2.6.32.43/kernel/cgroup.c 2011-05-16 21:46:57.000000000 -0400
62951@@ -536,6 +536,8 @@ static struct css_set *find_css_set(
62952 struct hlist_head *hhead;
62953 struct cg_cgroup_link *link;
62954
62955+ pax_track_stack();
62956+
62957 /* First see if we already have a cgroup group that matches
62958 * the desired set */
62959 read_lock(&css_set_lock);
62960diff -urNp linux-2.6.32.43/kernel/configs.c linux-2.6.32.43/kernel/configs.c
62961--- linux-2.6.32.43/kernel/configs.c 2011-03-27 14:31:47.000000000 -0400
62962+++ linux-2.6.32.43/kernel/configs.c 2011-04-17 15:56:46.000000000 -0400
62963@@ -73,8 +73,19 @@ static int __init ikconfig_init(void)
62964 struct proc_dir_entry *entry;
62965
62966 /* create the current config file */
62967+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
62968+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
62969+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
62970+ &ikconfig_file_ops);
62971+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62972+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
62973+ &ikconfig_file_ops);
62974+#endif
62975+#else
62976 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
62977 &ikconfig_file_ops);
62978+#endif
62979+
62980 if (!entry)
62981 return -ENOMEM;
62982
62983diff -urNp linux-2.6.32.43/kernel/cpu.c linux-2.6.32.43/kernel/cpu.c
62984--- linux-2.6.32.43/kernel/cpu.c 2011-03-27 14:31:47.000000000 -0400
62985+++ linux-2.6.32.43/kernel/cpu.c 2011-04-17 15:56:46.000000000 -0400
62986@@ -19,7 +19,7 @@
62987 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
62988 static DEFINE_MUTEX(cpu_add_remove_lock);
62989
62990-static __cpuinitdata RAW_NOTIFIER_HEAD(cpu_chain);
62991+static RAW_NOTIFIER_HEAD(cpu_chain);
62992
62993 /* If set, cpu_up and cpu_down will return -EBUSY and do nothing.
62994 * Should always be manipulated under cpu_add_remove_lock
62995diff -urNp linux-2.6.32.43/kernel/cred.c linux-2.6.32.43/kernel/cred.c
62996--- linux-2.6.32.43/kernel/cred.c 2011-03-27 14:31:47.000000000 -0400
62997+++ linux-2.6.32.43/kernel/cred.c 2011-05-17 19:26:34.000000000 -0400
62998@@ -160,6 +160,8 @@ static void put_cred_rcu(struct rcu_head
62999 */
63000 void __put_cred(struct cred *cred)
63001 {
63002+ pax_track_stack();
63003+
63004 kdebug("__put_cred(%p{%d,%d})", cred,
63005 atomic_read(&cred->usage),
63006 read_cred_subscribers(cred));
63007@@ -184,6 +186,8 @@ void exit_creds(struct task_struct *tsk)
63008 {
63009 struct cred *cred;
63010
63011+ pax_track_stack();
63012+
63013 kdebug("exit_creds(%u,%p,%p,{%d,%d})", tsk->pid, tsk->real_cred, tsk->cred,
63014 atomic_read(&tsk->cred->usage),
63015 read_cred_subscribers(tsk->cred));
63016@@ -222,6 +226,8 @@ const struct cred *get_task_cred(struct
63017 {
63018 const struct cred *cred;
63019
63020+ pax_track_stack();
63021+
63022 rcu_read_lock();
63023
63024 do {
63025@@ -241,6 +247,8 @@ struct cred *cred_alloc_blank(void)
63026 {
63027 struct cred *new;
63028
63029+ pax_track_stack();
63030+
63031 new = kmem_cache_zalloc(cred_jar, GFP_KERNEL);
63032 if (!new)
63033 return NULL;
63034@@ -289,6 +297,8 @@ struct cred *prepare_creds(void)
63035 const struct cred *old;
63036 struct cred *new;
63037
63038+ pax_track_stack();
63039+
63040 validate_process_creds();
63041
63042 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
63043@@ -335,6 +345,8 @@ struct cred *prepare_exec_creds(void)
63044 struct thread_group_cred *tgcred = NULL;
63045 struct cred *new;
63046
63047+ pax_track_stack();
63048+
63049 #ifdef CONFIG_KEYS
63050 tgcred = kmalloc(sizeof(*tgcred), GFP_KERNEL);
63051 if (!tgcred)
63052@@ -441,6 +453,8 @@ int copy_creds(struct task_struct *p, un
63053 struct cred *new;
63054 int ret;
63055
63056+ pax_track_stack();
63057+
63058 mutex_init(&p->cred_guard_mutex);
63059
63060 if (
63061@@ -528,6 +542,8 @@ int commit_creds(struct cred *new)
63062 struct task_struct *task = current;
63063 const struct cred *old = task->real_cred;
63064
63065+ pax_track_stack();
63066+
63067 kdebug("commit_creds(%p{%d,%d})", new,
63068 atomic_read(&new->usage),
63069 read_cred_subscribers(new));
63070@@ -544,6 +560,8 @@ int commit_creds(struct cred *new)
63071
63072 get_cred(new); /* we will require a ref for the subj creds too */
63073
63074+ gr_set_role_label(task, new->uid, new->gid);
63075+
63076 /* dumpability changes */
63077 if (old->euid != new->euid ||
63078 old->egid != new->egid ||
63079@@ -606,6 +624,8 @@ EXPORT_SYMBOL(commit_creds);
63080 */
63081 void abort_creds(struct cred *new)
63082 {
63083+ pax_track_stack();
63084+
63085 kdebug("abort_creds(%p{%d,%d})", new,
63086 atomic_read(&new->usage),
63087 read_cred_subscribers(new));
63088@@ -629,6 +649,8 @@ const struct cred *override_creds(const
63089 {
63090 const struct cred *old = current->cred;
63091
63092+ pax_track_stack();
63093+
63094 kdebug("override_creds(%p{%d,%d})", new,
63095 atomic_read(&new->usage),
63096 read_cred_subscribers(new));
63097@@ -658,6 +680,8 @@ void revert_creds(const struct cred *old
63098 {
63099 const struct cred *override = current->cred;
63100
63101+ pax_track_stack();
63102+
63103 kdebug("revert_creds(%p{%d,%d})", old,
63104 atomic_read(&old->usage),
63105 read_cred_subscribers(old));
63106@@ -704,6 +728,8 @@ struct cred *prepare_kernel_cred(struct
63107 const struct cred *old;
63108 struct cred *new;
63109
63110+ pax_track_stack();
63111+
63112 new = kmem_cache_alloc(cred_jar, GFP_KERNEL);
63113 if (!new)
63114 return NULL;
63115@@ -758,6 +784,8 @@ EXPORT_SYMBOL(prepare_kernel_cred);
63116 */
63117 int set_security_override(struct cred *new, u32 secid)
63118 {
63119+ pax_track_stack();
63120+
63121 return security_kernel_act_as(new, secid);
63122 }
63123 EXPORT_SYMBOL(set_security_override);
63124@@ -777,6 +805,8 @@ int set_security_override_from_ctx(struc
63125 u32 secid;
63126 int ret;
63127
63128+ pax_track_stack();
63129+
63130 ret = security_secctx_to_secid(secctx, strlen(secctx), &secid);
63131 if (ret < 0)
63132 return ret;
63133diff -urNp linux-2.6.32.43/kernel/exit.c linux-2.6.32.43/kernel/exit.c
63134--- linux-2.6.32.43/kernel/exit.c 2011-03-27 14:31:47.000000000 -0400
63135+++ linux-2.6.32.43/kernel/exit.c 2011-04-17 15:56:46.000000000 -0400
63136@@ -55,6 +55,10 @@
63137 #include <asm/pgtable.h>
63138 #include <asm/mmu_context.h>
63139
63140+#ifdef CONFIG_GRKERNSEC
63141+extern rwlock_t grsec_exec_file_lock;
63142+#endif
63143+
63144 static void exit_mm(struct task_struct * tsk);
63145
63146 static void __unhash_process(struct task_struct *p)
63147@@ -174,6 +178,8 @@ void release_task(struct task_struct * p
63148 struct task_struct *leader;
63149 int zap_leader;
63150 repeat:
63151+ gr_del_task_from_ip_table(p);
63152+
63153 tracehook_prepare_release_task(p);
63154 /* don't need to get the RCU readlock here - the process is dead and
63155 * can't be modifying its own credentials */
63156@@ -341,11 +347,22 @@ static void reparent_to_kthreadd(void)
63157 {
63158 write_lock_irq(&tasklist_lock);
63159
63160+#ifdef CONFIG_GRKERNSEC
63161+ write_lock(&grsec_exec_file_lock);
63162+ if (current->exec_file) {
63163+ fput(current->exec_file);
63164+ current->exec_file = NULL;
63165+ }
63166+ write_unlock(&grsec_exec_file_lock);
63167+#endif
63168+
63169 ptrace_unlink(current);
63170 /* Reparent to init */
63171 current->real_parent = current->parent = kthreadd_task;
63172 list_move_tail(&current->sibling, &current->real_parent->children);
63173
63174+ gr_set_kernel_label(current);
63175+
63176 /* Set the exit signal to SIGCHLD so we signal init on exit */
63177 current->exit_signal = SIGCHLD;
63178
63179@@ -397,7 +414,7 @@ int allow_signal(int sig)
63180 * know it'll be handled, so that they don't get converted to
63181 * SIGKILL or just silently dropped.
63182 */
63183- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
63184+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
63185 recalc_sigpending();
63186 spin_unlock_irq(&current->sighand->siglock);
63187 return 0;
63188@@ -433,6 +450,17 @@ void daemonize(const char *name, ...)
63189 vsnprintf(current->comm, sizeof(current->comm), name, args);
63190 va_end(args);
63191
63192+#ifdef CONFIG_GRKERNSEC
63193+ write_lock(&grsec_exec_file_lock);
63194+ if (current->exec_file) {
63195+ fput(current->exec_file);
63196+ current->exec_file = NULL;
63197+ }
63198+ write_unlock(&grsec_exec_file_lock);
63199+#endif
63200+
63201+ gr_set_kernel_label(current);
63202+
63203 /*
63204 * If we were started as result of loading a module, close all of the
63205 * user space pages. We don't need them, and if we didn't close them
63206@@ -897,17 +925,17 @@ NORET_TYPE void do_exit(long code)
63207 struct task_struct *tsk = current;
63208 int group_dead;
63209
63210- profile_task_exit(tsk);
63211-
63212- WARN_ON(atomic_read(&tsk->fs_excl));
63213-
63214+ /*
63215+ * Check this first since set_fs() below depends on
63216+ * current_thread_info(), which we better not access when we're in
63217+ * interrupt context. Other than that, we want to do the set_fs()
63218+ * as early as possible.
63219+ */
63220 if (unlikely(in_interrupt()))
63221 panic("Aiee, killing interrupt handler!");
63222- if (unlikely(!tsk->pid))
63223- panic("Attempted to kill the idle task!");
63224
63225 /*
63226- * If do_exit is called because this processes oopsed, it's possible
63227+ * If do_exit is called because this processes Oops'ed, it's possible
63228 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
63229 * continuing. Amongst other possible reasons, this is to prevent
63230 * mm_release()->clear_child_tid() from writing to a user-controlled
63231@@ -915,6 +943,13 @@ NORET_TYPE void do_exit(long code)
63232 */
63233 set_fs(USER_DS);
63234
63235+ profile_task_exit(tsk);
63236+
63237+ WARN_ON(atomic_read(&tsk->fs_excl));
63238+
63239+ if (unlikely(!tsk->pid))
63240+ panic("Attempted to kill the idle task!");
63241+
63242 tracehook_report_exit(&code);
63243
63244 validate_creds_for_do_exit(tsk);
63245@@ -973,6 +1008,9 @@ NORET_TYPE void do_exit(long code)
63246 tsk->exit_code = code;
63247 taskstats_exit(tsk, group_dead);
63248
63249+ gr_acl_handle_psacct(tsk, code);
63250+ gr_acl_handle_exit();
63251+
63252 exit_mm(tsk);
63253
63254 if (group_dead)
63255@@ -1188,7 +1226,7 @@ static int wait_task_zombie(struct wait_
63256
63257 if (unlikely(wo->wo_flags & WNOWAIT)) {
63258 int exit_code = p->exit_code;
63259- int why, status;
63260+ int why;
63261
63262 get_task_struct(p);
63263 read_unlock(&tasklist_lock);
63264diff -urNp linux-2.6.32.43/kernel/fork.c linux-2.6.32.43/kernel/fork.c
63265--- linux-2.6.32.43/kernel/fork.c 2011-03-27 14:31:47.000000000 -0400
63266+++ linux-2.6.32.43/kernel/fork.c 2011-04-17 15:56:46.000000000 -0400
63267@@ -253,7 +253,7 @@ static struct task_struct *dup_task_stru
63268 *stackend = STACK_END_MAGIC; /* for overflow detection */
63269
63270 #ifdef CONFIG_CC_STACKPROTECTOR
63271- tsk->stack_canary = get_random_int();
63272+ tsk->stack_canary = pax_get_random_long();
63273 #endif
63274
63275 /* One for us, one for whoever does the "release_task()" (usually parent) */
63276@@ -293,8 +293,8 @@ static int dup_mmap(struct mm_struct *mm
63277 mm->locked_vm = 0;
63278 mm->mmap = NULL;
63279 mm->mmap_cache = NULL;
63280- mm->free_area_cache = oldmm->mmap_base;
63281- mm->cached_hole_size = ~0UL;
63282+ mm->free_area_cache = oldmm->free_area_cache;
63283+ mm->cached_hole_size = oldmm->cached_hole_size;
63284 mm->map_count = 0;
63285 cpumask_clear(mm_cpumask(mm));
63286 mm->mm_rb = RB_ROOT;
63287@@ -335,6 +335,7 @@ static int dup_mmap(struct mm_struct *mm
63288 tmp->vm_flags &= ~VM_LOCKED;
63289 tmp->vm_mm = mm;
63290 tmp->vm_next = tmp->vm_prev = NULL;
63291+ tmp->vm_mirror = NULL;
63292 anon_vma_link(tmp);
63293 file = tmp->vm_file;
63294 if (file) {
63295@@ -384,6 +385,31 @@ static int dup_mmap(struct mm_struct *mm
63296 if (retval)
63297 goto out;
63298 }
63299+
63300+#ifdef CONFIG_PAX_SEGMEXEC
63301+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
63302+ struct vm_area_struct *mpnt_m;
63303+
63304+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
63305+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
63306+
63307+ if (!mpnt->vm_mirror)
63308+ continue;
63309+
63310+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
63311+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
63312+ mpnt->vm_mirror = mpnt_m;
63313+ } else {
63314+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
63315+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
63316+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
63317+ mpnt->vm_mirror->vm_mirror = mpnt;
63318+ }
63319+ }
63320+ BUG_ON(mpnt_m);
63321+ }
63322+#endif
63323+
63324 /* a new mm has just been created */
63325 arch_dup_mmap(oldmm, mm);
63326 retval = 0;
63327@@ -734,13 +760,14 @@ static int copy_fs(unsigned long clone_f
63328 write_unlock(&fs->lock);
63329 return -EAGAIN;
63330 }
63331- fs->users++;
63332+ atomic_inc(&fs->users);
63333 write_unlock(&fs->lock);
63334 return 0;
63335 }
63336 tsk->fs = copy_fs_struct(fs);
63337 if (!tsk->fs)
63338 return -ENOMEM;
63339+ gr_set_chroot_entries(tsk, &tsk->fs->root);
63340 return 0;
63341 }
63342
63343@@ -1033,10 +1060,13 @@ static struct task_struct *copy_process(
63344 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
63345 #endif
63346 retval = -EAGAIN;
63347+
63348+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
63349+
63350 if (atomic_read(&p->real_cred->user->processes) >=
63351 p->signal->rlim[RLIMIT_NPROC].rlim_cur) {
63352- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
63353- p->real_cred->user != INIT_USER)
63354+ if (p->real_cred->user != INIT_USER &&
63355+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
63356 goto bad_fork_free;
63357 }
63358
63359@@ -1183,6 +1213,8 @@ static struct task_struct *copy_process(
63360 goto bad_fork_free_pid;
63361 }
63362
63363+ gr_copy_label(p);
63364+
63365 p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
63366 /*
63367 * Clear TID on mm_release()?
63368@@ -1333,6 +1365,8 @@ bad_fork_cleanup_count:
63369 bad_fork_free:
63370 free_task(p);
63371 fork_out:
63372+ gr_log_forkfail(retval);
63373+
63374 return ERR_PTR(retval);
63375 }
63376
63377@@ -1426,6 +1460,8 @@ long do_fork(unsigned long clone_flags,
63378 if (clone_flags & CLONE_PARENT_SETTID)
63379 put_user(nr, parent_tidptr);
63380
63381+ gr_handle_brute_check();
63382+
63383 if (clone_flags & CLONE_VFORK) {
63384 p->vfork_done = &vfork;
63385 init_completion(&vfork);
63386@@ -1558,7 +1594,7 @@ static int unshare_fs(unsigned long unsh
63387 return 0;
63388
63389 /* don't need lock here; in the worst case we'll do useless copy */
63390- if (fs->users == 1)
63391+ if (atomic_read(&fs->users) == 1)
63392 return 0;
63393
63394 *new_fsp = copy_fs_struct(fs);
63395@@ -1681,7 +1717,8 @@ SYSCALL_DEFINE1(unshare, unsigned long,
63396 fs = current->fs;
63397 write_lock(&fs->lock);
63398 current->fs = new_fs;
63399- if (--fs->users)
63400+ gr_set_chroot_entries(current, &current->fs->root);
63401+ if (atomic_dec_return(&fs->users))
63402 new_fs = NULL;
63403 else
63404 new_fs = fs;
63405diff -urNp linux-2.6.32.43/kernel/futex.c linux-2.6.32.43/kernel/futex.c
63406--- linux-2.6.32.43/kernel/futex.c 2011-03-27 14:31:47.000000000 -0400
63407+++ linux-2.6.32.43/kernel/futex.c 2011-05-16 21:46:57.000000000 -0400
63408@@ -54,6 +54,7 @@
63409 #include <linux/mount.h>
63410 #include <linux/pagemap.h>
63411 #include <linux/syscalls.h>
63412+#include <linux/ptrace.h>
63413 #include <linux/signal.h>
63414 #include <linux/module.h>
63415 #include <linux/magic.h>
63416@@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh
63417 struct page *page;
63418 int err;
63419
63420+#ifdef CONFIG_PAX_SEGMEXEC
63421+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
63422+ return -EFAULT;
63423+#endif
63424+
63425 /*
63426 * The futex address must be "naturally" aligned.
63427 */
63428@@ -1789,6 +1795,8 @@ static int futex_wait(u32 __user *uaddr,
63429 struct futex_q q;
63430 int ret;
63431
63432+ pax_track_stack();
63433+
63434 if (!bitset)
63435 return -EINVAL;
63436
63437@@ -1841,7 +1849,7 @@ retry:
63438
63439 restart = &current_thread_info()->restart_block;
63440 restart->fn = futex_wait_restart;
63441- restart->futex.uaddr = (u32 *)uaddr;
63442+ restart->futex.uaddr = uaddr;
63443 restart->futex.val = val;
63444 restart->futex.time = abs_time->tv64;
63445 restart->futex.bitset = bitset;
63446@@ -2203,6 +2211,8 @@ static int futex_wait_requeue_pi(u32 __u
63447 struct futex_q q;
63448 int res, ret;
63449
63450+ pax_track_stack();
63451+
63452 if (!bitset)
63453 return -EINVAL;
63454
63455@@ -2377,7 +2387,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63456 {
63457 struct robust_list_head __user *head;
63458 unsigned long ret;
63459+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
63460 const struct cred *cred = current_cred(), *pcred;
63461+#endif
63462
63463 if (!futex_cmpxchg_enabled)
63464 return -ENOSYS;
63465@@ -2393,11 +2405,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
63466 if (!p)
63467 goto err_unlock;
63468 ret = -EPERM;
63469+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63470+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63471+ goto err_unlock;
63472+#else
63473 pcred = __task_cred(p);
63474 if (cred->euid != pcred->euid &&
63475 cred->euid != pcred->uid &&
63476 !capable(CAP_SYS_PTRACE))
63477 goto err_unlock;
63478+#endif
63479 head = p->robust_list;
63480 rcu_read_unlock();
63481 }
63482@@ -2459,7 +2476,7 @@ retry:
63483 */
63484 static inline int fetch_robust_entry(struct robust_list __user **entry,
63485 struct robust_list __user * __user *head,
63486- int *pi)
63487+ unsigned int *pi)
63488 {
63489 unsigned long uentry;
63490
63491@@ -2640,6 +2657,7 @@ static int __init futex_init(void)
63492 {
63493 u32 curval;
63494 int i;
63495+ mm_segment_t oldfs;
63496
63497 /*
63498 * This will fail and we want it. Some arch implementations do
63499@@ -2651,7 +2669,10 @@ static int __init futex_init(void)
63500 * implementation, the non functional ones will return
63501 * -ENOSYS.
63502 */
63503+ oldfs = get_fs();
63504+ set_fs(USER_DS);
63505 curval = cmpxchg_futex_value_locked(NULL, 0, 0);
63506+ set_fs(oldfs);
63507 if (curval == -EFAULT)
63508 futex_cmpxchg_enabled = 1;
63509
63510diff -urNp linux-2.6.32.43/kernel/futex_compat.c linux-2.6.32.43/kernel/futex_compat.c
63511--- linux-2.6.32.43/kernel/futex_compat.c 2011-03-27 14:31:47.000000000 -0400
63512+++ linux-2.6.32.43/kernel/futex_compat.c 2011-04-17 15:56:46.000000000 -0400
63513@@ -10,6 +10,7 @@
63514 #include <linux/compat.h>
63515 #include <linux/nsproxy.h>
63516 #include <linux/futex.h>
63517+#include <linux/ptrace.h>
63518
63519 #include <asm/uaccess.h>
63520
63521@@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp
63522 {
63523 struct compat_robust_list_head __user *head;
63524 unsigned long ret;
63525- const struct cred *cred = current_cred(), *pcred;
63526+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
63527+ const struct cred *cred = current_cred();
63528+ const struct cred *pcred;
63529+#endif
63530
63531 if (!futex_cmpxchg_enabled)
63532 return -ENOSYS;
63533@@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp
63534 if (!p)
63535 goto err_unlock;
63536 ret = -EPERM;
63537+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
63538+ if (!ptrace_may_access(p, PTRACE_MODE_READ))
63539+ goto err_unlock;
63540+#else
63541 pcred = __task_cred(p);
63542 if (cred->euid != pcred->euid &&
63543 cred->euid != pcred->uid &&
63544 !capable(CAP_SYS_PTRACE))
63545 goto err_unlock;
63546+#endif
63547 head = p->compat_robust_list;
63548 read_unlock(&tasklist_lock);
63549 }
63550diff -urNp linux-2.6.32.43/kernel/gcov/base.c linux-2.6.32.43/kernel/gcov/base.c
63551--- linux-2.6.32.43/kernel/gcov/base.c 2011-03-27 14:31:47.000000000 -0400
63552+++ linux-2.6.32.43/kernel/gcov/base.c 2011-04-17 15:56:46.000000000 -0400
63553@@ -102,11 +102,6 @@ void gcov_enable_events(void)
63554 }
63555
63556 #ifdef CONFIG_MODULES
63557-static inline int within(void *addr, void *start, unsigned long size)
63558-{
63559- return ((addr >= start) && (addr < start + size));
63560-}
63561-
63562 /* Update list and generate events when modules are unloaded. */
63563 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
63564 void *data)
63565@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n
63566 prev = NULL;
63567 /* Remove entries located in module from linked list. */
63568 for (info = gcov_info_head; info; info = info->next) {
63569- if (within(info, mod->module_core, mod->core_size)) {
63570+ if (within_module_core_rw((unsigned long)info, mod)) {
63571 if (prev)
63572 prev->next = info->next;
63573 else
63574diff -urNp linux-2.6.32.43/kernel/hrtimer.c linux-2.6.32.43/kernel/hrtimer.c
63575--- linux-2.6.32.43/kernel/hrtimer.c 2011-03-27 14:31:47.000000000 -0400
63576+++ linux-2.6.32.43/kernel/hrtimer.c 2011-04-17 15:56:46.000000000 -0400
63577@@ -1391,7 +1391,7 @@ void hrtimer_peek_ahead_timers(void)
63578 local_irq_restore(flags);
63579 }
63580
63581-static void run_hrtimer_softirq(struct softirq_action *h)
63582+static void run_hrtimer_softirq(void)
63583 {
63584 hrtimer_peek_ahead_timers();
63585 }
63586diff -urNp linux-2.6.32.43/kernel/kallsyms.c linux-2.6.32.43/kernel/kallsyms.c
63587--- linux-2.6.32.43/kernel/kallsyms.c 2011-03-27 14:31:47.000000000 -0400
63588+++ linux-2.6.32.43/kernel/kallsyms.c 2011-04-17 15:56:46.000000000 -0400
63589@@ -11,6 +11,9 @@
63590 * Changed the compression method from stem compression to "table lookup"
63591 * compression (see scripts/kallsyms.c for a more complete description)
63592 */
63593+#ifdef CONFIG_GRKERNSEC_HIDESYM
63594+#define __INCLUDED_BY_HIDESYM 1
63595+#endif
63596 #include <linux/kallsyms.h>
63597 #include <linux/module.h>
63598 #include <linux/init.h>
63599@@ -51,12 +54,33 @@ extern const unsigned long kallsyms_mark
63600
63601 static inline int is_kernel_inittext(unsigned long addr)
63602 {
63603+ if (system_state != SYSTEM_BOOTING)
63604+ return 0;
63605+
63606 if (addr >= (unsigned long)_sinittext
63607 && addr <= (unsigned long)_einittext)
63608 return 1;
63609 return 0;
63610 }
63611
63612+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63613+#ifdef CONFIG_MODULES
63614+static inline int is_module_text(unsigned long addr)
63615+{
63616+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
63617+ return 1;
63618+
63619+ addr = ktla_ktva(addr);
63620+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
63621+}
63622+#else
63623+static inline int is_module_text(unsigned long addr)
63624+{
63625+ return 0;
63626+}
63627+#endif
63628+#endif
63629+
63630 static inline int is_kernel_text(unsigned long addr)
63631 {
63632 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
63633@@ -67,13 +91,28 @@ static inline int is_kernel_text(unsigne
63634
63635 static inline int is_kernel(unsigned long addr)
63636 {
63637+
63638+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63639+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
63640+ return 1;
63641+
63642+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
63643+#else
63644 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
63645+#endif
63646+
63647 return 1;
63648 return in_gate_area_no_task(addr);
63649 }
63650
63651 static int is_ksym_addr(unsigned long addr)
63652 {
63653+
63654+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
63655+ if (is_module_text(addr))
63656+ return 0;
63657+#endif
63658+
63659 if (all_var)
63660 return is_kernel(addr);
63661
63662@@ -413,7 +452,6 @@ static unsigned long get_ksymbol_core(st
63663
63664 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
63665 {
63666- iter->name[0] = '\0';
63667 iter->nameoff = get_symbol_offset(new_pos);
63668 iter->pos = new_pos;
63669 }
63670@@ -461,6 +499,11 @@ static int s_show(struct seq_file *m, vo
63671 {
63672 struct kallsym_iter *iter = m->private;
63673
63674+#ifdef CONFIG_GRKERNSEC_HIDESYM
63675+ if (current_uid())
63676+ return 0;
63677+#endif
63678+
63679 /* Some debugging symbols have no name. Ignore them. */
63680 if (!iter->name[0])
63681 return 0;
63682@@ -501,7 +544,7 @@ static int kallsyms_open(struct inode *i
63683 struct kallsym_iter *iter;
63684 int ret;
63685
63686- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
63687+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
63688 if (!iter)
63689 return -ENOMEM;
63690 reset_iter(iter, 0);
63691diff -urNp linux-2.6.32.43/kernel/kgdb.c linux-2.6.32.43/kernel/kgdb.c
63692--- linux-2.6.32.43/kernel/kgdb.c 2011-04-17 17:00:52.000000000 -0400
63693+++ linux-2.6.32.43/kernel/kgdb.c 2011-05-04 17:56:20.000000000 -0400
63694@@ -86,7 +86,7 @@ static int kgdb_io_module_registered;
63695 /* Guard for recursive entry */
63696 static int exception_level;
63697
63698-static struct kgdb_io *kgdb_io_ops;
63699+static const struct kgdb_io *kgdb_io_ops;
63700 static DEFINE_SPINLOCK(kgdb_registration_lock);
63701
63702 /* kgdb console driver is loaded */
63703@@ -123,7 +123,7 @@ atomic_t kgdb_active = ATOMIC_INIT(-1)
63704 */
63705 static atomic_t passive_cpu_wait[NR_CPUS];
63706 static atomic_t cpu_in_kgdb[NR_CPUS];
63707-atomic_t kgdb_setting_breakpoint;
63708+atomic_unchecked_t kgdb_setting_breakpoint;
63709
63710 struct task_struct *kgdb_usethread;
63711 struct task_struct *kgdb_contthread;
63712@@ -140,7 +140,7 @@ static unsigned long gdb_regs[(NUMREGBY
63713 sizeof(unsigned long)];
63714
63715 /* to keep track of the CPU which is doing the single stepping*/
63716-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63717+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
63718
63719 /*
63720 * If you are debugging a problem where roundup (the collection of
63721@@ -815,7 +815,7 @@ static int kgdb_io_ready(int print_wait)
63722 return 0;
63723 if (kgdb_connected)
63724 return 1;
63725- if (atomic_read(&kgdb_setting_breakpoint))
63726+ if (atomic_read_unchecked(&kgdb_setting_breakpoint))
63727 return 1;
63728 if (print_wait)
63729 printk(KERN_CRIT "KGDB: Waiting for remote debugger\n");
63730@@ -1426,8 +1426,8 @@ acquirelock:
63731 * instance of the exception handler wanted to come into the
63732 * debugger on a different CPU via a single step
63733 */
63734- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
63735- atomic_read(&kgdb_cpu_doing_single_step) != cpu) {
63736+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
63737+ atomic_read_unchecked(&kgdb_cpu_doing_single_step) != cpu) {
63738
63739 atomic_set(&kgdb_active, -1);
63740 touch_softlockup_watchdog();
63741@@ -1634,7 +1634,7 @@ static void kgdb_initial_breakpoint(void
63742 *
63743 * Register it with the KGDB core.
63744 */
63745-int kgdb_register_io_module(struct kgdb_io *new_kgdb_io_ops)
63746+int kgdb_register_io_module(const struct kgdb_io *new_kgdb_io_ops)
63747 {
63748 int err;
63749
63750@@ -1679,7 +1679,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul
63751 *
63752 * Unregister it with the KGDB core.
63753 */
63754-void kgdb_unregister_io_module(struct kgdb_io *old_kgdb_io_ops)
63755+void kgdb_unregister_io_module(const struct kgdb_io *old_kgdb_io_ops)
63756 {
63757 BUG_ON(kgdb_connected);
63758
63759@@ -1712,11 +1712,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_mod
63760 */
63761 void kgdb_breakpoint(void)
63762 {
63763- atomic_set(&kgdb_setting_breakpoint, 1);
63764+ atomic_set_unchecked(&kgdb_setting_breakpoint, 1);
63765 wmb(); /* Sync point before breakpoint */
63766 arch_kgdb_breakpoint();
63767 wmb(); /* Sync point after breakpoint */
63768- atomic_set(&kgdb_setting_breakpoint, 0);
63769+ atomic_set_unchecked(&kgdb_setting_breakpoint, 0);
63770 }
63771 EXPORT_SYMBOL_GPL(kgdb_breakpoint);
63772
63773diff -urNp linux-2.6.32.43/kernel/kmod.c linux-2.6.32.43/kernel/kmod.c
63774--- linux-2.6.32.43/kernel/kmod.c 2011-03-27 14:31:47.000000000 -0400
63775+++ linux-2.6.32.43/kernel/kmod.c 2011-04-17 15:56:46.000000000 -0400
63776@@ -65,13 +65,12 @@ char modprobe_path[KMOD_PATH_LEN] = "/sb
63777 * If module auto-loading support is disabled then this function
63778 * becomes a no-operation.
63779 */
63780-int __request_module(bool wait, const char *fmt, ...)
63781+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
63782 {
63783- va_list args;
63784 char module_name[MODULE_NAME_LEN];
63785 unsigned int max_modprobes;
63786 int ret;
63787- char *argv[] = { modprobe_path, "-q", "--", module_name, NULL };
63788+ char *argv[] = { modprobe_path, "-q", "--", module_name, module_param, NULL };
63789 static char *envp[] = { "HOME=/",
63790 "TERM=linux",
63791 "PATH=/sbin:/usr/sbin:/bin:/usr/bin",
63792@@ -84,12 +83,24 @@ int __request_module(bool wait, const ch
63793 if (ret)
63794 return ret;
63795
63796- va_start(args, fmt);
63797- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
63798- va_end(args);
63799+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
63800 if (ret >= MODULE_NAME_LEN)
63801 return -ENAMETOOLONG;
63802
63803+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63804+ if (!current_uid()) {
63805+ /* hack to workaround consolekit/udisks stupidity */
63806+ read_lock(&tasklist_lock);
63807+ if (!strcmp(current->comm, "mount") &&
63808+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
63809+ read_unlock(&tasklist_lock);
63810+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
63811+ return -EPERM;
63812+ }
63813+ read_unlock(&tasklist_lock);
63814+ }
63815+#endif
63816+
63817 /* If modprobe needs a service that is in a module, we get a recursive
63818 * loop. Limit the number of running kmod threads to max_threads/2 or
63819 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
63820@@ -121,6 +132,48 @@ int __request_module(bool wait, const ch
63821 atomic_dec(&kmod_concurrent);
63822 return ret;
63823 }
63824+
63825+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
63826+{
63827+ va_list args;
63828+ int ret;
63829+
63830+ va_start(args, fmt);
63831+ ret = ____request_module(wait, module_param, fmt, args);
63832+ va_end(args);
63833+
63834+ return ret;
63835+}
63836+
63837+int __request_module(bool wait, const char *fmt, ...)
63838+{
63839+ va_list args;
63840+ int ret;
63841+
63842+#ifdef CONFIG_GRKERNSEC_MODHARDEN
63843+ if (current_uid()) {
63844+ char module_param[MODULE_NAME_LEN];
63845+
63846+ memset(module_param, 0, sizeof(module_param));
63847+
63848+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", current_uid());
63849+
63850+ va_start(args, fmt);
63851+ ret = ____request_module(wait, module_param, fmt, args);
63852+ va_end(args);
63853+
63854+ return ret;
63855+ }
63856+#endif
63857+
63858+ va_start(args, fmt);
63859+ ret = ____request_module(wait, NULL, fmt, args);
63860+ va_end(args);
63861+
63862+ return ret;
63863+}
63864+
63865+
63866 EXPORT_SYMBOL(__request_module);
63867 #endif /* CONFIG_MODULES */
63868
63869diff -urNp linux-2.6.32.43/kernel/kprobes.c linux-2.6.32.43/kernel/kprobes.c
63870--- linux-2.6.32.43/kernel/kprobes.c 2011-03-27 14:31:47.000000000 -0400
63871+++ linux-2.6.32.43/kernel/kprobes.c 2011-04-17 15:56:46.000000000 -0400
63872@@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_
63873 * kernel image and loaded module images reside. This is required
63874 * so x86_64 can correctly handle the %rip-relative fixups.
63875 */
63876- kip->insns = module_alloc(PAGE_SIZE);
63877+ kip->insns = module_alloc_exec(PAGE_SIZE);
63878 if (!kip->insns) {
63879 kfree(kip);
63880 return NULL;
63881@@ -220,7 +220,7 @@ static int __kprobes collect_one_slot(st
63882 */
63883 if (!list_is_singular(&kprobe_insn_pages)) {
63884 list_del(&kip->list);
63885- module_free(NULL, kip->insns);
63886+ module_free_exec(NULL, kip->insns);
63887 kfree(kip);
63888 }
63889 return 1;
63890@@ -1189,7 +1189,7 @@ static int __init init_kprobes(void)
63891 {
63892 int i, err = 0;
63893 unsigned long offset = 0, size = 0;
63894- char *modname, namebuf[128];
63895+ char *modname, namebuf[KSYM_NAME_LEN];
63896 const char *symbol_name;
63897 void *addr;
63898 struct kprobe_blackpoint *kb;
63899@@ -1304,7 +1304,7 @@ static int __kprobes show_kprobe_addr(st
63900 const char *sym = NULL;
63901 unsigned int i = *(loff_t *) v;
63902 unsigned long offset = 0;
63903- char *modname, namebuf[128];
63904+ char *modname, namebuf[KSYM_NAME_LEN];
63905
63906 head = &kprobe_table[i];
63907 preempt_disable();
63908diff -urNp linux-2.6.32.43/kernel/lockdep.c linux-2.6.32.43/kernel/lockdep.c
63909--- linux-2.6.32.43/kernel/lockdep.c 2011-06-25 12:55:35.000000000 -0400
63910+++ linux-2.6.32.43/kernel/lockdep.c 2011-06-25 12:56:37.000000000 -0400
63911@@ -421,20 +421,20 @@ static struct stack_trace lockdep_init_t
63912 /*
63913 * Various lockdep statistics:
63914 */
63915-atomic_t chain_lookup_hits;
63916-atomic_t chain_lookup_misses;
63917-atomic_t hardirqs_on_events;
63918-atomic_t hardirqs_off_events;
63919-atomic_t redundant_hardirqs_on;
63920-atomic_t redundant_hardirqs_off;
63921-atomic_t softirqs_on_events;
63922-atomic_t softirqs_off_events;
63923-atomic_t redundant_softirqs_on;
63924-atomic_t redundant_softirqs_off;
63925-atomic_t nr_unused_locks;
63926-atomic_t nr_cyclic_checks;
63927-atomic_t nr_find_usage_forwards_checks;
63928-atomic_t nr_find_usage_backwards_checks;
63929+atomic_unchecked_t chain_lookup_hits;
63930+atomic_unchecked_t chain_lookup_misses;
63931+atomic_unchecked_t hardirqs_on_events;
63932+atomic_unchecked_t hardirqs_off_events;
63933+atomic_unchecked_t redundant_hardirqs_on;
63934+atomic_unchecked_t redundant_hardirqs_off;
63935+atomic_unchecked_t softirqs_on_events;
63936+atomic_unchecked_t softirqs_off_events;
63937+atomic_unchecked_t redundant_softirqs_on;
63938+atomic_unchecked_t redundant_softirqs_off;
63939+atomic_unchecked_t nr_unused_locks;
63940+atomic_unchecked_t nr_cyclic_checks;
63941+atomic_unchecked_t nr_find_usage_forwards_checks;
63942+atomic_unchecked_t nr_find_usage_backwards_checks;
63943 #endif
63944
63945 /*
63946@@ -577,6 +577,10 @@ static int static_obj(void *obj)
63947 int i;
63948 #endif
63949
63950+#ifdef CONFIG_PAX_KERNEXEC
63951+ start = ktla_ktva(start);
63952+#endif
63953+
63954 /*
63955 * static variable?
63956 */
63957@@ -592,8 +596,7 @@ static int static_obj(void *obj)
63958 */
63959 for_each_possible_cpu(i) {
63960 start = (unsigned long) &__per_cpu_start + per_cpu_offset(i);
63961- end = (unsigned long) &__per_cpu_start + PERCPU_ENOUGH_ROOM
63962- + per_cpu_offset(i);
63963+ end = start + PERCPU_ENOUGH_ROOM;
63964
63965 if ((addr >= start) && (addr < end))
63966 return 1;
63967@@ -710,6 +713,7 @@ register_lock_class(struct lockdep_map *
63968 if (!static_obj(lock->key)) {
63969 debug_locks_off();
63970 printk("INFO: trying to register non-static key.\n");
63971+ printk("lock:%pS key:%pS.\n", lock, lock->key);
63972 printk("the code is fine but needs lockdep annotation.\n");
63973 printk("turning off the locking correctness validator.\n");
63974 dump_stack();
63975@@ -2751,7 +2755,7 @@ static int __lock_acquire(struct lockdep
63976 if (!class)
63977 return 0;
63978 }
63979- debug_atomic_inc((atomic_t *)&class->ops);
63980+ debug_atomic_inc((atomic_unchecked_t *)&class->ops);
63981 if (very_verbose(class)) {
63982 printk("\nacquire class [%p] %s", class->key, class->name);
63983 if (class->name_version > 1)
63984diff -urNp linux-2.6.32.43/kernel/lockdep_internals.h linux-2.6.32.43/kernel/lockdep_internals.h
63985--- linux-2.6.32.43/kernel/lockdep_internals.h 2011-03-27 14:31:47.000000000 -0400
63986+++ linux-2.6.32.43/kernel/lockdep_internals.h 2011-04-17 15:56:46.000000000 -0400
63987@@ -113,26 +113,26 @@ lockdep_count_backward_deps(struct lock_
63988 /*
63989 * Various lockdep statistics:
63990 */
63991-extern atomic_t chain_lookup_hits;
63992-extern atomic_t chain_lookup_misses;
63993-extern atomic_t hardirqs_on_events;
63994-extern atomic_t hardirqs_off_events;
63995-extern atomic_t redundant_hardirqs_on;
63996-extern atomic_t redundant_hardirqs_off;
63997-extern atomic_t softirqs_on_events;
63998-extern atomic_t softirqs_off_events;
63999-extern atomic_t redundant_softirqs_on;
64000-extern atomic_t redundant_softirqs_off;
64001-extern atomic_t nr_unused_locks;
64002-extern atomic_t nr_cyclic_checks;
64003-extern atomic_t nr_cyclic_check_recursions;
64004-extern atomic_t nr_find_usage_forwards_checks;
64005-extern atomic_t nr_find_usage_forwards_recursions;
64006-extern atomic_t nr_find_usage_backwards_checks;
64007-extern atomic_t nr_find_usage_backwards_recursions;
64008-# define debug_atomic_inc(ptr) atomic_inc(ptr)
64009-# define debug_atomic_dec(ptr) atomic_dec(ptr)
64010-# define debug_atomic_read(ptr) atomic_read(ptr)
64011+extern atomic_unchecked_t chain_lookup_hits;
64012+extern atomic_unchecked_t chain_lookup_misses;
64013+extern atomic_unchecked_t hardirqs_on_events;
64014+extern atomic_unchecked_t hardirqs_off_events;
64015+extern atomic_unchecked_t redundant_hardirqs_on;
64016+extern atomic_unchecked_t redundant_hardirqs_off;
64017+extern atomic_unchecked_t softirqs_on_events;
64018+extern atomic_unchecked_t softirqs_off_events;
64019+extern atomic_unchecked_t redundant_softirqs_on;
64020+extern atomic_unchecked_t redundant_softirqs_off;
64021+extern atomic_unchecked_t nr_unused_locks;
64022+extern atomic_unchecked_t nr_cyclic_checks;
64023+extern atomic_unchecked_t nr_cyclic_check_recursions;
64024+extern atomic_unchecked_t nr_find_usage_forwards_checks;
64025+extern atomic_unchecked_t nr_find_usage_forwards_recursions;
64026+extern atomic_unchecked_t nr_find_usage_backwards_checks;
64027+extern atomic_unchecked_t nr_find_usage_backwards_recursions;
64028+# define debug_atomic_inc(ptr) atomic_inc_unchecked(ptr)
64029+# define debug_atomic_dec(ptr) atomic_dec_unchecked(ptr)
64030+# define debug_atomic_read(ptr) atomic_read_unchecked(ptr)
64031 #else
64032 # define debug_atomic_inc(ptr) do { } while (0)
64033 # define debug_atomic_dec(ptr) do { } while (0)
64034diff -urNp linux-2.6.32.43/kernel/lockdep_proc.c linux-2.6.32.43/kernel/lockdep_proc.c
64035--- linux-2.6.32.43/kernel/lockdep_proc.c 2011-03-27 14:31:47.000000000 -0400
64036+++ linux-2.6.32.43/kernel/lockdep_proc.c 2011-04-17 15:56:46.000000000 -0400
64037@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v
64038
64039 static void print_name(struct seq_file *m, struct lock_class *class)
64040 {
64041- char str[128];
64042+ char str[KSYM_NAME_LEN];
64043 const char *name = class->name;
64044
64045 if (!name) {
64046diff -urNp linux-2.6.32.43/kernel/module.c linux-2.6.32.43/kernel/module.c
64047--- linux-2.6.32.43/kernel/module.c 2011-03-27 14:31:47.000000000 -0400
64048+++ linux-2.6.32.43/kernel/module.c 2011-04-29 18:52:40.000000000 -0400
64049@@ -55,6 +55,7 @@
64050 #include <linux/async.h>
64051 #include <linux/percpu.h>
64052 #include <linux/kmemleak.h>
64053+#include <linux/grsecurity.h>
64054
64055 #define CREATE_TRACE_POINTS
64056 #include <trace/events/module.h>
64057@@ -89,7 +90,8 @@ static DECLARE_WAIT_QUEUE_HEAD(module_wq
64058 static BLOCKING_NOTIFIER_HEAD(module_notify_list);
64059
64060 /* Bounds of module allocation, for speeding __module_address */
64061-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
64062+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
64063+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
64064
64065 int register_module_notifier(struct notifier_block * nb)
64066 {
64067@@ -245,7 +247,7 @@ bool each_symbol(bool (*fn)(const struct
64068 return true;
64069
64070 list_for_each_entry_rcu(mod, &modules, list) {
64071- struct symsearch arr[] = {
64072+ struct symsearch modarr[] = {
64073 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
64074 NOT_GPL_ONLY, false },
64075 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
64076@@ -267,7 +269,7 @@ bool each_symbol(bool (*fn)(const struct
64077 #endif
64078 };
64079
64080- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
64081+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
64082 return true;
64083 }
64084 return false;
64085@@ -442,7 +444,7 @@ static void *percpu_modalloc(unsigned lo
64086 void *ptr;
64087 int cpu;
64088
64089- if (align > PAGE_SIZE) {
64090+ if (align-1 >= PAGE_SIZE) {
64091 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
64092 name, align, PAGE_SIZE);
64093 align = PAGE_SIZE;
64094@@ -1158,7 +1160,7 @@ static const struct kernel_symbol *resol
64095 * /sys/module/foo/sections stuff
64096 * J. Corbet <corbet@lwn.net>
64097 */
64098-#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS)
64099+#if defined(CONFIG_KALLSYMS) && defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
64100
64101 static inline bool sect_empty(const Elf_Shdr *sect)
64102 {
64103@@ -1545,7 +1547,8 @@ static void free_module(struct module *m
64104 destroy_params(mod->kp, mod->num_kp);
64105
64106 /* This may be NULL, but that's OK */
64107- module_free(mod, mod->module_init);
64108+ module_free(mod, mod->module_init_rw);
64109+ module_free_exec(mod, mod->module_init_rx);
64110 kfree(mod->args);
64111 if (mod->percpu)
64112 percpu_modfree(mod->percpu);
64113@@ -1554,10 +1557,12 @@ static void free_module(struct module *m
64114 percpu_modfree(mod->refptr);
64115 #endif
64116 /* Free lock-classes: */
64117- lockdep_free_key_range(mod->module_core, mod->core_size);
64118+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
64119+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
64120
64121 /* Finally, free the core (containing the module structure) */
64122- module_free(mod, mod->module_core);
64123+ module_free_exec(mod, mod->module_core_rx);
64124+ module_free(mod, mod->module_core_rw);
64125
64126 #ifdef CONFIG_MPU
64127 update_protections(current->mm);
64128@@ -1628,8 +1633,32 @@ static int simplify_symbols(Elf_Shdr *se
64129 unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
64130 int ret = 0;
64131 const struct kernel_symbol *ksym;
64132+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64133+ int is_fs_load = 0;
64134+ int register_filesystem_found = 0;
64135+ char *p;
64136+
64137+ p = strstr(mod->args, "grsec_modharden_fs");
64138+
64139+ if (p) {
64140+ char *endptr = p + strlen("grsec_modharden_fs");
64141+ /* copy \0 as well */
64142+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
64143+ is_fs_load = 1;
64144+ }
64145+#endif
64146+
64147
64148 for (i = 1; i < n; i++) {
64149+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64150+ const char *name = strtab + sym[i].st_name;
64151+
64152+ /* it's a real shame this will never get ripped and copied
64153+ upstream! ;(
64154+ */
64155+ if (is_fs_load && !strcmp(name, "register_filesystem"))
64156+ register_filesystem_found = 1;
64157+#endif
64158 switch (sym[i].st_shndx) {
64159 case SHN_COMMON:
64160 /* We compiled with -fno-common. These are not
64161@@ -1651,7 +1680,9 @@ static int simplify_symbols(Elf_Shdr *se
64162 strtab + sym[i].st_name, mod);
64163 /* Ok if resolved. */
64164 if (ksym) {
64165+ pax_open_kernel();
64166 sym[i].st_value = ksym->value;
64167+ pax_close_kernel();
64168 break;
64169 }
64170
64171@@ -1670,11 +1701,20 @@ static int simplify_symbols(Elf_Shdr *se
64172 secbase = (unsigned long)mod->percpu;
64173 else
64174 secbase = sechdrs[sym[i].st_shndx].sh_addr;
64175+ pax_open_kernel();
64176 sym[i].st_value += secbase;
64177+ pax_close_kernel();
64178 break;
64179 }
64180 }
64181
64182+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64183+ if (is_fs_load && !register_filesystem_found) {
64184+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
64185+ ret = -EPERM;
64186+ }
64187+#endif
64188+
64189 return ret;
64190 }
64191
64192@@ -1731,11 +1771,12 @@ static void layout_sections(struct modul
64193 || s->sh_entsize != ~0UL
64194 || strstarts(secstrings + s->sh_name, ".init"))
64195 continue;
64196- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
64197+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64198+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
64199+ else
64200+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
64201 DEBUGP("\t%s\n", secstrings + s->sh_name);
64202 }
64203- if (m == 0)
64204- mod->core_text_size = mod->core_size;
64205 }
64206
64207 DEBUGP("Init section allocation order:\n");
64208@@ -1748,12 +1789,13 @@ static void layout_sections(struct modul
64209 || s->sh_entsize != ~0UL
64210 || !strstarts(secstrings + s->sh_name, ".init"))
64211 continue;
64212- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
64213- | INIT_OFFSET_MASK);
64214+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
64215+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
64216+ else
64217+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
64218+ s->sh_entsize |= INIT_OFFSET_MASK;
64219 DEBUGP("\t%s\n", secstrings + s->sh_name);
64220 }
64221- if (m == 0)
64222- mod->init_text_size = mod->init_size;
64223 }
64224 }
64225
64226@@ -1857,9 +1899,8 @@ static int is_exported(const char *name,
64227
64228 /* As per nm */
64229 static char elf_type(const Elf_Sym *sym,
64230- Elf_Shdr *sechdrs,
64231- const char *secstrings,
64232- struct module *mod)
64233+ const Elf_Shdr *sechdrs,
64234+ const char *secstrings)
64235 {
64236 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
64237 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
64238@@ -1934,7 +1975,7 @@ static unsigned long layout_symtab(struc
64239
64240 /* Put symbol section at end of init part of module. */
64241 symsect->sh_flags |= SHF_ALLOC;
64242- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
64243+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
64244 symindex) | INIT_OFFSET_MASK;
64245 DEBUGP("\t%s\n", secstrings + symsect->sh_name);
64246
64247@@ -1951,19 +1992,19 @@ static unsigned long layout_symtab(struc
64248 }
64249
64250 /* Append room for core symbols at end of core part. */
64251- symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
64252- mod->core_size = symoffs + ndst * sizeof(Elf_Sym);
64253+ symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
64254+ mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym);
64255
64256 /* Put string table section at end of init part of module. */
64257 strsect->sh_flags |= SHF_ALLOC;
64258- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
64259+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
64260 strindex) | INIT_OFFSET_MASK;
64261 DEBUGP("\t%s\n", secstrings + strsect->sh_name);
64262
64263 /* Append room for core symbols' strings at end of core part. */
64264- *pstroffs = mod->core_size;
64265+ *pstroffs = mod->core_size_rx;
64266 __set_bit(0, strmap);
64267- mod->core_size += bitmap_weight(strmap, strsect->sh_size);
64268+ mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size);
64269
64270 return symoffs;
64271 }
64272@@ -1987,12 +2028,14 @@ static void add_kallsyms(struct module *
64273 mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
64274 mod->strtab = (void *)sechdrs[strindex].sh_addr;
64275
64276+ pax_open_kernel();
64277+
64278 /* Set types up while we still have access to sections. */
64279 for (i = 0; i < mod->num_symtab; i++)
64280 mod->symtab[i].st_info
64281- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod);
64282+ = elf_type(&mod->symtab[i], sechdrs, secstrings);
64283
64284- mod->core_symtab = dst = mod->module_core + symoffs;
64285+ mod->core_symtab = dst = mod->module_core_rx + symoffs;
64286 src = mod->symtab;
64287 *dst = *src;
64288 for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) {
64289@@ -2004,10 +2047,12 @@ static void add_kallsyms(struct module *
64290 }
64291 mod->core_num_syms = ndst;
64292
64293- mod->core_strtab = s = mod->module_core + stroffs;
64294+ mod->core_strtab = s = mod->module_core_rx + stroffs;
64295 for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i)
64296 if (test_bit(i, strmap))
64297 *++s = mod->strtab[i];
64298+
64299+ pax_close_kernel();
64300 }
64301 #else
64302 static inline unsigned long layout_symtab(struct module *mod,
64303@@ -2044,16 +2089,30 @@ static void dynamic_debug_setup(struct _
64304 #endif
64305 }
64306
64307-static void *module_alloc_update_bounds(unsigned long size)
64308+static void *module_alloc_update_bounds_rw(unsigned long size)
64309 {
64310 void *ret = module_alloc(size);
64311
64312 if (ret) {
64313 /* Update module bounds. */
64314- if ((unsigned long)ret < module_addr_min)
64315- module_addr_min = (unsigned long)ret;
64316- if ((unsigned long)ret + size > module_addr_max)
64317- module_addr_max = (unsigned long)ret + size;
64318+ if ((unsigned long)ret < module_addr_min_rw)
64319+ module_addr_min_rw = (unsigned long)ret;
64320+ if ((unsigned long)ret + size > module_addr_max_rw)
64321+ module_addr_max_rw = (unsigned long)ret + size;
64322+ }
64323+ return ret;
64324+}
64325+
64326+static void *module_alloc_update_bounds_rx(unsigned long size)
64327+{
64328+ void *ret = module_alloc_exec(size);
64329+
64330+ if (ret) {
64331+ /* Update module bounds. */
64332+ if ((unsigned long)ret < module_addr_min_rx)
64333+ module_addr_min_rx = (unsigned long)ret;
64334+ if ((unsigned long)ret + size > module_addr_max_rx)
64335+ module_addr_max_rx = (unsigned long)ret + size;
64336 }
64337 return ret;
64338 }
64339@@ -2065,8 +2124,8 @@ static void kmemleak_load_module(struct
64340 unsigned int i;
64341
64342 /* only scan the sections containing data */
64343- kmemleak_scan_area(mod->module_core, (unsigned long)mod -
64344- (unsigned long)mod->module_core,
64345+ kmemleak_scan_area(mod->module_core_rw, (unsigned long)mod -
64346+ (unsigned long)mod->module_core_rw,
64347 sizeof(struct module), GFP_KERNEL);
64348
64349 for (i = 1; i < hdr->e_shnum; i++) {
64350@@ -2076,8 +2135,8 @@ static void kmemleak_load_module(struct
64351 && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
64352 continue;
64353
64354- kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
64355- (unsigned long)mod->module_core,
64356+ kmemleak_scan_area(mod->module_core_rw, sechdrs[i].sh_addr -
64357+ (unsigned long)mod->module_core_rw,
64358 sechdrs[i].sh_size, GFP_KERNEL);
64359 }
64360 }
64361@@ -2263,7 +2322,7 @@ static noinline struct module *load_modu
64362 secstrings, &stroffs, strmap);
64363
64364 /* Do the allocs. */
64365- ptr = module_alloc_update_bounds(mod->core_size);
64366+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
64367 /*
64368 * The pointer to this block is stored in the module structure
64369 * which is inside the block. Just mark it as not being a
64370@@ -2274,23 +2333,47 @@ static noinline struct module *load_modu
64371 err = -ENOMEM;
64372 goto free_percpu;
64373 }
64374- memset(ptr, 0, mod->core_size);
64375- mod->module_core = ptr;
64376+ memset(ptr, 0, mod->core_size_rw);
64377+ mod->module_core_rw = ptr;
64378
64379- ptr = module_alloc_update_bounds(mod->init_size);
64380+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
64381 /*
64382 * The pointer to this block is stored in the module structure
64383 * which is inside the block. This block doesn't need to be
64384 * scanned as it contains data and code that will be freed
64385 * after the module is initialized.
64386 */
64387- kmemleak_ignore(ptr);
64388- if (!ptr && mod->init_size) {
64389+ kmemleak_not_leak(ptr);
64390+ if (!ptr && mod->init_size_rw) {
64391+ err = -ENOMEM;
64392+ goto free_core_rw;
64393+ }
64394+ memset(ptr, 0, mod->init_size_rw);
64395+ mod->module_init_rw = ptr;
64396+
64397+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
64398+ kmemleak_not_leak(ptr);
64399+ if (!ptr) {
64400 err = -ENOMEM;
64401- goto free_core;
64402+ goto free_init_rw;
64403 }
64404- memset(ptr, 0, mod->init_size);
64405- mod->module_init = ptr;
64406+
64407+ pax_open_kernel();
64408+ memset(ptr, 0, mod->core_size_rx);
64409+ pax_close_kernel();
64410+ mod->module_core_rx = ptr;
64411+
64412+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
64413+ kmemleak_not_leak(ptr);
64414+ if (!ptr && mod->init_size_rx) {
64415+ err = -ENOMEM;
64416+ goto free_core_rx;
64417+ }
64418+
64419+ pax_open_kernel();
64420+ memset(ptr, 0, mod->init_size_rx);
64421+ pax_close_kernel();
64422+ mod->module_init_rx = ptr;
64423
64424 /* Transfer each section which specifies SHF_ALLOC */
64425 DEBUGP("final section addresses:\n");
64426@@ -2300,17 +2383,45 @@ static noinline struct module *load_modu
64427 if (!(sechdrs[i].sh_flags & SHF_ALLOC))
64428 continue;
64429
64430- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK)
64431- dest = mod->module_init
64432- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
64433- else
64434- dest = mod->module_core + sechdrs[i].sh_entsize;
64435+ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) {
64436+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
64437+ dest = mod->module_init_rw
64438+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
64439+ else
64440+ dest = mod->module_init_rx
64441+ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK);
64442+ } else {
64443+ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC))
64444+ dest = mod->module_core_rw + sechdrs[i].sh_entsize;
64445+ else
64446+ dest = mod->module_core_rx + sechdrs[i].sh_entsize;
64447+ }
64448+
64449+ if (sechdrs[i].sh_type != SHT_NOBITS) {
64450
64451- if (sechdrs[i].sh_type != SHT_NOBITS)
64452- memcpy(dest, (void *)sechdrs[i].sh_addr,
64453- sechdrs[i].sh_size);
64454+#ifdef CONFIG_PAX_KERNEXEC
64455+#ifdef CONFIG_X86_64
64456+ if ((sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_EXECINSTR))
64457+ set_memory_x((unsigned long)dest, (sechdrs[i].sh_size + PAGE_SIZE) >> PAGE_SHIFT);
64458+#endif
64459+ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) {
64460+ pax_open_kernel();
64461+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
64462+ pax_close_kernel();
64463+ } else
64464+#endif
64465+
64466+ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size);
64467+ }
64468 /* Update sh_addr to point to copy in image. */
64469- sechdrs[i].sh_addr = (unsigned long)dest;
64470+
64471+#ifdef CONFIG_PAX_KERNEXEC
64472+ if (sechdrs[i].sh_flags & SHF_EXECINSTR)
64473+ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest);
64474+ else
64475+#endif
64476+
64477+ sechdrs[i].sh_addr = (unsigned long)dest;
64478 DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name);
64479 }
64480 /* Module has been moved. */
64481@@ -2322,7 +2433,7 @@ static noinline struct module *load_modu
64482 mod->name);
64483 if (!mod->refptr) {
64484 err = -ENOMEM;
64485- goto free_init;
64486+ goto free_init_rx;
64487 }
64488 #endif
64489 /* Now we've moved module, initialize linked lists, etc. */
64490@@ -2351,6 +2462,31 @@ static noinline struct module *load_modu
64491 /* Set up MODINFO_ATTR fields */
64492 setup_modinfo(mod, sechdrs, infoindex);
64493
64494+ mod->args = args;
64495+
64496+#ifdef CONFIG_GRKERNSEC_MODHARDEN
64497+ {
64498+ char *p, *p2;
64499+
64500+ if (strstr(mod->args, "grsec_modharden_netdev")) {
64501+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
64502+ err = -EPERM;
64503+ goto cleanup;
64504+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
64505+ p += strlen("grsec_modharden_normal");
64506+ p2 = strstr(p, "_");
64507+ if (p2) {
64508+ *p2 = '\0';
64509+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
64510+ *p2 = '_';
64511+ }
64512+ err = -EPERM;
64513+ goto cleanup;
64514+ }
64515+ }
64516+#endif
64517+
64518+
64519 /* Fix up syms, so that st_value is a pointer to location. */
64520 err = simplify_symbols(sechdrs, symindex, strtab, versindex, pcpuindex,
64521 mod);
64522@@ -2431,8 +2567,8 @@ static noinline struct module *load_modu
64523
64524 /* Now do relocations. */
64525 for (i = 1; i < hdr->e_shnum; i++) {
64526- const char *strtab = (char *)sechdrs[strindex].sh_addr;
64527 unsigned int info = sechdrs[i].sh_info;
64528+ strtab = (char *)sechdrs[strindex].sh_addr;
64529
64530 /* Not a valid relocation section? */
64531 if (info >= hdr->e_shnum)
64532@@ -2493,16 +2629,15 @@ static noinline struct module *load_modu
64533 * Do it before processing of module parameters, so the module
64534 * can provide parameter accessor functions of its own.
64535 */
64536- if (mod->module_init)
64537- flush_icache_range((unsigned long)mod->module_init,
64538- (unsigned long)mod->module_init
64539- + mod->init_size);
64540- flush_icache_range((unsigned long)mod->module_core,
64541- (unsigned long)mod->module_core + mod->core_size);
64542+ if (mod->module_init_rx)
64543+ flush_icache_range((unsigned long)mod->module_init_rx,
64544+ (unsigned long)mod->module_init_rx
64545+ + mod->init_size_rx);
64546+ flush_icache_range((unsigned long)mod->module_core_rx,
64547+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
64548
64549 set_fs(old_fs);
64550
64551- mod->args = args;
64552 if (section_addr(hdr, sechdrs, secstrings, "__obsparm"))
64553 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
64554 mod->name);
64555@@ -2546,12 +2681,16 @@ static noinline struct module *load_modu
64556 free_unload:
64557 module_unload_free(mod);
64558 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
64559+ free_init_rx:
64560 percpu_modfree(mod->refptr);
64561- free_init:
64562 #endif
64563- module_free(mod, mod->module_init);
64564- free_core:
64565- module_free(mod, mod->module_core);
64566+ module_free_exec(mod, mod->module_init_rx);
64567+ free_core_rx:
64568+ module_free_exec(mod, mod->module_core_rx);
64569+ free_init_rw:
64570+ module_free(mod, mod->module_init_rw);
64571+ free_core_rw:
64572+ module_free(mod, mod->module_core_rw);
64573 /* mod will be freed with core. Don't access it beyond this line! */
64574 free_percpu:
64575 if (percpu)
64576@@ -2653,10 +2792,12 @@ SYSCALL_DEFINE3(init_module, void __user
64577 mod->symtab = mod->core_symtab;
64578 mod->strtab = mod->core_strtab;
64579 #endif
64580- module_free(mod, mod->module_init);
64581- mod->module_init = NULL;
64582- mod->init_size = 0;
64583- mod->init_text_size = 0;
64584+ module_free(mod, mod->module_init_rw);
64585+ module_free_exec(mod, mod->module_init_rx);
64586+ mod->module_init_rw = NULL;
64587+ mod->module_init_rx = NULL;
64588+ mod->init_size_rw = 0;
64589+ mod->init_size_rx = 0;
64590 mutex_unlock(&module_mutex);
64591
64592 return 0;
64593@@ -2687,10 +2828,16 @@ static const char *get_ksymbol(struct mo
64594 unsigned long nextval;
64595
64596 /* At worse, next value is at end of module */
64597- if (within_module_init(addr, mod))
64598- nextval = (unsigned long)mod->module_init+mod->init_text_size;
64599+ if (within_module_init_rx(addr, mod))
64600+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
64601+ else if (within_module_init_rw(addr, mod))
64602+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
64603+ else if (within_module_core_rx(addr, mod))
64604+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
64605+ else if (within_module_core_rw(addr, mod))
64606+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
64607 else
64608- nextval = (unsigned long)mod->module_core+mod->core_text_size;
64609+ return NULL;
64610
64611 /* Scan for closest preceeding symbol, and next symbol. (ELF
64612 starts real symbols at 1). */
64613@@ -2936,7 +3083,7 @@ static int m_show(struct seq_file *m, vo
64614 char buf[8];
64615
64616 seq_printf(m, "%s %u",
64617- mod->name, mod->init_size + mod->core_size);
64618+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
64619 print_unload_info(m, mod);
64620
64621 /* Informative for users. */
64622@@ -2945,7 +3092,7 @@ static int m_show(struct seq_file *m, vo
64623 mod->state == MODULE_STATE_COMING ? "Loading":
64624 "Live");
64625 /* Used by oprofile and other similar tools. */
64626- seq_printf(m, " 0x%p", mod->module_core);
64627+ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
64628
64629 /* Taints info */
64630 if (mod->taints)
64631@@ -2981,7 +3128,17 @@ static const struct file_operations proc
64632
64633 static int __init proc_modules_init(void)
64634 {
64635+#ifndef CONFIG_GRKERNSEC_HIDESYM
64636+#ifdef CONFIG_GRKERNSEC_PROC_USER
64637+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64638+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
64639+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
64640+#else
64641 proc_create("modules", 0, NULL, &proc_modules_operations);
64642+#endif
64643+#else
64644+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
64645+#endif
64646 return 0;
64647 }
64648 module_init(proc_modules_init);
64649@@ -3040,12 +3197,12 @@ struct module *__module_address(unsigned
64650 {
64651 struct module *mod;
64652
64653- if (addr < module_addr_min || addr > module_addr_max)
64654+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
64655+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
64656 return NULL;
64657
64658 list_for_each_entry_rcu(mod, &modules, list)
64659- if (within_module_core(addr, mod)
64660- || within_module_init(addr, mod))
64661+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
64662 return mod;
64663 return NULL;
64664 }
64665@@ -3079,11 +3236,20 @@ bool is_module_text_address(unsigned lon
64666 */
64667 struct module *__module_text_address(unsigned long addr)
64668 {
64669- struct module *mod = __module_address(addr);
64670+ struct module *mod;
64671+
64672+#ifdef CONFIG_X86_32
64673+ addr = ktla_ktva(addr);
64674+#endif
64675+
64676+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
64677+ return NULL;
64678+
64679+ mod = __module_address(addr);
64680+
64681 if (mod) {
64682 /* Make sure it's within the text section. */
64683- if (!within(addr, mod->module_init, mod->init_text_size)
64684- && !within(addr, mod->module_core, mod->core_text_size))
64685+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
64686 mod = NULL;
64687 }
64688 return mod;
64689diff -urNp linux-2.6.32.43/kernel/mutex.c linux-2.6.32.43/kernel/mutex.c
64690--- linux-2.6.32.43/kernel/mutex.c 2011-03-27 14:31:47.000000000 -0400
64691+++ linux-2.6.32.43/kernel/mutex.c 2011-04-17 15:56:46.000000000 -0400
64692@@ -169,7 +169,7 @@ __mutex_lock_common(struct mutex *lock,
64693 */
64694
64695 for (;;) {
64696- struct thread_info *owner;
64697+ struct task_struct *owner;
64698
64699 /*
64700 * If we own the BKL, then don't spin. The owner of
64701@@ -214,7 +214,7 @@ __mutex_lock_common(struct mutex *lock,
64702 spin_lock_mutex(&lock->wait_lock, flags);
64703
64704 debug_mutex_lock_common(lock, &waiter);
64705- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
64706+ debug_mutex_add_waiter(lock, &waiter, task);
64707
64708 /* add waiting tasks to the end of the waitqueue (FIFO): */
64709 list_add_tail(&waiter.list, &lock->wait_list);
64710@@ -243,8 +243,7 @@ __mutex_lock_common(struct mutex *lock,
64711 * TASK_UNINTERRUPTIBLE case.)
64712 */
64713 if (unlikely(signal_pending_state(state, task))) {
64714- mutex_remove_waiter(lock, &waiter,
64715- task_thread_info(task));
64716+ mutex_remove_waiter(lock, &waiter, task);
64717 mutex_release(&lock->dep_map, 1, ip);
64718 spin_unlock_mutex(&lock->wait_lock, flags);
64719
64720@@ -265,7 +264,7 @@ __mutex_lock_common(struct mutex *lock,
64721 done:
64722 lock_acquired(&lock->dep_map, ip);
64723 /* got the lock - rejoice! */
64724- mutex_remove_waiter(lock, &waiter, current_thread_info());
64725+ mutex_remove_waiter(lock, &waiter, task);
64726 mutex_set_owner(lock);
64727
64728 /* set it to 0 if there are no waiters left: */
64729diff -urNp linux-2.6.32.43/kernel/mutex-debug.c linux-2.6.32.43/kernel/mutex-debug.c
64730--- linux-2.6.32.43/kernel/mutex-debug.c 2011-03-27 14:31:47.000000000 -0400
64731+++ linux-2.6.32.43/kernel/mutex-debug.c 2011-04-17 15:56:46.000000000 -0400
64732@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mute
64733 }
64734
64735 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64736- struct thread_info *ti)
64737+ struct task_struct *task)
64738 {
64739 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
64740
64741 /* Mark the current thread as blocked on the lock: */
64742- ti->task->blocked_on = waiter;
64743+ task->blocked_on = waiter;
64744 }
64745
64746 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64747- struct thread_info *ti)
64748+ struct task_struct *task)
64749 {
64750 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
64751- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
64752- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
64753- ti->task->blocked_on = NULL;
64754+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
64755+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
64756+ task->blocked_on = NULL;
64757
64758 list_del_init(&waiter->list);
64759 waiter->task = NULL;
64760@@ -75,7 +75,7 @@ void debug_mutex_unlock(struct mutex *lo
64761 return;
64762
64763 DEBUG_LOCKS_WARN_ON(lock->magic != lock);
64764- DEBUG_LOCKS_WARN_ON(lock->owner != current_thread_info());
64765+ DEBUG_LOCKS_WARN_ON(lock->owner != current);
64766 DEBUG_LOCKS_WARN_ON(!lock->wait_list.prev && !lock->wait_list.next);
64767 mutex_clear_owner(lock);
64768 }
64769diff -urNp linux-2.6.32.43/kernel/mutex-debug.h linux-2.6.32.43/kernel/mutex-debug.h
64770--- linux-2.6.32.43/kernel/mutex-debug.h 2011-03-27 14:31:47.000000000 -0400
64771+++ linux-2.6.32.43/kernel/mutex-debug.h 2011-04-17 15:56:46.000000000 -0400
64772@@ -20,16 +20,16 @@ extern void debug_mutex_wake_waiter(stru
64773 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
64774 extern void debug_mutex_add_waiter(struct mutex *lock,
64775 struct mutex_waiter *waiter,
64776- struct thread_info *ti);
64777+ struct task_struct *task);
64778 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
64779- struct thread_info *ti);
64780+ struct task_struct *task);
64781 extern void debug_mutex_unlock(struct mutex *lock);
64782 extern void debug_mutex_init(struct mutex *lock, const char *name,
64783 struct lock_class_key *key);
64784
64785 static inline void mutex_set_owner(struct mutex *lock)
64786 {
64787- lock->owner = current_thread_info();
64788+ lock->owner = current;
64789 }
64790
64791 static inline void mutex_clear_owner(struct mutex *lock)
64792diff -urNp linux-2.6.32.43/kernel/mutex.h linux-2.6.32.43/kernel/mutex.h
64793--- linux-2.6.32.43/kernel/mutex.h 2011-03-27 14:31:47.000000000 -0400
64794+++ linux-2.6.32.43/kernel/mutex.h 2011-04-17 15:56:46.000000000 -0400
64795@@ -19,7 +19,7 @@
64796 #ifdef CONFIG_SMP
64797 static inline void mutex_set_owner(struct mutex *lock)
64798 {
64799- lock->owner = current_thread_info();
64800+ lock->owner = current;
64801 }
64802
64803 static inline void mutex_clear_owner(struct mutex *lock)
64804diff -urNp linux-2.6.32.43/kernel/panic.c linux-2.6.32.43/kernel/panic.c
64805--- linux-2.6.32.43/kernel/panic.c 2011-03-27 14:31:47.000000000 -0400
64806+++ linux-2.6.32.43/kernel/panic.c 2011-04-17 15:56:46.000000000 -0400
64807@@ -352,7 +352,7 @@ static void warn_slowpath_common(const c
64808 const char *board;
64809
64810 printk(KERN_WARNING "------------[ cut here ]------------\n");
64811- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
64812+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
64813 board = dmi_get_system_info(DMI_PRODUCT_NAME);
64814 if (board)
64815 printk(KERN_WARNING "Hardware name: %s\n", board);
64816@@ -392,7 +392,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
64817 */
64818 void __stack_chk_fail(void)
64819 {
64820- panic("stack-protector: Kernel stack is corrupted in: %p\n",
64821+ dump_stack();
64822+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
64823 __builtin_return_address(0));
64824 }
64825 EXPORT_SYMBOL(__stack_chk_fail);
64826diff -urNp linux-2.6.32.43/kernel/params.c linux-2.6.32.43/kernel/params.c
64827--- linux-2.6.32.43/kernel/params.c 2011-03-27 14:31:47.000000000 -0400
64828+++ linux-2.6.32.43/kernel/params.c 2011-04-17 15:56:46.000000000 -0400
64829@@ -725,7 +725,7 @@ static ssize_t module_attr_store(struct
64830 return ret;
64831 }
64832
64833-static struct sysfs_ops module_sysfs_ops = {
64834+static const struct sysfs_ops module_sysfs_ops = {
64835 .show = module_attr_show,
64836 .store = module_attr_store,
64837 };
64838@@ -739,7 +739,7 @@ static int uevent_filter(struct kset *ks
64839 return 0;
64840 }
64841
64842-static struct kset_uevent_ops module_uevent_ops = {
64843+static const struct kset_uevent_ops module_uevent_ops = {
64844 .filter = uevent_filter,
64845 };
64846
64847diff -urNp linux-2.6.32.43/kernel/perf_event.c linux-2.6.32.43/kernel/perf_event.c
64848--- linux-2.6.32.43/kernel/perf_event.c 2011-04-17 17:00:52.000000000 -0400
64849+++ linux-2.6.32.43/kernel/perf_event.c 2011-05-04 17:56:28.000000000 -0400
64850@@ -77,7 +77,7 @@ int sysctl_perf_event_mlock __read_mostl
64851 */
64852 int sysctl_perf_event_sample_rate __read_mostly = 100000;
64853
64854-static atomic64_t perf_event_id;
64855+static atomic64_unchecked_t perf_event_id;
64856
64857 /*
64858 * Lock for (sysadmin-configurable) event reservations:
64859@@ -1094,9 +1094,9 @@ static void __perf_event_sync_stat(struc
64860 * In order to keep per-task stats reliable we need to flip the event
64861 * values when we flip the contexts.
64862 */
64863- value = atomic64_read(&next_event->count);
64864- value = atomic64_xchg(&event->count, value);
64865- atomic64_set(&next_event->count, value);
64866+ value = atomic64_read_unchecked(&next_event->count);
64867+ value = atomic64_xchg_unchecked(&event->count, value);
64868+ atomic64_set_unchecked(&next_event->count, value);
64869
64870 swap(event->total_time_enabled, next_event->total_time_enabled);
64871 swap(event->total_time_running, next_event->total_time_running);
64872@@ -1552,7 +1552,7 @@ static u64 perf_event_read(struct perf_e
64873 update_event_times(event);
64874 }
64875
64876- return atomic64_read(&event->count);
64877+ return atomic64_read_unchecked(&event->count);
64878 }
64879
64880 /*
64881@@ -1790,11 +1790,11 @@ static int perf_event_read_group(struct
64882 values[n++] = 1 + leader->nr_siblings;
64883 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64884 values[n++] = leader->total_time_enabled +
64885- atomic64_read(&leader->child_total_time_enabled);
64886+ atomic64_read_unchecked(&leader->child_total_time_enabled);
64887 }
64888 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64889 values[n++] = leader->total_time_running +
64890- atomic64_read(&leader->child_total_time_running);
64891+ atomic64_read_unchecked(&leader->child_total_time_running);
64892 }
64893
64894 size = n * sizeof(u64);
64895@@ -1829,11 +1829,11 @@ static int perf_event_read_one(struct pe
64896 values[n++] = perf_event_read_value(event);
64897 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64898 values[n++] = event->total_time_enabled +
64899- atomic64_read(&event->child_total_time_enabled);
64900+ atomic64_read_unchecked(&event->child_total_time_enabled);
64901 }
64902 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64903 values[n++] = event->total_time_running +
64904- atomic64_read(&event->child_total_time_running);
64905+ atomic64_read_unchecked(&event->child_total_time_running);
64906 }
64907 if (read_format & PERF_FORMAT_ID)
64908 values[n++] = primary_event_id(event);
64909@@ -1903,7 +1903,7 @@ static unsigned int perf_poll(struct fil
64910 static void perf_event_reset(struct perf_event *event)
64911 {
64912 (void)perf_event_read(event);
64913- atomic64_set(&event->count, 0);
64914+ atomic64_set_unchecked(&event->count, 0);
64915 perf_event_update_userpage(event);
64916 }
64917
64918@@ -2079,15 +2079,15 @@ void perf_event_update_userpage(struct p
64919 ++userpg->lock;
64920 barrier();
64921 userpg->index = perf_event_index(event);
64922- userpg->offset = atomic64_read(&event->count);
64923+ userpg->offset = atomic64_read_unchecked(&event->count);
64924 if (event->state == PERF_EVENT_STATE_ACTIVE)
64925- userpg->offset -= atomic64_read(&event->hw.prev_count);
64926+ userpg->offset -= atomic64_read_unchecked(&event->hw.prev_count);
64927
64928 userpg->time_enabled = event->total_time_enabled +
64929- atomic64_read(&event->child_total_time_enabled);
64930+ atomic64_read_unchecked(&event->child_total_time_enabled);
64931
64932 userpg->time_running = event->total_time_running +
64933- atomic64_read(&event->child_total_time_running);
64934+ atomic64_read_unchecked(&event->child_total_time_running);
64935
64936 barrier();
64937 ++userpg->lock;
64938@@ -2903,14 +2903,14 @@ static void perf_output_read_one(struct
64939 u64 values[4];
64940 int n = 0;
64941
64942- values[n++] = atomic64_read(&event->count);
64943+ values[n++] = atomic64_read_unchecked(&event->count);
64944 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
64945 values[n++] = event->total_time_enabled +
64946- atomic64_read(&event->child_total_time_enabled);
64947+ atomic64_read_unchecked(&event->child_total_time_enabled);
64948 }
64949 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
64950 values[n++] = event->total_time_running +
64951- atomic64_read(&event->child_total_time_running);
64952+ atomic64_read_unchecked(&event->child_total_time_running);
64953 }
64954 if (read_format & PERF_FORMAT_ID)
64955 values[n++] = primary_event_id(event);
64956@@ -2940,7 +2940,7 @@ static void perf_output_read_group(struc
64957 if (leader != event)
64958 leader->pmu->read(leader);
64959
64960- values[n++] = atomic64_read(&leader->count);
64961+ values[n++] = atomic64_read_unchecked(&leader->count);
64962 if (read_format & PERF_FORMAT_ID)
64963 values[n++] = primary_event_id(leader);
64964
64965@@ -2952,7 +2952,7 @@ static void perf_output_read_group(struc
64966 if (sub != event)
64967 sub->pmu->read(sub);
64968
64969- values[n++] = atomic64_read(&sub->count);
64970+ values[n++] = atomic64_read_unchecked(&sub->count);
64971 if (read_format & PERF_FORMAT_ID)
64972 values[n++] = primary_event_id(sub);
64973
64974@@ -3787,7 +3787,7 @@ static void perf_swevent_add(struct perf
64975 {
64976 struct hw_perf_event *hwc = &event->hw;
64977
64978- atomic64_add(nr, &event->count);
64979+ atomic64_add_unchecked(nr, &event->count);
64980
64981 if (!hwc->sample_period)
64982 return;
64983@@ -4044,9 +4044,9 @@ static void cpu_clock_perf_event_update(
64984 u64 now;
64985
64986 now = cpu_clock(cpu);
64987- prev = atomic64_read(&event->hw.prev_count);
64988- atomic64_set(&event->hw.prev_count, now);
64989- atomic64_add(now - prev, &event->count);
64990+ prev = atomic64_read_unchecked(&event->hw.prev_count);
64991+ atomic64_set_unchecked(&event->hw.prev_count, now);
64992+ atomic64_add_unchecked(now - prev, &event->count);
64993 }
64994
64995 static int cpu_clock_perf_event_enable(struct perf_event *event)
64996@@ -4054,7 +4054,7 @@ static int cpu_clock_perf_event_enable(s
64997 struct hw_perf_event *hwc = &event->hw;
64998 int cpu = raw_smp_processor_id();
64999
65000- atomic64_set(&hwc->prev_count, cpu_clock(cpu));
65001+ atomic64_set_unchecked(&hwc->prev_count, cpu_clock(cpu));
65002 perf_swevent_start_hrtimer(event);
65003
65004 return 0;
65005@@ -4086,9 +4086,9 @@ static void task_clock_perf_event_update
65006 u64 prev;
65007 s64 delta;
65008
65009- prev = atomic64_xchg(&event->hw.prev_count, now);
65010+ prev = atomic64_xchg_unchecked(&event->hw.prev_count, now);
65011 delta = now - prev;
65012- atomic64_add(delta, &event->count);
65013+ atomic64_add_unchecked(delta, &event->count);
65014 }
65015
65016 static int task_clock_perf_event_enable(struct perf_event *event)
65017@@ -4098,7 +4098,7 @@ static int task_clock_perf_event_enable(
65018
65019 now = event->ctx->time;
65020
65021- atomic64_set(&hwc->prev_count, now);
65022+ atomic64_set_unchecked(&hwc->prev_count, now);
65023
65024 perf_swevent_start_hrtimer(event);
65025
65026@@ -4293,7 +4293,7 @@ perf_event_alloc(struct perf_event_attr
65027 event->parent = parent_event;
65028
65029 event->ns = get_pid_ns(current->nsproxy->pid_ns);
65030- event->id = atomic64_inc_return(&perf_event_id);
65031+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
65032
65033 event->state = PERF_EVENT_STATE_INACTIVE;
65034
65035@@ -4724,15 +4724,15 @@ static void sync_child_event(struct perf
65036 if (child_event->attr.inherit_stat)
65037 perf_event_read_event(child_event, child);
65038
65039- child_val = atomic64_read(&child_event->count);
65040+ child_val = atomic64_read_unchecked(&child_event->count);
65041
65042 /*
65043 * Add back the child's count to the parent's count:
65044 */
65045- atomic64_add(child_val, &parent_event->count);
65046- atomic64_add(child_event->total_time_enabled,
65047+ atomic64_add_unchecked(child_val, &parent_event->count);
65048+ atomic64_add_unchecked(child_event->total_time_enabled,
65049 &parent_event->child_total_time_enabled);
65050- atomic64_add(child_event->total_time_running,
65051+ atomic64_add_unchecked(child_event->total_time_running,
65052 &parent_event->child_total_time_running);
65053
65054 /*
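The atomic64 -> atomic64_unchecked conversions in kernel/perf_event.c support PaX's REFCOUNT feature: atomic_t/atomic64_t operations gain overflow detection, and counters that are purely statistical (event->count, the child time totals, perf_event_id) are moved to *_unchecked variants that keep plain wrapping semantics so they cannot trip the reference-count overflow check. The snippet below is a simplified userspace model of that split, not the kernel or PaX implementation; the names only mirror the patch.

/* Simplified userspace model of the checked/unchecked atomic split used by
 * REFCOUNT: the checked variant detects signed overflow, the unchecked one
 * keeps wrapping semantics for statistics such as perf's event->count. */
#include <stdint.h>
#include <stdio.h>

typedef struct { volatile int64_t counter; } atomic64_t;
typedef struct { volatile int64_t counter; } atomic64_unchecked_t;

static int64_t atomic64_add_return(int64_t i, atomic64_t *v)
{
        int64_t old = __atomic_fetch_add(&v->counter, i, __ATOMIC_SEQ_CST);

        /* model of the overflow check; the real code reacts to it */
        if (i > 0 && old > INT64_MAX - i)
                fprintf(stderr, "counter overflow detected\n");
        return (int64_t)((uint64_t)old + (uint64_t)i);
}

static int64_t atomic64_add_return_unchecked(int64_t i, atomic64_unchecked_t *v)
{
        /* no overflow check: wrapping is acceptable for statistics */
        return __atomic_add_fetch(&v->counter, i, __ATOMIC_SEQ_CST);
}

int main(void)
{
        atomic64_unchecked_t events = { 0 };

        atomic64_add_return_unchecked(1, &events);   /* like perf's event->count */
        printf("%lld\n", (long long)events.counter);
        return 0;
}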
65055diff -urNp linux-2.6.32.43/kernel/pid.c linux-2.6.32.43/kernel/pid.c
65056--- linux-2.6.32.43/kernel/pid.c 2011-04-22 19:16:29.000000000 -0400
65057+++ linux-2.6.32.43/kernel/pid.c 2011-07-14 19:15:33.000000000 -0400
65058@@ -33,6 +33,7 @@
65059 #include <linux/rculist.h>
65060 #include <linux/bootmem.h>
65061 #include <linux/hash.h>
65062+#include <linux/security.h>
65063 #include <linux/pid_namespace.h>
65064 #include <linux/init_task.h>
65065 #include <linux/syscalls.h>
65066@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT
65067
65068 int pid_max = PID_MAX_DEFAULT;
65069
65070-#define RESERVED_PIDS 300
65071+#define RESERVED_PIDS 500
65072
65073 int pid_max_min = RESERVED_PIDS + 1;
65074 int pid_max_max = PID_MAX_LIMIT;
65075@@ -383,7 +384,14 @@ EXPORT_SYMBOL(pid_task);
65076 */
65077 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
65078 {
65079- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65080+ struct task_struct *task;
65081+
65082+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
65083+
65084+ if (gr_pid_is_chrooted(task))
65085+ return NULL;
65086+
65087+ return task;
65088 }
65089
65090 struct task_struct *find_task_by_vpid(pid_t vnr)
65091@@ -391,6 +399,13 @@ struct task_struct *find_task_by_vpid(pi
65092 return find_task_by_pid_ns(vnr, current->nsproxy->pid_ns);
65093 }
65094
65095+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
65096+{
65097+ struct task_struct *task;
65098+
65099+ return pid_task(find_pid_ns(vnr, current->nsproxy->pid_ns), PIDTYPE_PID);
65100+}
65101+
65102 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
65103 {
65104 struct pid *pid;
65105diff -urNp linux-2.6.32.43/kernel/posix-cpu-timers.c linux-2.6.32.43/kernel/posix-cpu-timers.c
65106--- linux-2.6.32.43/kernel/posix-cpu-timers.c 2011-03-27 14:31:47.000000000 -0400
65107+++ linux-2.6.32.43/kernel/posix-cpu-timers.c 2011-08-06 09:33:44.000000000 -0400
65108@@ -6,6 +6,7 @@
65109 #include <linux/posix-timers.h>
65110 #include <linux/errno.h>
65111 #include <linux/math64.h>
65112+#include <linux/security.h>
65113 #include <asm/uaccess.h>
65114 #include <linux/kernel_stat.h>
65115 #include <trace/events/timer.h>
65116@@ -1697,7 +1698,7 @@ static long thread_cpu_nsleep_restart(st
65117
65118 static __init int init_posix_cpu_timers(void)
65119 {
65120- struct k_clock process = {
65121+ static struct k_clock process = {
65122 .clock_getres = process_cpu_clock_getres,
65123 .clock_get = process_cpu_clock_get,
65124 .clock_set = do_posix_clock_nosettime,
65125@@ -1705,7 +1706,7 @@ static __init int init_posix_cpu_timers(
65126 .nsleep = process_cpu_nsleep,
65127 .nsleep_restart = process_cpu_nsleep_restart,
65128 };
65129- struct k_clock thread = {
65130+ static struct k_clock thread = {
65131 .clock_getres = thread_cpu_clock_getres,
65132 .clock_get = thread_cpu_clock_get,
65133 .clock_set = do_posix_clock_nosettime,
65134diff -urNp linux-2.6.32.43/kernel/posix-timers.c linux-2.6.32.43/kernel/posix-timers.c
65135--- linux-2.6.32.43/kernel/posix-timers.c 2011-03-27 14:31:47.000000000 -0400
65136+++ linux-2.6.32.43/kernel/posix-timers.c 2011-08-06 09:34:14.000000000 -0400
65137@@ -42,6 +42,7 @@
65138 #include <linux/compiler.h>
65139 #include <linux/idr.h>
65140 #include <linux/posix-timers.h>
65141+#include <linux/grsecurity.h>
65142 #include <linux/syscalls.h>
65143 #include <linux/wait.h>
65144 #include <linux/workqueue.h>
65145@@ -131,7 +132,7 @@ static DEFINE_SPINLOCK(idr_lock);
65146 * which we beg off on and pass to do_sys_settimeofday().
65147 */
65148
65149-static struct k_clock posix_clocks[MAX_CLOCKS];
65150+static struct k_clock *posix_clocks[MAX_CLOCKS];
65151
65152 /*
65153 * These ones are defined below.
65154@@ -157,8 +158,8 @@ static inline void unlock_timer(struct k
65155 */
65156 #define CLOCK_DISPATCH(clock, call, arglist) \
65157 ((clock) < 0 ? posix_cpu_##call arglist : \
65158- (posix_clocks[clock].call != NULL \
65159- ? (*posix_clocks[clock].call) arglist : common_##call arglist))
65160+ (posix_clocks[clock]->call != NULL \
65161+ ? (*posix_clocks[clock]->call) arglist : common_##call arglist))
65162
65163 /*
65164 * Default clock hook functions when the struct k_clock passed
65165@@ -172,7 +173,7 @@ static inline int common_clock_getres(co
65166 struct timespec *tp)
65167 {
65168 tp->tv_sec = 0;
65169- tp->tv_nsec = posix_clocks[which_clock].res;
65170+ tp->tv_nsec = posix_clocks[which_clock]->res;
65171 return 0;
65172 }
65173
65174@@ -217,9 +218,11 @@ static inline int invalid_clockid(const
65175 return 0;
65176 if ((unsigned) which_clock >= MAX_CLOCKS)
65177 return 1;
65178- if (posix_clocks[which_clock].clock_getres != NULL)
65179+ if (!posix_clocks[which_clock])
65180 return 0;
65181- if (posix_clocks[which_clock].res != 0)
65182+ if (posix_clocks[which_clock]->clock_getres != NULL)
65183+ return 0;
65184+ if (posix_clocks[which_clock]->res != 0)
65185 return 0;
65186 return 1;
65187 }
65188@@ -266,29 +269,29 @@ int posix_get_coarse_res(const clockid_t
65189 */
65190 static __init int init_posix_timers(void)
65191 {
65192- struct k_clock clock_realtime = {
65193+ static struct k_clock clock_realtime = {
65194 .clock_getres = hrtimer_get_res,
65195 };
65196- struct k_clock clock_monotonic = {
65197+ static struct k_clock clock_monotonic = {
65198 .clock_getres = hrtimer_get_res,
65199 .clock_get = posix_ktime_get_ts,
65200 .clock_set = do_posix_clock_nosettime,
65201 };
65202- struct k_clock clock_monotonic_raw = {
65203+ static struct k_clock clock_monotonic_raw = {
65204 .clock_getres = hrtimer_get_res,
65205 .clock_get = posix_get_monotonic_raw,
65206 .clock_set = do_posix_clock_nosettime,
65207 .timer_create = no_timer_create,
65208 .nsleep = no_nsleep,
65209 };
65210- struct k_clock clock_realtime_coarse = {
65211+ static struct k_clock clock_realtime_coarse = {
65212 .clock_getres = posix_get_coarse_res,
65213 .clock_get = posix_get_realtime_coarse,
65214 .clock_set = do_posix_clock_nosettime,
65215 .timer_create = no_timer_create,
65216 .nsleep = no_nsleep,
65217 };
65218- struct k_clock clock_monotonic_coarse = {
65219+ static struct k_clock clock_monotonic_coarse = {
65220 .clock_getres = posix_get_coarse_res,
65221 .clock_get = posix_get_monotonic_coarse,
65222 .clock_set = do_posix_clock_nosettime,
65223@@ -296,6 +299,8 @@ static __init int init_posix_timers(void
65224 .nsleep = no_nsleep,
65225 };
65226
65227+ pax_track_stack();
65228+
65229 register_posix_clock(CLOCK_REALTIME, &clock_realtime);
65230 register_posix_clock(CLOCK_MONOTONIC, &clock_monotonic);
65231 register_posix_clock(CLOCK_MONOTONIC_RAW, &clock_monotonic_raw);
65232@@ -484,7 +489,7 @@ void register_posix_clock(const clockid_
65233 return;
65234 }
65235
65236- posix_clocks[clock_id] = *new_clock;
65237+ posix_clocks[clock_id] = new_clock;
65238 }
65239 EXPORT_SYMBOL_GPL(register_posix_clock);
65240
65241@@ -948,6 +953,13 @@ SYSCALL_DEFINE2(clock_settime, const clo
65242 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
65243 return -EFAULT;
65244
65245+ /* only the CLOCK_REALTIME clock can be set, all other clocks
65246+ have their clock_set fptr set to a nosettime dummy function
65247+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
65248+ call common_clock_set, which calls do_sys_settimeofday, which
65249+ we hook
65250+ */
65251+
65252 return CLOCK_DISPATCH(which_clock, clock_set, (which_clock, &new_tp));
65253 }
65254
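The posix-timers.c change turns posix_clocks from an array of k_clock structures that is copied into at registration time into an array of pointers, and the k_clock initializers in init_posix_timers() and init_posix_cpu_timers() become static so that the objects outlive the __init functions that register them. Registering by pointer is what lets the clock descriptors themselves sit in read-only data under PaX's constification. A minimal sketch of that register-by-pointer pattern follows; it is a simplified model, not the kernel API.

/* Minimal model of registering clock descriptors by pointer so the
 * descriptors themselves can be static (and potentially read-only). */
#include <stddef.h>
#include <stdio.h>

struct k_clock_model {
        const char *name;
        int (*clock_getres)(void);
};

#define MAX_CLOCKS_MODEL 4
static const struct k_clock_model *posix_clocks_model[MAX_CLOCKS_MODEL];

static void register_clock_model(int id, const struct k_clock_model *clk)
{
        if (id < 0 || id >= MAX_CLOCKS_MODEL)
                return;
        posix_clocks_model[id] = clk;        /* store the pointer, not a copy */
}

static int realtime_getres(void) { return 1; }

/* static: the object must remain valid after the init function returns */
static const struct k_clock_model clock_realtime_model = {
        .name          = "realtime",
        .clock_getres  = realtime_getres,
};

int main(void)
{
        register_clock_model(0, &clock_realtime_model);
        if (posix_clocks_model[0] && posix_clocks_model[0]->clock_getres)
                printf("%s res=%d\n", posix_clocks_model[0]->name,
                       posix_clocks_model[0]->clock_getres());
        return 0;
}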
65255diff -urNp linux-2.6.32.43/kernel/power/hibernate.c linux-2.6.32.43/kernel/power/hibernate.c
65256--- linux-2.6.32.43/kernel/power/hibernate.c 2011-03-27 14:31:47.000000000 -0400
65257+++ linux-2.6.32.43/kernel/power/hibernate.c 2011-04-17 15:56:46.000000000 -0400
65258@@ -48,14 +48,14 @@ enum {
65259
65260 static int hibernation_mode = HIBERNATION_SHUTDOWN;
65261
65262-static struct platform_hibernation_ops *hibernation_ops;
65263+static const struct platform_hibernation_ops *hibernation_ops;
65264
65265 /**
65266 * hibernation_set_ops - set the global hibernate operations
65267 * @ops: the hibernation operations to use in subsequent hibernation transitions
65268 */
65269
65270-void hibernation_set_ops(struct platform_hibernation_ops *ops)
65271+void hibernation_set_ops(const struct platform_hibernation_ops *ops)
65272 {
65273 if (ops && !(ops->begin && ops->end && ops->pre_snapshot
65274 && ops->prepare && ops->finish && ops->enter && ops->pre_restore
65275diff -urNp linux-2.6.32.43/kernel/power/poweroff.c linux-2.6.32.43/kernel/power/poweroff.c
65276--- linux-2.6.32.43/kernel/power/poweroff.c 2011-03-27 14:31:47.000000000 -0400
65277+++ linux-2.6.32.43/kernel/power/poweroff.c 2011-04-17 15:56:46.000000000 -0400
65278@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof
65279 .enable_mask = SYSRQ_ENABLE_BOOT,
65280 };
65281
65282-static int pm_sysrq_init(void)
65283+static int __init pm_sysrq_init(void)
65284 {
65285 register_sysrq_key('o', &sysrq_poweroff_op);
65286 return 0;
65287diff -urNp linux-2.6.32.43/kernel/power/process.c linux-2.6.32.43/kernel/power/process.c
65288--- linux-2.6.32.43/kernel/power/process.c 2011-03-27 14:31:47.000000000 -0400
65289+++ linux-2.6.32.43/kernel/power/process.c 2011-04-17 15:56:46.000000000 -0400
65290@@ -37,12 +37,15 @@ static int try_to_freeze_tasks(bool sig_
65291 struct timeval start, end;
65292 u64 elapsed_csecs64;
65293 unsigned int elapsed_csecs;
65294+ bool timedout = false;
65295
65296 do_gettimeofday(&start);
65297
65298 end_time = jiffies + TIMEOUT;
65299 do {
65300 todo = 0;
65301+ if (time_after(jiffies, end_time))
65302+ timedout = true;
65303 read_lock(&tasklist_lock);
65304 do_each_thread(g, p) {
65305 if (frozen(p) || !freezeable(p))
65306@@ -57,15 +60,17 @@ static int try_to_freeze_tasks(bool sig_
65307 * It is "frozen enough". If the task does wake
65308 * up, it will immediately call try_to_freeze.
65309 */
65310- if (!task_is_stopped_or_traced(p) &&
65311- !freezer_should_skip(p))
65312+ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) {
65313 todo++;
65314+ if (timedout) {
65315+ printk(KERN_ERR "Task refusing to freeze:\n");
65316+ sched_show_task(p);
65317+ }
65318+ }
65319 } while_each_thread(g, p);
65320 read_unlock(&tasklist_lock);
65321 yield(); /* Yield is okay here */
65322- if (time_after(jiffies, end_time))
65323- break;
65324- } while (todo);
65325+ } while (todo && !timedout);
65326
65327 do_gettimeofday(&end);
65328 elapsed_csecs64 = timeval_to_ns(&end) - timeval_to_ns(&start);
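The try_to_freeze_tasks() change replaces "break out silently once the timeout hits" with "make one final pass after the deadline, print a backtrace for every task still refusing to freeze, then stop". The following is a compact userspace model of that control-flow shape only; it is not the kernel code.

/* Shape of the reworked freeze loop: iterate until nothing is left to freeze
 * or one full pass has run after the deadline, and report holdouts on that
 * final pass. Simplified model. */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static bool past_deadline(time_t end) { return time(NULL) > end; }

static int freeze_all(const int *stubborn, int n, time_t end)
{
        int todo;
        bool timedout = false;

        do {
                todo = 0;
                if (past_deadline(end))
                        timedout = true;          /* one last reporting pass */
                for (int i = 0; i < n; i++) {
                        if (stubborn[i]) {
                                todo++;
                                if (timedout)
                                        printf("task %d refusing to freeze\n", i);
                        }
                }
        } while (todo && !timedout);

        return todo ? -1 : 0;
}

int main(void)
{
        int tasks[3] = { 0, 1, 0 };               /* task 1 never freezes */
        /* deadline already in the past, so the loop does one reporting pass */
        return freeze_all(tasks, 3, time(NULL) - 1) ? 1 : 0;
}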
65329diff -urNp linux-2.6.32.43/kernel/power/suspend.c linux-2.6.32.43/kernel/power/suspend.c
65330--- linux-2.6.32.43/kernel/power/suspend.c 2011-03-27 14:31:47.000000000 -0400
65331+++ linux-2.6.32.43/kernel/power/suspend.c 2011-04-17 15:56:46.000000000 -0400
65332@@ -23,13 +23,13 @@ const char *const pm_states[PM_SUSPEND_M
65333 [PM_SUSPEND_MEM] = "mem",
65334 };
65335
65336-static struct platform_suspend_ops *suspend_ops;
65337+static const struct platform_suspend_ops *suspend_ops;
65338
65339 /**
65340 * suspend_set_ops - Set the global suspend method table.
65341 * @ops: Pointer to ops structure.
65342 */
65343-void suspend_set_ops(struct platform_suspend_ops *ops)
65344+void suspend_set_ops(const struct platform_suspend_ops *ops)
65345 {
65346 mutex_lock(&pm_mutex);
65347 suspend_ops = ops;
65348diff -urNp linux-2.6.32.43/kernel/printk.c linux-2.6.32.43/kernel/printk.c
65349--- linux-2.6.32.43/kernel/printk.c 2011-03-27 14:31:47.000000000 -0400
65350+++ linux-2.6.32.43/kernel/printk.c 2011-04-17 15:56:46.000000000 -0400
65351@@ -278,6 +278,11 @@ int do_syslog(int type, char __user *buf
65352 char c;
65353 int error = 0;
65354
65355+#ifdef CONFIG_GRKERNSEC_DMESG
65356+ if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN))
65357+ return -EPERM;
65358+#endif
65359+
65360 error = security_syslog(type);
65361 if (error)
65362 return error;
65363diff -urNp linux-2.6.32.43/kernel/profile.c linux-2.6.32.43/kernel/profile.c
65364--- linux-2.6.32.43/kernel/profile.c 2011-03-27 14:31:47.000000000 -0400
65365+++ linux-2.6.32.43/kernel/profile.c 2011-05-04 17:56:28.000000000 -0400
65366@@ -39,7 +39,7 @@ struct profile_hit {
65367 /* Oprofile timer tick hook */
65368 static int (*timer_hook)(struct pt_regs *) __read_mostly;
65369
65370-static atomic_t *prof_buffer;
65371+static atomic_unchecked_t *prof_buffer;
65372 static unsigned long prof_len, prof_shift;
65373
65374 int prof_on __read_mostly;
65375@@ -283,7 +283,7 @@ static void profile_flip_buffers(void)
65376 hits[i].pc = 0;
65377 continue;
65378 }
65379- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65380+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65381 hits[i].hits = hits[i].pc = 0;
65382 }
65383 }
65384@@ -346,9 +346,9 @@ void profile_hits(int type, void *__pc,
65385 * Add the current hit(s) and flush the write-queue out
65386 * to the global buffer:
65387 */
65388- atomic_add(nr_hits, &prof_buffer[pc]);
65389+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
65390 for (i = 0; i < NR_PROFILE_HIT; ++i) {
65391- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
65392+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
65393 hits[i].pc = hits[i].hits = 0;
65394 }
65395 out:
65396@@ -426,7 +426,7 @@ void profile_hits(int type, void *__pc,
65397 if (prof_on != type || !prof_buffer)
65398 return;
65399 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
65400- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65401+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
65402 }
65403 #endif /* !CONFIG_SMP */
65404 EXPORT_SYMBOL_GPL(profile_hits);
65405@@ -517,7 +517,7 @@ read_profile(struct file *file, char __u
65406 return -EFAULT;
65407 buf++; p++; count--; read++;
65408 }
65409- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
65410+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
65411 if (copy_to_user(buf, (void *)pnt, count))
65412 return -EFAULT;
65413 read += count;
65414@@ -548,7 +548,7 @@ static ssize_t write_profile(struct file
65415 }
65416 #endif
65417 profile_discard_flip_buffers();
65418- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
65419+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
65420 return count;
65421 }
65422
65423diff -urNp linux-2.6.32.43/kernel/ptrace.c linux-2.6.32.43/kernel/ptrace.c
65424--- linux-2.6.32.43/kernel/ptrace.c 2011-03-27 14:31:47.000000000 -0400
65425+++ linux-2.6.32.43/kernel/ptrace.c 2011-05-22 23:02:06.000000000 -0400
65426@@ -117,7 +117,8 @@ int ptrace_check_attach(struct task_stru
65427 return ret;
65428 }
65429
65430-int __ptrace_may_access(struct task_struct *task, unsigned int mode)
65431+static int __ptrace_may_access(struct task_struct *task, unsigned int mode,
65432+ unsigned int log)
65433 {
65434 const struct cred *cred = current_cred(), *tcred;
65435
65436@@ -141,7 +142,9 @@ int __ptrace_may_access(struct task_stru
65437 cred->gid != tcred->egid ||
65438 cred->gid != tcred->sgid ||
65439 cred->gid != tcred->gid) &&
65440- !capable(CAP_SYS_PTRACE)) {
65441+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
65442+ (log && !capable(CAP_SYS_PTRACE)))
65443+ ) {
65444 rcu_read_unlock();
65445 return -EPERM;
65446 }
65447@@ -149,7 +152,9 @@ int __ptrace_may_access(struct task_stru
65448 smp_rmb();
65449 if (task->mm)
65450 dumpable = get_dumpable(task->mm);
65451- if (!dumpable && !capable(CAP_SYS_PTRACE))
65452+ if (!dumpable &&
65453+ ((!log && !capable_nolog(CAP_SYS_PTRACE)) ||
65454+ (log && !capable(CAP_SYS_PTRACE))))
65455 return -EPERM;
65456
65457 return security_ptrace_access_check(task, mode);
65458@@ -159,7 +164,16 @@ bool ptrace_may_access(struct task_struc
65459 {
65460 int err;
65461 task_lock(task);
65462- err = __ptrace_may_access(task, mode);
65463+ err = __ptrace_may_access(task, mode, 0);
65464+ task_unlock(task);
65465+ return !err;
65466+}
65467+
65468+bool ptrace_may_access_log(struct task_struct *task, unsigned int mode)
65469+{
65470+ int err;
65471+ task_lock(task);
65472+ err = __ptrace_may_access(task, mode, 1);
65473 task_unlock(task);
65474 return !err;
65475 }
65476@@ -186,7 +200,7 @@ int ptrace_attach(struct task_struct *ta
65477 goto out;
65478
65479 task_lock(task);
65480- retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
65481+ retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH, 1);
65482 task_unlock(task);
65483 if (retval)
65484 goto unlock_creds;
65485@@ -199,7 +213,7 @@ int ptrace_attach(struct task_struct *ta
65486 goto unlock_tasklist;
65487
65488 task->ptrace = PT_PTRACED;
65489- if (capable(CAP_SYS_PTRACE))
65490+ if (capable_nolog(CAP_SYS_PTRACE))
65491 task->ptrace |= PT_PTRACE_CAP;
65492
65493 __ptrace_link(task, current);
65494@@ -351,6 +365,8 @@ int ptrace_readdata(struct task_struct *
65495 {
65496 int copied = 0;
65497
65498+ pax_track_stack();
65499+
65500 while (len > 0) {
65501 char buf[128];
65502 int this_len, retval;
65503@@ -376,6 +392,8 @@ int ptrace_writedata(struct task_struct
65504 {
65505 int copied = 0;
65506
65507+ pax_track_stack();
65508+
65509 while (len > 0) {
65510 char buf[128];
65511 int this_len, retval;
65512@@ -517,6 +535,8 @@ int ptrace_request(struct task_struct *c
65513 int ret = -EIO;
65514 siginfo_t siginfo;
65515
65516+ pax_track_stack();
65517+
65518 switch (request) {
65519 case PTRACE_PEEKTEXT:
65520 case PTRACE_PEEKDATA:
65521@@ -532,18 +552,18 @@ int ptrace_request(struct task_struct *c
65522 ret = ptrace_setoptions(child, data);
65523 break;
65524 case PTRACE_GETEVENTMSG:
65525- ret = put_user(child->ptrace_message, (unsigned long __user *) data);
65526+ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data);
65527 break;
65528
65529 case PTRACE_GETSIGINFO:
65530 ret = ptrace_getsiginfo(child, &siginfo);
65531 if (!ret)
65532- ret = copy_siginfo_to_user((siginfo_t __user *) data,
65533+ ret = copy_siginfo_to_user((__force siginfo_t __user *) data,
65534 &siginfo);
65535 break;
65536
65537 case PTRACE_SETSIGINFO:
65538- if (copy_from_user(&siginfo, (siginfo_t __user *) data,
65539+ if (copy_from_user(&siginfo, (__force siginfo_t __user *) data,
65540 sizeof siginfo))
65541 ret = -EFAULT;
65542 else
65543@@ -621,14 +641,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l
65544 goto out;
65545 }
65546
65547+ if (gr_handle_ptrace(child, request)) {
65548+ ret = -EPERM;
65549+ goto out_put_task_struct;
65550+ }
65551+
65552 if (request == PTRACE_ATTACH) {
65553 ret = ptrace_attach(child);
65554 /*
65555 * Some architectures need to do book-keeping after
65556 * a ptrace attach.
65557 */
65558- if (!ret)
65559+ if (!ret) {
65560 arch_ptrace_attach(child);
65561+ gr_audit_ptrace(child);
65562+ }
65563 goto out_put_task_struct;
65564 }
65565
65566@@ -653,7 +680,7 @@ int generic_ptrace_peekdata(struct task_
65567 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
65568 if (copied != sizeof(tmp))
65569 return -EIO;
65570- return put_user(tmp, (unsigned long __user *)data);
65571+ return put_user(tmp, (__force unsigned long __user *)data);
65572 }
65573
65574 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
65575@@ -675,6 +702,8 @@ int compat_ptrace_request(struct task_st
65576 siginfo_t siginfo;
65577 int ret;
65578
65579+ pax_track_stack();
65580+
65581 switch (request) {
65582 case PTRACE_PEEKTEXT:
65583 case PTRACE_PEEKDATA:
65584@@ -740,14 +769,21 @@ asmlinkage long compat_sys_ptrace(compat
65585 goto out;
65586 }
65587
65588+ if (gr_handle_ptrace(child, request)) {
65589+ ret = -EPERM;
65590+ goto out_put_task_struct;
65591+ }
65592+
65593 if (request == PTRACE_ATTACH) {
65594 ret = ptrace_attach(child);
65595 /*
65596 * Some architectures need to do book-keeping after
65597 * a ptrace attach.
65598 */
65599- if (!ret)
65600+ if (!ret) {
65601 arch_ptrace_attach(child);
65602+ gr_audit_ptrace(child);
65603+ }
65604 goto out_put_task_struct;
65605 }
65606
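In kernel/ptrace.c, __ptrace_may_access() gains a log parameter: the speculative probe behind ptrace_may_access() uses grsecurity's capable_nolog(), a capability check that does not generate an audit entry, while the real attach path keeps the logging capable() and additionally calls gr_audit_ptrace() on success. The sketch below illustrates only the log/nolog split as a pattern; capable_nolog() and the gr_* hooks are grsecurity functions whose implementation is not shown here.

/* Pattern only: one permission check, two kinds of caller, and a flag that
 * decides whether a denial is worth logging. Simplified model. */
#include <stdbool.h>
#include <stdio.h>

static bool has_cap(void)       { return false; }  /* stand-in for capable()       */
static bool has_cap_nolog(void) { return false; }  /* stand-in for capable_nolog() */

static int may_access(bool privilege_needed, bool log)
{
        if (!privilege_needed)
                return 0;
        if (log ? has_cap() : has_cap_nolog())
                return 0;
        if (log)
                fprintf(stderr, "denied: missing CAP_SYS_PTRACE\n");
        return -1;                                  /* -EPERM in the kernel */
}

int main(void)
{
        may_access(true, false);   /* ptrace_may_access(): silent probe        */
        may_access(true, true);    /* ptrace_attach(): denial should be logged */
        return 0;
}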
65607diff -urNp linux-2.6.32.43/kernel/rcutorture.c linux-2.6.32.43/kernel/rcutorture.c
65608--- linux-2.6.32.43/kernel/rcutorture.c 2011-03-27 14:31:47.000000000 -0400
65609+++ linux-2.6.32.43/kernel/rcutorture.c 2011-05-04 17:56:28.000000000 -0400
65610@@ -118,12 +118,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_
65611 { 0 };
65612 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
65613 { 0 };
65614-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65615-static atomic_t n_rcu_torture_alloc;
65616-static atomic_t n_rcu_torture_alloc_fail;
65617-static atomic_t n_rcu_torture_free;
65618-static atomic_t n_rcu_torture_mberror;
65619-static atomic_t n_rcu_torture_error;
65620+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
65621+static atomic_unchecked_t n_rcu_torture_alloc;
65622+static atomic_unchecked_t n_rcu_torture_alloc_fail;
65623+static atomic_unchecked_t n_rcu_torture_free;
65624+static atomic_unchecked_t n_rcu_torture_mberror;
65625+static atomic_unchecked_t n_rcu_torture_error;
65626 static long n_rcu_torture_timers;
65627 static struct list_head rcu_torture_removed;
65628 static cpumask_var_t shuffle_tmp_mask;
65629@@ -187,11 +187,11 @@ rcu_torture_alloc(void)
65630
65631 spin_lock_bh(&rcu_torture_lock);
65632 if (list_empty(&rcu_torture_freelist)) {
65633- atomic_inc(&n_rcu_torture_alloc_fail);
65634+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
65635 spin_unlock_bh(&rcu_torture_lock);
65636 return NULL;
65637 }
65638- atomic_inc(&n_rcu_torture_alloc);
65639+ atomic_inc_unchecked(&n_rcu_torture_alloc);
65640 p = rcu_torture_freelist.next;
65641 list_del_init(p);
65642 spin_unlock_bh(&rcu_torture_lock);
65643@@ -204,7 +204,7 @@ rcu_torture_alloc(void)
65644 static void
65645 rcu_torture_free(struct rcu_torture *p)
65646 {
65647- atomic_inc(&n_rcu_torture_free);
65648+ atomic_inc_unchecked(&n_rcu_torture_free);
65649 spin_lock_bh(&rcu_torture_lock);
65650 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
65651 spin_unlock_bh(&rcu_torture_lock);
65652@@ -319,7 +319,7 @@ rcu_torture_cb(struct rcu_head *p)
65653 i = rp->rtort_pipe_count;
65654 if (i > RCU_TORTURE_PIPE_LEN)
65655 i = RCU_TORTURE_PIPE_LEN;
65656- atomic_inc(&rcu_torture_wcount[i]);
65657+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65658 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65659 rp->rtort_mbtest = 0;
65660 rcu_torture_free(rp);
65661@@ -359,7 +359,7 @@ static void rcu_sync_torture_deferred_fr
65662 i = rp->rtort_pipe_count;
65663 if (i > RCU_TORTURE_PIPE_LEN)
65664 i = RCU_TORTURE_PIPE_LEN;
65665- atomic_inc(&rcu_torture_wcount[i]);
65666+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65667 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
65668 rp->rtort_mbtest = 0;
65669 list_del(&rp->rtort_free);
65670@@ -653,7 +653,7 @@ rcu_torture_writer(void *arg)
65671 i = old_rp->rtort_pipe_count;
65672 if (i > RCU_TORTURE_PIPE_LEN)
65673 i = RCU_TORTURE_PIPE_LEN;
65674- atomic_inc(&rcu_torture_wcount[i]);
65675+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
65676 old_rp->rtort_pipe_count++;
65677 cur_ops->deferred_free(old_rp);
65678 }
65679@@ -718,7 +718,7 @@ static void rcu_torture_timer(unsigned l
65680 return;
65681 }
65682 if (p->rtort_mbtest == 0)
65683- atomic_inc(&n_rcu_torture_mberror);
65684+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65685 spin_lock(&rand_lock);
65686 cur_ops->read_delay(&rand);
65687 n_rcu_torture_timers++;
65688@@ -776,7 +776,7 @@ rcu_torture_reader(void *arg)
65689 continue;
65690 }
65691 if (p->rtort_mbtest == 0)
65692- atomic_inc(&n_rcu_torture_mberror);
65693+ atomic_inc_unchecked(&n_rcu_torture_mberror);
65694 cur_ops->read_delay(&rand);
65695 preempt_disable();
65696 pipe_count = p->rtort_pipe_count;
65697@@ -834,17 +834,17 @@ rcu_torture_printk(char *page)
65698 rcu_torture_current,
65699 rcu_torture_current_version,
65700 list_empty(&rcu_torture_freelist),
65701- atomic_read(&n_rcu_torture_alloc),
65702- atomic_read(&n_rcu_torture_alloc_fail),
65703- atomic_read(&n_rcu_torture_free),
65704- atomic_read(&n_rcu_torture_mberror),
65705+ atomic_read_unchecked(&n_rcu_torture_alloc),
65706+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
65707+ atomic_read_unchecked(&n_rcu_torture_free),
65708+ atomic_read_unchecked(&n_rcu_torture_mberror),
65709 n_rcu_torture_timers);
65710- if (atomic_read(&n_rcu_torture_mberror) != 0)
65711+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0)
65712 cnt += sprintf(&page[cnt], " !!!");
65713 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
65714 if (i > 1) {
65715 cnt += sprintf(&page[cnt], "!!! ");
65716- atomic_inc(&n_rcu_torture_error);
65717+ atomic_inc_unchecked(&n_rcu_torture_error);
65718 WARN_ON_ONCE(1);
65719 }
65720 cnt += sprintf(&page[cnt], "Reader Pipe: ");
65721@@ -858,7 +858,7 @@ rcu_torture_printk(char *page)
65722 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
65723 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65724 cnt += sprintf(&page[cnt], " %d",
65725- atomic_read(&rcu_torture_wcount[i]));
65726+ atomic_read_unchecked(&rcu_torture_wcount[i]));
65727 }
65728 cnt += sprintf(&page[cnt], "\n");
65729 if (cur_ops->stats)
65730@@ -1084,7 +1084,7 @@ rcu_torture_cleanup(void)
65731
65732 if (cur_ops->cleanup)
65733 cur_ops->cleanup();
65734- if (atomic_read(&n_rcu_torture_error))
65735+ if (atomic_read_unchecked(&n_rcu_torture_error))
65736 rcu_torture_print_module_parms("End of test: FAILURE");
65737 else
65738 rcu_torture_print_module_parms("End of test: SUCCESS");
65739@@ -1138,13 +1138,13 @@ rcu_torture_init(void)
65740
65741 rcu_torture_current = NULL;
65742 rcu_torture_current_version = 0;
65743- atomic_set(&n_rcu_torture_alloc, 0);
65744- atomic_set(&n_rcu_torture_alloc_fail, 0);
65745- atomic_set(&n_rcu_torture_free, 0);
65746- atomic_set(&n_rcu_torture_mberror, 0);
65747- atomic_set(&n_rcu_torture_error, 0);
65748+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
65749+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
65750+ atomic_set_unchecked(&n_rcu_torture_free, 0);
65751+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
65752+ atomic_set_unchecked(&n_rcu_torture_error, 0);
65753 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
65754- atomic_set(&rcu_torture_wcount[i], 0);
65755+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
65756 for_each_possible_cpu(cpu) {
65757 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
65758 per_cpu(rcu_torture_count, cpu)[i] = 0;
65759diff -urNp linux-2.6.32.43/kernel/rcutree.c linux-2.6.32.43/kernel/rcutree.c
65760--- linux-2.6.32.43/kernel/rcutree.c 2011-03-27 14:31:47.000000000 -0400
65761+++ linux-2.6.32.43/kernel/rcutree.c 2011-04-17 15:56:46.000000000 -0400
65762@@ -1303,7 +1303,7 @@ __rcu_process_callbacks(struct rcu_state
65763 /*
65764 * Do softirq processing for the current CPU.
65765 */
65766-static void rcu_process_callbacks(struct softirq_action *unused)
65767+static void rcu_process_callbacks(void)
65768 {
65769 /*
65770 * Memory references from any prior RCU read-side critical sections
65771diff -urNp linux-2.6.32.43/kernel/rcutree_plugin.h linux-2.6.32.43/kernel/rcutree_plugin.h
65772--- linux-2.6.32.43/kernel/rcutree_plugin.h 2011-03-27 14:31:47.000000000 -0400
65773+++ linux-2.6.32.43/kernel/rcutree_plugin.h 2011-04-17 15:56:46.000000000 -0400
65774@@ -145,7 +145,7 @@ static void rcu_preempt_note_context_swi
65775 */
65776 void __rcu_read_lock(void)
65777 {
65778- ACCESS_ONCE(current->rcu_read_lock_nesting)++;
65779+ ACCESS_ONCE_RW(current->rcu_read_lock_nesting)++;
65780 barrier(); /* needed if we ever invoke rcu_read_lock in rcutree.c */
65781 }
65782 EXPORT_SYMBOL_GPL(__rcu_read_lock);
65783@@ -251,7 +251,7 @@ void __rcu_read_unlock(void)
65784 struct task_struct *t = current;
65785
65786 barrier(); /* needed if we ever invoke rcu_read_unlock in rcutree.c */
65787- if (--ACCESS_ONCE(t->rcu_read_lock_nesting) == 0 &&
65788+ if (--ACCESS_ONCE_RW(t->rcu_read_lock_nesting) == 0 &&
65789 unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
65790 rcu_read_unlock_special(t);
65791 }
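ACCESS_ONCE_RW is needed in rcutree_plugin.h because the patch constifies ACCESS_ONCE() elsewhere: the read-only form casts through a const volatile pointer, so accidental writes through it become compile errors, and legitimate write sites (like the rcu_read_lock_nesting updates above) must switch to the RW variant. The sketch below assumes the usual PaX form of these macros and uses _MODEL names to make clear it is an illustration, not the patch's definitions.

/* Sketch of the const/RW split assumed by the ACCESS_ONCE_RW conversions. */
#define ACCESS_ONCE_MODEL(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW_MODEL(x) (*(volatile __typeof__(x) *)&(x))

static int nesting;

static void rcu_read_lock_model(void)
{
        ACCESS_ONCE_RW_MODEL(nesting)++;   /* a write: needs the RW form    */
}

static int rcu_read_nesting_model(void)
{
        return ACCESS_ONCE_MODEL(nesting); /* read-only use compiles as-is  */
}

int main(void)
{
        rcu_read_lock_model();
        return rcu_read_nesting_model() == 1 ? 0 : 1;
}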
65792diff -urNp linux-2.6.32.43/kernel/relay.c linux-2.6.32.43/kernel/relay.c
65793--- linux-2.6.32.43/kernel/relay.c 2011-03-27 14:31:47.000000000 -0400
65794+++ linux-2.6.32.43/kernel/relay.c 2011-05-16 21:46:57.000000000 -0400
65795@@ -1222,7 +1222,7 @@ static int subbuf_splice_actor(struct fi
65796 unsigned int flags,
65797 int *nonpad_ret)
65798 {
65799- unsigned int pidx, poff, total_len, subbuf_pages, nr_pages, ret;
65800+ unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;
65801 struct rchan_buf *rbuf = in->private_data;
65802 unsigned int subbuf_size = rbuf->chan->subbuf_size;
65803 uint64_t pos = (uint64_t) *ppos;
65804@@ -1241,6 +1241,9 @@ static int subbuf_splice_actor(struct fi
65805 .ops = &relay_pipe_buf_ops,
65806 .spd_release = relay_page_release,
65807 };
65808+ ssize_t ret;
65809+
65810+ pax_track_stack();
65811
65812 if (rbuf->subbufs_produced == rbuf->subbufs_consumed)
65813 return 0;
65814diff -urNp linux-2.6.32.43/kernel/resource.c linux-2.6.32.43/kernel/resource.c
65815--- linux-2.6.32.43/kernel/resource.c 2011-03-27 14:31:47.000000000 -0400
65816+++ linux-2.6.32.43/kernel/resource.c 2011-04-17 15:56:46.000000000 -0400
65817@@ -132,8 +132,18 @@ static const struct file_operations proc
65818
65819 static int __init ioresources_init(void)
65820 {
65821+#ifdef CONFIG_GRKERNSEC_PROC_ADD
65822+#ifdef CONFIG_GRKERNSEC_PROC_USER
65823+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
65824+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
65825+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
65826+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
65827+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
65828+#endif
65829+#else
65830 proc_create("ioports", 0, NULL, &proc_ioports_operations);
65831 proc_create("iomem", 0, NULL, &proc_iomem_operations);
65832+#endif
65833 return 0;
65834 }
65835 __initcall(ioresources_init);
65836diff -urNp linux-2.6.32.43/kernel/rtmutex.c linux-2.6.32.43/kernel/rtmutex.c
65837--- linux-2.6.32.43/kernel/rtmutex.c 2011-03-27 14:31:47.000000000 -0400
65838+++ linux-2.6.32.43/kernel/rtmutex.c 2011-04-17 15:56:46.000000000 -0400
65839@@ -511,7 +511,7 @@ static void wakeup_next_waiter(struct rt
65840 */
65841 spin_lock_irqsave(&pendowner->pi_lock, flags);
65842
65843- WARN_ON(!pendowner->pi_blocked_on);
65844+ BUG_ON(!pendowner->pi_blocked_on);
65845 WARN_ON(pendowner->pi_blocked_on != waiter);
65846 WARN_ON(pendowner->pi_blocked_on->lock != lock);
65847
65848diff -urNp linux-2.6.32.43/kernel/rtmutex-tester.c linux-2.6.32.43/kernel/rtmutex-tester.c
65849--- linux-2.6.32.43/kernel/rtmutex-tester.c 2011-03-27 14:31:47.000000000 -0400
65850+++ linux-2.6.32.43/kernel/rtmutex-tester.c 2011-05-04 17:56:28.000000000 -0400
65851@@ -21,7 +21,7 @@
65852 #define MAX_RT_TEST_MUTEXES 8
65853
65854 static spinlock_t rttest_lock;
65855-static atomic_t rttest_event;
65856+static atomic_unchecked_t rttest_event;
65857
65858 struct test_thread_data {
65859 int opcode;
65860@@ -64,7 +64,7 @@ static int handle_op(struct test_thread_
65861
65862 case RTTEST_LOCKCONT:
65863 td->mutexes[td->opdata] = 1;
65864- td->event = atomic_add_return(1, &rttest_event);
65865+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65866 return 0;
65867
65868 case RTTEST_RESET:
65869@@ -82,7 +82,7 @@ static int handle_op(struct test_thread_
65870 return 0;
65871
65872 case RTTEST_RESETEVENT:
65873- atomic_set(&rttest_event, 0);
65874+ atomic_set_unchecked(&rttest_event, 0);
65875 return 0;
65876
65877 default:
65878@@ -99,9 +99,9 @@ static int handle_op(struct test_thread_
65879 return ret;
65880
65881 td->mutexes[id] = 1;
65882- td->event = atomic_add_return(1, &rttest_event);
65883+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65884 rt_mutex_lock(&mutexes[id]);
65885- td->event = atomic_add_return(1, &rttest_event);
65886+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65887 td->mutexes[id] = 4;
65888 return 0;
65889
65890@@ -112,9 +112,9 @@ static int handle_op(struct test_thread_
65891 return ret;
65892
65893 td->mutexes[id] = 1;
65894- td->event = atomic_add_return(1, &rttest_event);
65895+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65896 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
65897- td->event = atomic_add_return(1, &rttest_event);
65898+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65899 td->mutexes[id] = ret ? 0 : 4;
65900 return ret ? -EINTR : 0;
65901
65902@@ -123,9 +123,9 @@ static int handle_op(struct test_thread_
65903 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
65904 return ret;
65905
65906- td->event = atomic_add_return(1, &rttest_event);
65907+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65908 rt_mutex_unlock(&mutexes[id]);
65909- td->event = atomic_add_return(1, &rttest_event);
65910+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65911 td->mutexes[id] = 0;
65912 return 0;
65913
65914@@ -187,7 +187,7 @@ void schedule_rt_mutex_test(struct rt_mu
65915 break;
65916
65917 td->mutexes[dat] = 2;
65918- td->event = atomic_add_return(1, &rttest_event);
65919+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65920 break;
65921
65922 case RTTEST_LOCKBKL:
65923@@ -208,7 +208,7 @@ void schedule_rt_mutex_test(struct rt_mu
65924 return;
65925
65926 td->mutexes[dat] = 3;
65927- td->event = atomic_add_return(1, &rttest_event);
65928+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65929 break;
65930
65931 case RTTEST_LOCKNOWAIT:
65932@@ -220,7 +220,7 @@ void schedule_rt_mutex_test(struct rt_mu
65933 return;
65934
65935 td->mutexes[dat] = 1;
65936- td->event = atomic_add_return(1, &rttest_event);
65937+ td->event = atomic_add_return_unchecked(1, &rttest_event);
65938 return;
65939
65940 case RTTEST_LOCKBKL:
65941diff -urNp linux-2.6.32.43/kernel/sched.c linux-2.6.32.43/kernel/sched.c
65942--- linux-2.6.32.43/kernel/sched.c 2011-03-27 14:31:47.000000000 -0400
65943+++ linux-2.6.32.43/kernel/sched.c 2011-05-22 23:02:06.000000000 -0400
65944@@ -5043,7 +5043,7 @@ out:
65945 * In CONFIG_NO_HZ case, the idle load balance owner will do the
65946 * rebalancing for all the cpus for whom scheduler ticks are stopped.
65947 */
65948-static void run_rebalance_domains(struct softirq_action *h)
65949+static void run_rebalance_domains(void)
65950 {
65951 int this_cpu = smp_processor_id();
65952 struct rq *this_rq = cpu_rq(this_cpu);
65953@@ -5700,6 +5700,8 @@ asmlinkage void __sched schedule(void)
65954 struct rq *rq;
65955 int cpu;
65956
65957+ pax_track_stack();
65958+
65959 need_resched:
65960 preempt_disable();
65961 cpu = smp_processor_id();
65962@@ -5770,7 +5772,7 @@ EXPORT_SYMBOL(schedule);
65963 * Look out! "owner" is an entirely speculative pointer
65964 * access and not reliable.
65965 */
65966-int mutex_spin_on_owner(struct mutex *lock, struct thread_info *owner)
65967+int mutex_spin_on_owner(struct mutex *lock, struct task_struct *owner)
65968 {
65969 unsigned int cpu;
65970 struct rq *rq;
65971@@ -5784,10 +5786,10 @@ int mutex_spin_on_owner(struct mutex *lo
65972 * DEBUG_PAGEALLOC could have unmapped it if
65973 * the mutex owner just released it and exited.
65974 */
65975- if (probe_kernel_address(&owner->cpu, cpu))
65976+ if (probe_kernel_address(&task_thread_info(owner)->cpu, cpu))
65977 return 0;
65978 #else
65979- cpu = owner->cpu;
65980+ cpu = task_thread_info(owner)->cpu;
65981 #endif
65982
65983 /*
65984@@ -5816,7 +5818,7 @@ int mutex_spin_on_owner(struct mutex *lo
65985 /*
65986 * Is that owner really running on that cpu?
65987 */
65988- if (task_thread_info(rq->curr) != owner || need_resched())
65989+ if (rq->curr != owner || need_resched())
65990 return 0;
65991
65992 cpu_relax();
65993@@ -6359,6 +6361,8 @@ int can_nice(const struct task_struct *p
65994 /* convert nice value [19,-20] to rlimit style value [1,40] */
65995 int nice_rlim = 20 - nice;
65996
65997+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
65998+
65999 return (nice_rlim <= p->signal->rlim[RLIMIT_NICE].rlim_cur ||
66000 capable(CAP_SYS_NICE));
66001 }
66002@@ -6392,7 +6396,8 @@ SYSCALL_DEFINE1(nice, int, increment)
66003 if (nice > 19)
66004 nice = 19;
66005
66006- if (increment < 0 && !can_nice(current, nice))
66007+ if (increment < 0 && (!can_nice(current, nice) ||
66008+ gr_handle_chroot_nice()))
66009 return -EPERM;
66010
66011 retval = security_task_setnice(current, nice);
66012@@ -8774,7 +8779,7 @@ static void init_sched_groups_power(int
66013 long power;
66014 int weight;
66015
66016- WARN_ON(!sd || !sd->groups);
66017+ BUG_ON(!sd || !sd->groups);
66018
66019 if (cpu != group_first_cpu(sd->groups))
66020 return;
66021diff -urNp linux-2.6.32.43/kernel/signal.c linux-2.6.32.43/kernel/signal.c
66022--- linux-2.6.32.43/kernel/signal.c 2011-04-17 17:00:52.000000000 -0400
66023+++ linux-2.6.32.43/kernel/signal.c 2011-07-14 20:33:33.000000000 -0400
66024@@ -41,12 +41,12 @@
66025
66026 static struct kmem_cache *sigqueue_cachep;
66027
66028-static void __user *sig_handler(struct task_struct *t, int sig)
66029+static __sighandler_t sig_handler(struct task_struct *t, int sig)
66030 {
66031 return t->sighand->action[sig - 1].sa.sa_handler;
66032 }
66033
66034-static int sig_handler_ignored(void __user *handler, int sig)
66035+static int sig_handler_ignored(__sighandler_t handler, int sig)
66036 {
66037 /* Is it explicitly or implicitly ignored? */
66038 return handler == SIG_IGN ||
66039@@ -56,7 +56,7 @@ static int sig_handler_ignored(void __us
66040 static int sig_task_ignored(struct task_struct *t, int sig,
66041 int from_ancestor_ns)
66042 {
66043- void __user *handler;
66044+ __sighandler_t handler;
66045
66046 handler = sig_handler(t, sig);
66047
66048@@ -207,6 +207,9 @@ static struct sigqueue *__sigqueue_alloc
66049 */
66050 user = get_uid(__task_cred(t)->user);
66051 atomic_inc(&user->sigpending);
66052+
66053+ if (!override_rlimit)
66054+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
66055 if (override_rlimit ||
66056 atomic_read(&user->sigpending) <=
66057 t->signal->rlim[RLIMIT_SIGPENDING].rlim_cur)
66058@@ -327,7 +330,7 @@ flush_signal_handlers(struct task_struct
66059
66060 int unhandled_signal(struct task_struct *tsk, int sig)
66061 {
66062- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
66063+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
66064 if (is_global_init(tsk))
66065 return 1;
66066 if (handler != SIG_IGN && handler != SIG_DFL)
66067@@ -627,6 +630,12 @@ static int check_kill_permission(int sig
66068 }
66069 }
66070
66071+ /* allow glibc communication via tgkill to other threads in our
66072+ thread group */
66073+ if ((info->si_code != SI_TKILL || sig != (SIGRTMIN+1) ||
66074+ task_tgid_vnr(t) != info->si_pid) && gr_handle_signal(t, sig))
66075+ return -EPERM;
66076+
66077 return security_task_kill(t, info, sig, 0);
66078 }
66079
66080@@ -968,7 +977,7 @@ __group_send_sig_info(int sig, struct si
66081 return send_signal(sig, info, p, 1);
66082 }
66083
66084-static int
66085+int
66086 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
66087 {
66088 return send_signal(sig, info, t, 0);
66089@@ -1005,6 +1014,7 @@ force_sig_info(int sig, struct siginfo *
66090 unsigned long int flags;
66091 int ret, blocked, ignored;
66092 struct k_sigaction *action;
66093+ int is_unhandled = 0;
66094
66095 spin_lock_irqsave(&t->sighand->siglock, flags);
66096 action = &t->sighand->action[sig-1];
66097@@ -1019,9 +1029,18 @@ force_sig_info(int sig, struct siginfo *
66098 }
66099 if (action->sa.sa_handler == SIG_DFL)
66100 t->signal->flags &= ~SIGNAL_UNKILLABLE;
66101+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
66102+ is_unhandled = 1;
66103 ret = specific_send_sig_info(sig, info, t);
66104 spin_unlock_irqrestore(&t->sighand->siglock, flags);
66105
66106+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
66107+ normal operation */
66108+ if (is_unhandled) {
66109+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
66110+ gr_handle_crash(t, sig);
66111+ }
66112+
66113 return ret;
66114 }
66115
66116@@ -1081,8 +1100,11 @@ int group_send_sig_info(int sig, struct
66117 {
66118 int ret = check_kill_permission(sig, info, p);
66119
66120- if (!ret && sig)
66121+ if (!ret && sig) {
66122 ret = do_send_sig_info(sig, info, p, true);
66123+ if (!ret)
66124+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
66125+ }
66126
66127 return ret;
66128 }
66129@@ -1644,6 +1666,8 @@ void ptrace_notify(int exit_code)
66130 {
66131 siginfo_t info;
66132
66133+ pax_track_stack();
66134+
66135 BUG_ON((exit_code & (0x7f | ~0xffff)) != SIGTRAP);
66136
66137 memset(&info, 0, sizeof info);
66138@@ -2275,7 +2299,15 @@ do_send_specific(pid_t tgid, pid_t pid,
66139 int error = -ESRCH;
66140
66141 rcu_read_lock();
66142- p = find_task_by_vpid(pid);
66143+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66144+ /* allow glibc communication via tgkill to other threads in our
66145+ thread group */
66146+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
66147+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
66148+ p = find_task_by_vpid_unrestricted(pid);
66149+ else
66150+#endif
66151+ p = find_task_by_vpid(pid);
66152 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
66153 error = check_kill_permission(sig, info, p);
66154 /*
66155diff -urNp linux-2.6.32.43/kernel/smp.c linux-2.6.32.43/kernel/smp.c
66156--- linux-2.6.32.43/kernel/smp.c 2011-03-27 14:31:47.000000000 -0400
66157+++ linux-2.6.32.43/kernel/smp.c 2011-04-17 15:56:46.000000000 -0400
66158@@ -522,22 +522,22 @@ int smp_call_function(void (*func)(void
66159 }
66160 EXPORT_SYMBOL(smp_call_function);
66161
66162-void ipi_call_lock(void)
66163+void ipi_call_lock(void) __acquires(call_function.lock)
66164 {
66165 spin_lock(&call_function.lock);
66166 }
66167
66168-void ipi_call_unlock(void)
66169+void ipi_call_unlock(void) __releases(call_function.lock)
66170 {
66171 spin_unlock(&call_function.lock);
66172 }
66173
66174-void ipi_call_lock_irq(void)
66175+void ipi_call_lock_irq(void) __acquires(call_function.lock)
66176 {
66177 spin_lock_irq(&call_function.lock);
66178 }
66179
66180-void ipi_call_unlock_irq(void)
66181+void ipi_call_unlock_irq(void) __releases(call_function.lock)
66182 {
66183 spin_unlock_irq(&call_function.lock);
66184 }
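The kernel/smp.c hunk only adds sparse annotations: __acquires() and __releases() expand to nothing in a normal build and to context-tracking attributes when sparse defines __CHECKER__, letting the checker verify that a function which takes call_function.lock has a matching release path. The snippet below shows approximately how the kernel defines and uses these annotations (paraphrased from include/linux/compiler.h, not quoted verbatim).

/* No-ops for gcc, context-tracking hints for sparse. */
#ifdef __CHECKER__
# define __acquires_model(x) __attribute__((context(x, 0, 1)))
# define __releases_model(x) __attribute__((context(x, 1, 0)))
#else
# define __acquires_model(x)
# define __releases_model(x)
#endif

struct lock_model { int locked; };
static struct lock_model call_function_lock;

static void ipi_call_lock_model(void) __acquires_model(call_function_lock)
{
        call_function_lock.locked = 1;
}

static void ipi_call_unlock_model(void) __releases_model(call_function_lock)
{
        call_function_lock.locked = 0;
}

int main(void)
{
        ipi_call_lock_model();
        ipi_call_unlock_model();
        return call_function_lock.locked;
}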
66185diff -urNp linux-2.6.32.43/kernel/softirq.c linux-2.6.32.43/kernel/softirq.c
66186--- linux-2.6.32.43/kernel/softirq.c 2011-03-27 14:31:47.000000000 -0400
66187+++ linux-2.6.32.43/kernel/softirq.c 2011-08-05 20:33:55.000000000 -0400
66188@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec
66189
66190 static DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
66191
66192-char *softirq_to_name[NR_SOFTIRQS] = {
66193+const char * const softirq_to_name[NR_SOFTIRQS] = {
66194 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
66195 "TASKLET", "SCHED", "HRTIMER", "RCU"
66196 };
66197@@ -206,7 +206,7 @@ EXPORT_SYMBOL(local_bh_enable_ip);
66198
66199 asmlinkage void __do_softirq(void)
66200 {
66201- struct softirq_action *h;
66202+ const struct softirq_action *h;
66203 __u32 pending;
66204 int max_restart = MAX_SOFTIRQ_RESTART;
66205 int cpu;
66206@@ -233,7 +233,7 @@ restart:
66207 kstat_incr_softirqs_this_cpu(h - softirq_vec);
66208
66209 trace_softirq_entry(h, softirq_vec);
66210- h->action(h);
66211+ h->action();
66212 trace_softirq_exit(h, softirq_vec);
66213 if (unlikely(prev_count != preempt_count())) {
66214 printk(KERN_ERR "huh, entered softirq %td %s %p"
66215@@ -363,9 +363,11 @@ void raise_softirq(unsigned int nr)
66216 local_irq_restore(flags);
66217 }
66218
66219-void open_softirq(int nr, void (*action)(struct softirq_action *))
66220+void open_softirq(int nr, void (*action)(void))
66221 {
66222- softirq_vec[nr].action = action;
66223+ pax_open_kernel();
66224+ *(void **)&softirq_vec[nr].action = action;
66225+ pax_close_kernel();
66226 }
66227
66228 /*
66229@@ -419,7 +421,7 @@ void __tasklet_hi_schedule_first(struct
66230
66231 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
66232
66233-static void tasklet_action(struct softirq_action *a)
66234+static void tasklet_action(void)
66235 {
66236 struct tasklet_struct *list;
66237
66238@@ -454,7 +456,7 @@ static void tasklet_action(struct softir
66239 }
66240 }
66241
66242-static void tasklet_hi_action(struct softirq_action *a)
66243+static void tasklet_hi_action(void)
66244 {
66245 struct tasklet_struct *list;
66246
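The softirq changes are on the constification side of PaX: softirq_to_name and the action handlers lose their writable/unused parts, the handlers drop the unused struct softirq_action argument, and open_softirq() writes the handler pointer through pax_open_kernel()/pax_close_kernel(), which temporarily lift write protection on otherwise read-only kernel data (for example by toggling CR0.WP on x86). The userspace model below illustrates only the idea of a read-only function-pointer table with an explicit write window; it is not how the kernel or PaX implements it.

/* Userspace model: the handler table lives on a read-only page, and
 * registration briefly makes it writable again, the way open_softirq()
 * brackets its store with pax_open_kernel()/pax_close_kernel(). */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

typedef void (*action_fn)(void);

static action_fn *table;               /* read-only most of the time */
static size_t table_bytes;

static void open_window(void)  { mprotect(table, table_bytes, PROT_READ | PROT_WRITE); }
static void close_window(void) { mprotect(table, table_bytes, PROT_READ); }

static void open_softirq_model(int nr, action_fn action)
{
        open_window();
        table[nr] = action;            /* the only place the table is written */
        close_window();
}

static void tasklet_action_model(void) { puts("tasklet"); }

int main(void)
{
        table_bytes = (size_t)sysconf(_SC_PAGESIZE);
        table = mmap(NULL, table_bytes, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (table == MAP_FAILED)
                return 1;
        memset(table, 0, table_bytes);
        close_window();                /* table is now read-only */

        open_softirq_model(0, tasklet_action_model);
        if (table[0])
                table[0]();            /* h->action() taking no argument */
        return 0;
}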
66247diff -urNp linux-2.6.32.43/kernel/sys.c linux-2.6.32.43/kernel/sys.c
66248--- linux-2.6.32.43/kernel/sys.c 2011-03-27 14:31:47.000000000 -0400
66249+++ linux-2.6.32.43/kernel/sys.c 2011-04-17 15:56:46.000000000 -0400
66250@@ -133,6 +133,12 @@ static int set_one_prio(struct task_stru
66251 error = -EACCES;
66252 goto out;
66253 }
66254+
66255+ if (gr_handle_chroot_setpriority(p, niceval)) {
66256+ error = -EACCES;
66257+ goto out;
66258+ }
66259+
66260 no_nice = security_task_setnice(p, niceval);
66261 if (no_nice) {
66262 error = no_nice;
66263@@ -190,10 +196,10 @@ SYSCALL_DEFINE3(setpriority, int, which,
66264 !(user = find_user(who)))
66265 goto out_unlock; /* No processes for this user */
66266
66267- do_each_thread(g, p)
66268+ do_each_thread(g, p) {
66269 if (__task_cred(p)->uid == who)
66270 error = set_one_prio(p, niceval, error);
66271- while_each_thread(g, p);
66272+ } while_each_thread(g, p);
66273 if (who != cred->uid)
66274 free_uid(user); /* For find_user() */
66275 break;
66276@@ -253,13 +259,13 @@ SYSCALL_DEFINE2(getpriority, int, which,
66277 !(user = find_user(who)))
66278 goto out_unlock; /* No processes for this user */
66279
66280- do_each_thread(g, p)
66281+ do_each_thread(g, p) {
66282 if (__task_cred(p)->uid == who) {
66283 niceval = 20 - task_nice(p);
66284 if (niceval > retval)
66285 retval = niceval;
66286 }
66287- while_each_thread(g, p);
66288+ } while_each_thread(g, p);
66289 if (who != cred->uid)
66290 free_uid(user); /* for find_user() */
66291 break;
66292@@ -509,6 +515,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g
66293 goto error;
66294 }
66295
66296+ if (gr_check_group_change(new->gid, new->egid, -1))
66297+ goto error;
66298+
66299 if (rgid != (gid_t) -1 ||
66300 (egid != (gid_t) -1 && egid != old->gid))
66301 new->sgid = new->egid;
66302@@ -542,6 +551,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
66303 goto error;
66304
66305 retval = -EPERM;
66306+
66307+ if (gr_check_group_change(gid, gid, gid))
66308+ goto error;
66309+
66310 if (capable(CAP_SETGID))
66311 new->gid = new->egid = new->sgid = new->fsgid = gid;
66312 else if (gid == old->gid || gid == old->sgid)
66313@@ -627,6 +640,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u
66314 goto error;
66315 }
66316
66317+ if (gr_check_user_change(new->uid, new->euid, -1))
66318+ goto error;
66319+
66320 if (new->uid != old->uid) {
66321 retval = set_user(new);
66322 if (retval < 0)
66323@@ -675,6 +691,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
66324 goto error;
66325
66326 retval = -EPERM;
66327+
66328+ if (gr_check_crash_uid(uid))
66329+ goto error;
66330+ if (gr_check_user_change(uid, uid, uid))
66331+ goto error;
66332+
66333 if (capable(CAP_SETUID)) {
66334 new->suid = new->uid = uid;
66335 if (uid != old->uid) {
66336@@ -732,6 +754,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid,
66337 goto error;
66338 }
66339
66340+ if (gr_check_user_change(ruid, euid, -1))
66341+ goto error;
66342+
66343 if (ruid != (uid_t) -1) {
66344 new->uid = ruid;
66345 if (ruid != old->uid) {
66346@@ -800,6 +825,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid,
66347 goto error;
66348 }
66349
66350+ if (gr_check_group_change(rgid, egid, -1))
66351+ goto error;
66352+
66353 if (rgid != (gid_t) -1)
66354 new->gid = rgid;
66355 if (egid != (gid_t) -1)
66356@@ -849,6 +877,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
66357 if (security_task_setuid(uid, (uid_t)-1, (uid_t)-1, LSM_SETID_FS) < 0)
66358 goto error;
66359
66360+ if (gr_check_user_change(-1, -1, uid))
66361+ goto error;
66362+
66363 if (uid == old->uid || uid == old->euid ||
66364 uid == old->suid || uid == old->fsuid ||
66365 capable(CAP_SETUID)) {
66366@@ -889,6 +920,9 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
66367 if (gid == old->gid || gid == old->egid ||
66368 gid == old->sgid || gid == old->fsgid ||
66369 capable(CAP_SETGID)) {
66370+ if (gr_check_group_change(-1, -1, gid))
66371+ goto error;
66372+
66373 if (gid != old_fsgid) {
66374 new->fsgid = gid;
66375 goto change_okay;
66376@@ -1454,7 +1488,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi
66377 error = get_dumpable(me->mm);
66378 break;
66379 case PR_SET_DUMPABLE:
66380- if (arg2 < 0 || arg2 > 1) {
66381+ if (arg2 > 1) {
66382 error = -EINVAL;
66383 break;
66384 }
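The PR_SET_DUMPABLE hunk in kernel/sys.c is a plain cleanup rather than a hardening change: prctl()'s arg2 is an unsigned long, so the old "arg2 < 0" test can never be true and only the "arg2 > 1" bound does any work. A standalone illustration of why the removed comparison was dead code (not kernel code):

/* Why "arg2 < 0" is dead when arg2 is unsigned: a negative value from the
 * caller wraps to a huge unsigned number, so only the upper bound rejects it. */
#include <stdio.h>

static int set_dumpable_model(unsigned long arg2)
{
        if (arg2 > 1)                 /* the only check that can fire */
                return -1;            /* -EINVAL in the kernel */
        return 0;
}

int main(void)
{
        long bad = -1;
        /* -1 becomes ULONG_MAX here, so the > 1 test rejects it */
        printf("%d\n", set_dumpable_model((unsigned long)bad));
        printf("%d\n", set_dumpable_model(1));
        return 0;
}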
66385diff -urNp linux-2.6.32.43/kernel/sysctl.c linux-2.6.32.43/kernel/sysctl.c
66386--- linux-2.6.32.43/kernel/sysctl.c 2011-03-27 14:31:47.000000000 -0400
66387+++ linux-2.6.32.43/kernel/sysctl.c 2011-04-17 15:56:46.000000000 -0400
66388@@ -63,6 +63,13 @@
66389 static int deprecated_sysctl_warning(struct __sysctl_args *args);
66390
66391 #if defined(CONFIG_SYSCTL)
66392+#include <linux/grsecurity.h>
66393+#include <linux/grinternal.h>
66394+
66395+extern __u32 gr_handle_sysctl(const ctl_table *table, const int op);
66396+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
66397+ const int op);
66398+extern int gr_handle_chroot_sysctl(const int op);
66399
66400 /* External variables not in a header file. */
66401 extern int C_A_D;
66402@@ -168,6 +175,7 @@ static int proc_do_cad_pid(struct ctl_ta
66403 static int proc_taint(struct ctl_table *table, int write,
66404 void __user *buffer, size_t *lenp, loff_t *ppos);
66405 #endif
66406+extern ctl_table grsecurity_table[];
66407
66408 static struct ctl_table root_table[];
66409 static struct ctl_table_root sysctl_table_root;
66410@@ -200,6 +208,21 @@ extern struct ctl_table epoll_table[];
66411 int sysctl_legacy_va_layout;
66412 #endif
66413
66414+#ifdef CONFIG_PAX_SOFTMODE
66415+static ctl_table pax_table[] = {
66416+ {
66417+ .ctl_name = CTL_UNNUMBERED,
66418+ .procname = "softmode",
66419+ .data = &pax_softmode,
66420+ .maxlen = sizeof(unsigned int),
66421+ .mode = 0600,
66422+ .proc_handler = &proc_dointvec,
66423+ },
66424+
66425+ { .ctl_name = 0 }
66426+};
66427+#endif
66428+
66429 extern int prove_locking;
66430 extern int lock_stat;
66431
66432@@ -251,6 +274,24 @@ static int max_wakeup_granularity_ns = N
66433 #endif
66434
66435 static struct ctl_table kern_table[] = {
66436+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66437+ {
66438+ .ctl_name = CTL_UNNUMBERED,
66439+ .procname = "grsecurity",
66440+ .mode = 0500,
66441+ .child = grsecurity_table,
66442+ },
66443+#endif
66444+
66445+#ifdef CONFIG_PAX_SOFTMODE
66446+ {
66447+ .ctl_name = CTL_UNNUMBERED,
66448+ .procname = "pax",
66449+ .mode = 0500,
66450+ .child = pax_table,
66451+ },
66452+#endif
66453+
66454 {
66455 .ctl_name = CTL_UNNUMBERED,
66456 .procname = "sched_child_runs_first",
66457@@ -567,8 +608,8 @@ static struct ctl_table kern_table[] = {
66458 .data = &modprobe_path,
66459 .maxlen = KMOD_PATH_LEN,
66460 .mode = 0644,
66461- .proc_handler = &proc_dostring,
66462- .strategy = &sysctl_string,
66463+ .proc_handler = &proc_dostring_modpriv,
66464+ .strategy = &sysctl_string_modpriv,
66465 },
66466 {
66467 .ctl_name = CTL_UNNUMBERED,
66468@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = {
66469 .mode = 0644,
66470 .proc_handler = &proc_dointvec
66471 },
66472+ {
66473+ .procname = "heap_stack_gap",
66474+ .data = &sysctl_heap_stack_gap,
66475+ .maxlen = sizeof(sysctl_heap_stack_gap),
66476+ .mode = 0644,
66477+ .proc_handler = proc_doulongvec_minmax,
66478+ },
66479 #else
66480 {
66481 .ctl_name = CTL_UNNUMBERED,
66482@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl
66483 return 0;
66484 }
66485
66486+static int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op);
66487+
66488 static int parse_table(int __user *name, int nlen,
66489 void __user *oldval, size_t __user *oldlenp,
66490 void __user *newval, size_t newlen,
66491@@ -1821,7 +1871,7 @@ repeat:
66492 if (n == table->ctl_name) {
66493 int error;
66494 if (table->child) {
66495- if (sysctl_perm(root, table, MAY_EXEC))
66496+ if (sysctl_perm_nochk(root, table, MAY_EXEC))
66497 return -EPERM;
66498 name++;
66499 nlen--;
66500@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r
66501 int error;
66502 int mode;
66503
66504+ if (table->parent != NULL && table->parent->procname != NULL &&
66505+ table->procname != NULL &&
66506+ gr_handle_sysctl_mod(table->parent->procname, table->procname, op))
66507+ return -EACCES;
66508+ if (gr_handle_chroot_sysctl(op))
66509+ return -EACCES;
66510+ error = gr_handle_sysctl(table, op);
66511+ if (error)
66512+ return error;
66513+
66514+ error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
66515+ if (error)
66516+ return error;
66517+
66518+ if (root->permissions)
66519+ mode = root->permissions(root, current->nsproxy, table);
66520+ else
66521+ mode = table->mode;
66522+
66523+ return test_perm(mode, op);
66524+}
66525+
66526+int sysctl_perm_nochk(struct ctl_table_root *root, struct ctl_table *table, int op)
66527+{
66528+ int error;
66529+ int mode;
66530+
66531 error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
66532 if (error)
66533 return error;
66534@@ -2335,6 +2412,16 @@ int proc_dostring(struct ctl_table *tabl
66535 buffer, lenp, ppos);
66536 }
66537
66538+int proc_dostring_modpriv(struct ctl_table *table, int write,
66539+ void __user *buffer, size_t *lenp, loff_t *ppos)
66540+{
66541+ if (write && !capable(CAP_SYS_MODULE))
66542+ return -EPERM;
66543+
66544+ return _proc_do_string(table->data, table->maxlen, write,
66545+ buffer, lenp, ppos);
66546+}
66547+
66548
66549 static int do_proc_dointvec_conv(int *negp, unsigned long *lvalp,
66550 int *valp,
66551@@ -2609,7 +2696,7 @@ static int __do_proc_doulongvec_minmax(v
66552 vleft = table->maxlen / sizeof(unsigned long);
66553 left = *lenp;
66554
66555- for (; left && vleft--; i++, min++, max++, first=0) {
66556+ for (; left && vleft--; i++, first=0) {
66557 if (write) {
66558 while (left) {
66559 char c;
66560@@ -2910,6 +2997,12 @@ int proc_dostring(struct ctl_table *tabl
66561 return -ENOSYS;
66562 }
66563
66564+int proc_dostring_modpriv(struct ctl_table *table, int write,
66565+ void __user *buffer, size_t *lenp, loff_t *ppos)
66566+{
66567+ return -ENOSYS;
66568+}
66569+
66570 int proc_dointvec(struct ctl_table *table, int write,
66571 void __user *buffer, size_t *lenp, loff_t *ppos)
66572 {
66573@@ -3038,6 +3131,16 @@ int sysctl_string(struct ctl_table *tabl
66574 return 1;
66575 }
66576
66577+int sysctl_string_modpriv(struct ctl_table *table,
66578+ void __user *oldval, size_t __user *oldlenp,
66579+ void __user *newval, size_t newlen)
66580+{
66581+ if (newval && newlen && !capable(CAP_SYS_MODULE))
66582+ return -EPERM;
66583+
66584+ return sysctl_string(table, oldval, oldlenp, newval, newlen);
66585+}
66586+
66587 /*
66588 * This function makes sure that all of the integers in the vector
66589 * are between the minimum and maximum values given in the arrays
66590@@ -3182,6 +3285,13 @@ int sysctl_string(struct ctl_table *tabl
66591 return -ENOSYS;
66592 }
66593
66594+int sysctl_string_modpriv(struct ctl_table *table,
66595+ void __user *oldval, size_t __user *oldlenp,
66596+ void __user *newval, size_t newlen)
66597+{
66598+ return -ENOSYS;
66599+}
66600+
66601 int sysctl_intvec(struct ctl_table *table,
66602 void __user *oldval, size_t __user *oldlenp,
66603 void __user *newval, size_t newlen)
66604@@ -3246,6 +3356,7 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
66605 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
66606 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
66607 EXPORT_SYMBOL(proc_dostring);
66608+EXPORT_SYMBOL(proc_dostring_modpriv);
66609 EXPORT_SYMBOL(proc_doulongvec_minmax);
66610 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
66611 EXPORT_SYMBOL(register_sysctl_table);
66612@@ -3254,5 +3365,6 @@ EXPORT_SYMBOL(sysctl_intvec);
66613 EXPORT_SYMBOL(sysctl_jiffies);
66614 EXPORT_SYMBOL(sysctl_ms_jiffies);
66615 EXPORT_SYMBOL(sysctl_string);
66616+EXPORT_SYMBOL(sysctl_string_modpriv);
66617 EXPORT_SYMBOL(sysctl_data);
66618 EXPORT_SYMBOL(unregister_sysctl_table);
66619diff -urNp linux-2.6.32.43/kernel/sysctl_check.c linux-2.6.32.43/kernel/sysctl_check.c
66620--- linux-2.6.32.43/kernel/sysctl_check.c 2011-03-27 14:31:47.000000000 -0400
66621+++ linux-2.6.32.43/kernel/sysctl_check.c 2011-04-17 15:56:46.000000000 -0400
66622@@ -1489,10 +1489,12 @@ int sysctl_check_table(struct nsproxy *n
66623 } else {
66624 if ((table->strategy == sysctl_data) ||
66625 (table->strategy == sysctl_string) ||
66626+ (table->strategy == sysctl_string_modpriv) ||
66627 (table->strategy == sysctl_intvec) ||
66628 (table->strategy == sysctl_jiffies) ||
66629 (table->strategy == sysctl_ms_jiffies) ||
66630 (table->proc_handler == proc_dostring) ||
66631+ (table->proc_handler == proc_dostring_modpriv) ||
66632 (table->proc_handler == proc_dointvec) ||
66633 (table->proc_handler == proc_dointvec_minmax) ||
66634 (table->proc_handler == proc_dointvec_jiffies) ||
66635diff -urNp linux-2.6.32.43/kernel/taskstats.c linux-2.6.32.43/kernel/taskstats.c
66636--- linux-2.6.32.43/kernel/taskstats.c 2011-07-13 17:23:04.000000000 -0400
66637+++ linux-2.6.32.43/kernel/taskstats.c 2011-07-13 17:23:19.000000000 -0400
66638@@ -26,9 +26,12 @@
66639 #include <linux/cgroup.h>
66640 #include <linux/fs.h>
66641 #include <linux/file.h>
66642+#include <linux/grsecurity.h>
66643 #include <net/genetlink.h>
66644 #include <asm/atomic.h>
66645
66646+extern int gr_is_taskstats_denied(int pid);
66647+
66648 /*
66649 * Maximum length of a cpumask that can be specified in
66650 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
66651@@ -442,6 +445,9 @@ static int taskstats_user_cmd(struct sk_
66652 size_t size;
66653 cpumask_var_t mask;
66654
66655+ if (gr_is_taskstats_denied(current->pid))
66656+ return -EACCES;
66657+
66658 if (!alloc_cpumask_var(&mask, GFP_KERNEL))
66659 return -ENOMEM;
66660
66661diff -urNp linux-2.6.32.43/kernel/time/tick-broadcast.c linux-2.6.32.43/kernel/time/tick-broadcast.c
66662--- linux-2.6.32.43/kernel/time/tick-broadcast.c 2011-05-23 16:56:59.000000000 -0400
66663+++ linux-2.6.32.43/kernel/time/tick-broadcast.c 2011-05-23 16:57:13.000000000 -0400
66664@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl
66665 * then clear the broadcast bit.
66666 */
66667 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
66668- int cpu = smp_processor_id();
66669+ cpu = smp_processor_id();
66670
66671 cpumask_clear_cpu(cpu, tick_get_broadcast_mask());
66672 tick_broadcast_clear_oneshot(cpu);
66673diff -urNp linux-2.6.32.43/kernel/time/timekeeping.c linux-2.6.32.43/kernel/time/timekeeping.c
66674--- linux-2.6.32.43/kernel/time/timekeeping.c 2011-06-25 12:55:35.000000000 -0400
66675+++ linux-2.6.32.43/kernel/time/timekeeping.c 2011-06-25 12:56:37.000000000 -0400
66676@@ -14,6 +14,7 @@
66677 #include <linux/init.h>
66678 #include <linux/mm.h>
66679 #include <linux/sched.h>
66680+#include <linux/grsecurity.h>
66681 #include <linux/sysdev.h>
66682 #include <linux/clocksource.h>
66683 #include <linux/jiffies.h>
66684@@ -180,7 +181,7 @@ void update_xtime_cache(u64 nsec)
66685 */
66686 struct timespec ts = xtime;
66687 timespec_add_ns(&ts, nsec);
66688- ACCESS_ONCE(xtime_cache) = ts;
66689+ ACCESS_ONCE_RW(xtime_cache) = ts;
66690 }
66691
66692 /* must hold xtime_lock */
66693@@ -333,6 +334,8 @@ int do_settimeofday(struct timespec *tv)
66694 if ((unsigned long)tv->tv_nsec >= NSEC_PER_SEC)
66695 return -EINVAL;
66696
66697+ gr_log_timechange();
66698+
66699 write_seqlock_irqsave(&xtime_lock, flags);
66700
66701 timekeeping_forward_now();
66702diff -urNp linux-2.6.32.43/kernel/time/timer_list.c linux-2.6.32.43/kernel/time/timer_list.c
66703--- linux-2.6.32.43/kernel/time/timer_list.c 2011-03-27 14:31:47.000000000 -0400
66704+++ linux-2.6.32.43/kernel/time/timer_list.c 2011-04-17 15:56:46.000000000 -0400
66705@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base,
66706
66707 static void print_name_offset(struct seq_file *m, void *sym)
66708 {
66709+#ifdef CONFIG_GRKERNSEC_HIDESYM
66710+ SEQ_printf(m, "<%p>", NULL);
66711+#else
66712 char symname[KSYM_NAME_LEN];
66713
66714 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
66715 SEQ_printf(m, "<%p>", sym);
66716 else
66717 SEQ_printf(m, "%s", symname);
66718+#endif
66719 }
66720
66721 static void
66722@@ -112,7 +116,11 @@ next_one:
66723 static void
66724 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
66725 {
66726+#ifdef CONFIG_GRKERNSEC_HIDESYM
66727+ SEQ_printf(m, " .base: %p\n", NULL);
66728+#else
66729 SEQ_printf(m, " .base: %p\n", base);
66730+#endif
66731 SEQ_printf(m, " .index: %d\n",
66732 base->index);
66733 SEQ_printf(m, " .resolution: %Lu nsecs\n",
66734@@ -289,7 +297,11 @@ static int __init init_timer_list_procfs
66735 {
66736 struct proc_dir_entry *pe;
66737
66738+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66739+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
66740+#else
66741 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
66742+#endif
66743 if (!pe)
66744 return -ENOMEM;
66745 return 0;
66746diff -urNp linux-2.6.32.43/kernel/time/timer_stats.c linux-2.6.32.43/kernel/time/timer_stats.c
66747--- linux-2.6.32.43/kernel/time/timer_stats.c 2011-03-27 14:31:47.000000000 -0400
66748+++ linux-2.6.32.43/kernel/time/timer_stats.c 2011-05-04 17:56:28.000000000 -0400
66749@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
66750 static unsigned long nr_entries;
66751 static struct entry entries[MAX_ENTRIES];
66752
66753-static atomic_t overflow_count;
66754+static atomic_unchecked_t overflow_count;
66755
66756 /*
66757 * The entries are in a hash-table, for fast lookup:
66758@@ -140,7 +140,7 @@ static void reset_entries(void)
66759 nr_entries = 0;
66760 memset(entries, 0, sizeof(entries));
66761 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
66762- atomic_set(&overflow_count, 0);
66763+ atomic_set_unchecked(&overflow_count, 0);
66764 }
66765
66766 static struct entry *alloc_entry(void)
66767@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *time
66768 if (likely(entry))
66769 entry->count++;
66770 else
66771- atomic_inc(&overflow_count);
66772+ atomic_inc_unchecked(&overflow_count);
66773
66774 out_unlock:
66775 spin_unlock_irqrestore(lock, flags);
66776@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time
66777
66778 static void print_name_offset(struct seq_file *m, unsigned long addr)
66779 {
66780+#ifdef CONFIG_GRKERNSEC_HIDESYM
66781+ seq_printf(m, "<%p>", NULL);
66782+#else
66783 char symname[KSYM_NAME_LEN];
66784
66785 if (lookup_symbol_name(addr, symname) < 0)
66786 seq_printf(m, "<%p>", (void *)addr);
66787 else
66788 seq_printf(m, "%s", symname);
66789+#endif
66790 }
66791
66792 static int tstats_show(struct seq_file *m, void *v)
66793@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *
66794
66795 seq_puts(m, "Timer Stats Version: v0.2\n");
66796 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
66797- if (atomic_read(&overflow_count))
66798+ if (atomic_read_unchecked(&overflow_count))
66799 seq_printf(m, "Overflow: %d entries\n",
66800- atomic_read(&overflow_count));
66801+ atomic_read_unchecked(&overflow_count));
66802
66803 for (i = 0; i < nr_entries; i++) {
66804 entry = entries + i;
66805@@ -415,7 +419,11 @@ static int __init init_tstats_procfs(voi
66806 {
66807 struct proc_dir_entry *pe;
66808
66809+#ifdef CONFIG_GRKERNSEC_PROC_ADD
66810+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
66811+#else
66812 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
66813+#endif
66814 if (!pe)
66815 return -ENOMEM;
66816 return 0;
66817diff -urNp linux-2.6.32.43/kernel/time.c linux-2.6.32.43/kernel/time.c
66818--- linux-2.6.32.43/kernel/time.c 2011-03-27 14:31:47.000000000 -0400
66819+++ linux-2.6.32.43/kernel/time.c 2011-04-17 15:56:46.000000000 -0400
66820@@ -165,6 +165,11 @@ int do_sys_settimeofday(struct timespec
66821 return error;
66822
66823 if (tz) {
66824+ /* we log in do_settimeofday called below, so don't log twice
66825+ */
66826+ if (!tv)
66827+ gr_log_timechange();
66828+
66829 /* SMP safe, global irq locking makes it work. */
66830 sys_tz = *tz;
66831 update_vsyscall_tz();
66832@@ -240,7 +245,7 @@ EXPORT_SYMBOL(current_fs_time);
66833 * Avoid unnecessary multiplications/divisions in the
66834 * two most common HZ cases:
66835 */
66836-unsigned int inline jiffies_to_msecs(const unsigned long j)
66837+inline unsigned int jiffies_to_msecs(const unsigned long j)
66838 {
66839 #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
66840 return (MSEC_PER_SEC / HZ) * j;
66841@@ -256,7 +261,7 @@ unsigned int inline jiffies_to_msecs(con
66842 }
66843 EXPORT_SYMBOL(jiffies_to_msecs);
66844
66845-unsigned int inline jiffies_to_usecs(const unsigned long j)
66846+inline unsigned int jiffies_to_usecs(const unsigned long j)
66847 {
66848 #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
66849 return (USEC_PER_SEC / HZ) * j;
66850diff -urNp linux-2.6.32.43/kernel/timer.c linux-2.6.32.43/kernel/timer.c
66851--- linux-2.6.32.43/kernel/timer.c 2011-03-27 14:31:47.000000000 -0400
66852+++ linux-2.6.32.43/kernel/timer.c 2011-04-17 15:56:46.000000000 -0400
66853@@ -1213,7 +1213,7 @@ void update_process_times(int user_tick)
66854 /*
66855 * This function runs timers and the timer-tq in bottom half context.
66856 */
66857-static void run_timer_softirq(struct softirq_action *h)
66858+static void run_timer_softirq(void)
66859 {
66860 struct tvec_base *base = __get_cpu_var(tvec_bases);
66861
66862diff -urNp linux-2.6.32.43/kernel/trace/blktrace.c linux-2.6.32.43/kernel/trace/blktrace.c
66863--- linux-2.6.32.43/kernel/trace/blktrace.c 2011-03-27 14:31:47.000000000 -0400
66864+++ linux-2.6.32.43/kernel/trace/blktrace.c 2011-05-04 17:56:28.000000000 -0400
66865@@ -313,7 +313,7 @@ static ssize_t blk_dropped_read(struct f
66866 struct blk_trace *bt = filp->private_data;
66867 char buf[16];
66868
66869- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
66870+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
66871
66872 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
66873 }
66874@@ -376,7 +376,7 @@ static int blk_subbuf_start_callback(str
66875 return 1;
66876
66877 bt = buf->chan->private_data;
66878- atomic_inc(&bt->dropped);
66879+ atomic_inc_unchecked(&bt->dropped);
66880 return 0;
66881 }
66882
66883@@ -477,7 +477,7 @@ int do_blk_trace_setup(struct request_qu
66884
66885 bt->dir = dir;
66886 bt->dev = dev;
66887- atomic_set(&bt->dropped, 0);
66888+ atomic_set_unchecked(&bt->dropped, 0);
66889
66890 ret = -EIO;
66891 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
66892diff -urNp linux-2.6.32.43/kernel/trace/ftrace.c linux-2.6.32.43/kernel/trace/ftrace.c
66893--- linux-2.6.32.43/kernel/trace/ftrace.c 2011-06-25 12:55:35.000000000 -0400
66894+++ linux-2.6.32.43/kernel/trace/ftrace.c 2011-06-25 12:56:37.000000000 -0400
66895@@ -1100,13 +1100,18 @@ ftrace_code_disable(struct module *mod,
66896
66897 ip = rec->ip;
66898
66899+ ret = ftrace_arch_code_modify_prepare();
66900+ FTRACE_WARN_ON(ret);
66901+ if (ret)
66902+ return 0;
66903+
66904 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
66905+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
66906 if (ret) {
66907 ftrace_bug(ret, ip);
66908 rec->flags |= FTRACE_FL_FAILED;
66909- return 0;
66910 }
66911- return 1;
66912+ return ret ? 0 : 1;
66913 }
66914
66915 /*
66916diff -urNp linux-2.6.32.43/kernel/trace/ring_buffer.c linux-2.6.32.43/kernel/trace/ring_buffer.c
66917--- linux-2.6.32.43/kernel/trace/ring_buffer.c 2011-03-27 14:31:47.000000000 -0400
66918+++ linux-2.6.32.43/kernel/trace/ring_buffer.c 2011-04-17 15:56:46.000000000 -0400
66919@@ -606,7 +606,7 @@ static struct list_head *rb_list_head(st
66920 * the reader page). But if the next page is a header page,
66921 * its flags will be non zero.
66922 */
66923-static int inline
66924+static inline int
66925 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer,
66926 struct buffer_page *page, struct list_head *list)
66927 {
66928diff -urNp linux-2.6.32.43/kernel/trace/trace.c linux-2.6.32.43/kernel/trace/trace.c
66929--- linux-2.6.32.43/kernel/trace/trace.c 2011-03-27 14:31:47.000000000 -0400
66930+++ linux-2.6.32.43/kernel/trace/trace.c 2011-05-16 21:46:57.000000000 -0400
66931@@ -3193,6 +3193,8 @@ static ssize_t tracing_splice_read_pipe(
66932 size_t rem;
66933 unsigned int i;
66934
66935+ pax_track_stack();
66936+
66937 /* copy the tracer to avoid using a global lock all around */
66938 mutex_lock(&trace_types_lock);
66939 if (unlikely(old_tracer != current_trace && current_trace)) {
66940@@ -3659,6 +3661,8 @@ tracing_buffers_splice_read(struct file
66941 int entries, size, i;
66942 size_t ret;
66943
66944+ pax_track_stack();
66945+
66946 if (*ppos & (PAGE_SIZE - 1)) {
66947 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
66948 return -EINVAL;
66949@@ -3816,10 +3820,9 @@ static const struct file_operations trac
66950 };
66951 #endif
66952
66953-static struct dentry *d_tracer;
66954-
66955 struct dentry *tracing_init_dentry(void)
66956 {
66957+ static struct dentry *d_tracer;
66958 static int once;
66959
66960 if (d_tracer)
66961@@ -3839,10 +3842,9 @@ struct dentry *tracing_init_dentry(void)
66962 return d_tracer;
66963 }
66964
66965-static struct dentry *d_percpu;
66966-
66967 struct dentry *tracing_dentry_percpu(void)
66968 {
66969+ static struct dentry *d_percpu;
66970 static int once;
66971 struct dentry *d_tracer;
66972
66973diff -urNp linux-2.6.32.43/kernel/trace/trace_events.c linux-2.6.32.43/kernel/trace/trace_events.c
66974--- linux-2.6.32.43/kernel/trace/trace_events.c 2011-03-27 14:31:47.000000000 -0400
66975+++ linux-2.6.32.43/kernel/trace/trace_events.c 2011-08-05 20:33:55.000000000 -0400
66976@@ -951,13 +951,10 @@ static LIST_HEAD(ftrace_module_file_list
66977 * Modules must own their file_operations to keep up with
66978 * reference counting.
66979 */
66980+
66981 struct ftrace_module_file_ops {
66982 struct list_head list;
66983 struct module *mod;
66984- struct file_operations id;
66985- struct file_operations enable;
66986- struct file_operations format;
66987- struct file_operations filter;
66988 };
66989
66990 static void remove_subsystem_dir(const char *name)
66991@@ -1004,17 +1001,12 @@ trace_create_file_ops(struct module *mod
66992
66993 file_ops->mod = mod;
66994
66995- file_ops->id = ftrace_event_id_fops;
66996- file_ops->id.owner = mod;
66997-
66998- file_ops->enable = ftrace_enable_fops;
66999- file_ops->enable.owner = mod;
67000-
67001- file_ops->filter = ftrace_event_filter_fops;
67002- file_ops->filter.owner = mod;
67003-
67004- file_ops->format = ftrace_event_format_fops;
67005- file_ops->format.owner = mod;
67006+ pax_open_kernel();
67007+ *(void **)&mod->trace_id.owner = mod;
67008+ *(void **)&mod->trace_enable.owner = mod;
67009+ *(void **)&mod->trace_filter.owner = mod;
67010+ *(void **)&mod->trace_format.owner = mod;
67011+ pax_close_kernel();
67012
67013 list_add(&file_ops->list, &ftrace_module_file_list);
67014
67015@@ -1063,8 +1055,8 @@ static void trace_module_add_events(stru
67016 call->mod = mod;
67017 list_add(&call->list, &ftrace_events);
67018 event_create_dir(call, d_events,
67019- &file_ops->id, &file_ops->enable,
67020- &file_ops->filter, &file_ops->format);
67021+ &mod->trace_id, &mod->trace_enable,
67022+ &mod->trace_filter, &mod->trace_format);
67023 }
67024 }
67025
67026diff -urNp linux-2.6.32.43/kernel/trace/trace_mmiotrace.c linux-2.6.32.43/kernel/trace/trace_mmiotrace.c
67027--- linux-2.6.32.43/kernel/trace/trace_mmiotrace.c 2011-03-27 14:31:47.000000000 -0400
67028+++ linux-2.6.32.43/kernel/trace/trace_mmiotrace.c 2011-05-04 17:56:28.000000000 -0400
67029@@ -23,7 +23,7 @@ struct header_iter {
67030 static struct trace_array *mmio_trace_array;
67031 static bool overrun_detected;
67032 static unsigned long prev_overruns;
67033-static atomic_t dropped_count;
67034+static atomic_unchecked_t dropped_count;
67035
67036 static void mmio_reset_data(struct trace_array *tr)
67037 {
67038@@ -126,7 +126,7 @@ static void mmio_close(struct trace_iter
67039
67040 static unsigned long count_overruns(struct trace_iterator *iter)
67041 {
67042- unsigned long cnt = atomic_xchg(&dropped_count, 0);
67043+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
67044 unsigned long over = ring_buffer_overruns(iter->tr->buffer);
67045
67046 if (over > prev_overruns)
67047@@ -316,7 +316,7 @@ static void __trace_mmiotrace_rw(struct
67048 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
67049 sizeof(*entry), 0, pc);
67050 if (!event) {
67051- atomic_inc(&dropped_count);
67052+ atomic_inc_unchecked(&dropped_count);
67053 return;
67054 }
67055 entry = ring_buffer_event_data(event);
67056@@ -346,7 +346,7 @@ static void __trace_mmiotrace_map(struct
67057 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
67058 sizeof(*entry), 0, pc);
67059 if (!event) {
67060- atomic_inc(&dropped_count);
67061+ atomic_inc_unchecked(&dropped_count);
67062 return;
67063 }
67064 entry = ring_buffer_event_data(event);
67065diff -urNp linux-2.6.32.43/kernel/trace/trace_output.c linux-2.6.32.43/kernel/trace/trace_output.c
67066--- linux-2.6.32.43/kernel/trace/trace_output.c 2011-03-27 14:31:47.000000000 -0400
67067+++ linux-2.6.32.43/kernel/trace/trace_output.c 2011-04-17 15:56:46.000000000 -0400
67068@@ -237,7 +237,7 @@ int trace_seq_path(struct trace_seq *s,
67069 return 0;
67070 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
67071 if (!IS_ERR(p)) {
67072- p = mangle_path(s->buffer + s->len, p, "\n");
67073+ p = mangle_path(s->buffer + s->len, p, "\n\\");
67074 if (p) {
67075 s->len = p - s->buffer;
67076 return 1;
67077diff -urNp linux-2.6.32.43/kernel/trace/trace_stack.c linux-2.6.32.43/kernel/trace/trace_stack.c
67078--- linux-2.6.32.43/kernel/trace/trace_stack.c 2011-03-27 14:31:47.000000000 -0400
67079+++ linux-2.6.32.43/kernel/trace/trace_stack.c 2011-04-17 15:56:46.000000000 -0400
67080@@ -50,7 +50,7 @@ static inline void check_stack(void)
67081 return;
67082
67083 /* we do not handle interrupt stacks yet */
67084- if (!object_is_on_stack(&this_size))
67085+ if (!object_starts_on_stack(&this_size))
67086 return;
67087
67088 local_irq_save(flags);
67089diff -urNp linux-2.6.32.43/kernel/trace/trace_workqueue.c linux-2.6.32.43/kernel/trace/trace_workqueue.c
67090--- linux-2.6.32.43/kernel/trace/trace_workqueue.c 2011-03-27 14:31:47.000000000 -0400
67091+++ linux-2.6.32.43/kernel/trace/trace_workqueue.c 2011-04-17 15:56:46.000000000 -0400
67092@@ -21,7 +21,7 @@ struct cpu_workqueue_stats {
67093 int cpu;
67094 pid_t pid;
67095 /* Can be inserted from interrupt or user context, need to be atomic */
67096- atomic_t inserted;
67097+ atomic_unchecked_t inserted;
67098 /*
67099 * Don't need to be atomic, works are serialized in a single workqueue thread
67100 * on a single CPU.
67101@@ -58,7 +58,7 @@ probe_workqueue_insertion(struct task_st
67102 spin_lock_irqsave(&workqueue_cpu_stat(cpu)->lock, flags);
67103 list_for_each_entry(node, &workqueue_cpu_stat(cpu)->list, list) {
67104 if (node->pid == wq_thread->pid) {
67105- atomic_inc(&node->inserted);
67106+ atomic_inc_unchecked(&node->inserted);
67107 goto found;
67108 }
67109 }
67110@@ -205,7 +205,7 @@ static int workqueue_stat_show(struct se
67111 tsk = get_pid_task(pid, PIDTYPE_PID);
67112 if (tsk) {
67113 seq_printf(s, "%3d %6d %6u %s\n", cws->cpu,
67114- atomic_read(&cws->inserted), cws->executed,
67115+ atomic_read_unchecked(&cws->inserted), cws->executed,
67116 tsk->comm);
67117 put_task_struct(tsk);
67118 }
67119diff -urNp linux-2.6.32.43/kernel/user.c linux-2.6.32.43/kernel/user.c
67120--- linux-2.6.32.43/kernel/user.c 2011-03-27 14:31:47.000000000 -0400
67121+++ linux-2.6.32.43/kernel/user.c 2011-04-17 15:56:46.000000000 -0400
67122@@ -159,6 +159,7 @@ struct user_struct *alloc_uid(struct use
67123 spin_lock_irq(&uidhash_lock);
67124 up = uid_hash_find(uid, hashent);
67125 if (up) {
67126+ put_user_ns(ns);
67127 key_put(new->uid_keyring);
67128 key_put(new->session_keyring);
67129 kmem_cache_free(uid_cachep, new);
67130diff -urNp linux-2.6.32.43/lib/bug.c linux-2.6.32.43/lib/bug.c
67131--- linux-2.6.32.43/lib/bug.c 2011-03-27 14:31:47.000000000 -0400
67132+++ linux-2.6.32.43/lib/bug.c 2011-04-17 15:56:46.000000000 -0400
67133@@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l
67134 return BUG_TRAP_TYPE_NONE;
67135
67136 bug = find_bug(bugaddr);
67137+ if (!bug)
67138+ return BUG_TRAP_TYPE_NONE;
67139
67140 printk(KERN_EMERG "------------[ cut here ]------------\n");
67141
67142diff -urNp linux-2.6.32.43/lib/debugobjects.c linux-2.6.32.43/lib/debugobjects.c
67143--- linux-2.6.32.43/lib/debugobjects.c 2011-07-13 17:23:04.000000000 -0400
67144+++ linux-2.6.32.43/lib/debugobjects.c 2011-07-13 17:23:19.000000000 -0400
67145@@ -277,7 +277,7 @@ static void debug_object_is_on_stack(voi
67146 if (limit > 4)
67147 return;
67148
67149- is_on_stack = object_is_on_stack(addr);
67150+ is_on_stack = object_starts_on_stack(addr);
67151 if (is_on_stack == onstack)
67152 return;
67153
67154diff -urNp linux-2.6.32.43/lib/dma-debug.c linux-2.6.32.43/lib/dma-debug.c
67155--- linux-2.6.32.43/lib/dma-debug.c 2011-03-27 14:31:47.000000000 -0400
67156+++ linux-2.6.32.43/lib/dma-debug.c 2011-04-17 15:56:46.000000000 -0400
67157@@ -861,7 +861,7 @@ out:
67158
67159 static void check_for_stack(struct device *dev, void *addr)
67160 {
67161- if (object_is_on_stack(addr))
67162+ if (object_starts_on_stack(addr))
67163 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
67164 "stack [addr=%p]\n", addr);
67165 }
67166diff -urNp linux-2.6.32.43/lib/idr.c linux-2.6.32.43/lib/idr.c
67167--- linux-2.6.32.43/lib/idr.c 2011-03-27 14:31:47.000000000 -0400
67168+++ linux-2.6.32.43/lib/idr.c 2011-04-17 15:56:46.000000000 -0400
67169@@ -156,7 +156,7 @@ static int sub_alloc(struct idr *idp, in
67170 id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
67171
67172 /* if already at the top layer, we need to grow */
67173- if (id >= 1 << (idp->layers * IDR_BITS)) {
67174+ if (id >= (1 << (idp->layers * IDR_BITS))) {
67175 *starting_id = id;
67176 return IDR_NEED_TO_GROW;
67177 }
67178diff -urNp linux-2.6.32.43/lib/inflate.c linux-2.6.32.43/lib/inflate.c
67179--- linux-2.6.32.43/lib/inflate.c 2011-03-27 14:31:47.000000000 -0400
67180+++ linux-2.6.32.43/lib/inflate.c 2011-04-17 15:56:46.000000000 -0400
67181@@ -266,7 +266,7 @@ static void free(void *where)
67182 malloc_ptr = free_mem_ptr;
67183 }
67184 #else
67185-#define malloc(a) kmalloc(a, GFP_KERNEL)
67186+#define malloc(a) kmalloc((a), GFP_KERNEL)
67187 #define free(a) kfree(a)
67188 #endif
67189
67190diff -urNp linux-2.6.32.43/lib/Kconfig.debug linux-2.6.32.43/lib/Kconfig.debug
67191--- linux-2.6.32.43/lib/Kconfig.debug 2011-03-27 14:31:47.000000000 -0400
67192+++ linux-2.6.32.43/lib/Kconfig.debug 2011-04-17 15:56:46.000000000 -0400
67193@@ -905,7 +905,7 @@ config LATENCYTOP
67194 select STACKTRACE
67195 select SCHEDSTATS
67196 select SCHED_DEBUG
67197- depends on HAVE_LATENCYTOP_SUPPORT
67198+ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM
67199 help
67200 Enable this option if you want to use the LatencyTOP tool
67201 to find out which userspace is blocking on what kernel operations.
67202diff -urNp linux-2.6.32.43/lib/kobject.c linux-2.6.32.43/lib/kobject.c
67203--- linux-2.6.32.43/lib/kobject.c 2011-03-27 14:31:47.000000000 -0400
67204+++ linux-2.6.32.43/lib/kobject.c 2011-04-17 15:56:46.000000000 -0400
67205@@ -700,7 +700,7 @@ static ssize_t kobj_attr_store(struct ko
67206 return ret;
67207 }
67208
67209-struct sysfs_ops kobj_sysfs_ops = {
67210+const struct sysfs_ops kobj_sysfs_ops = {
67211 .show = kobj_attr_show,
67212 .store = kobj_attr_store,
67213 };
67214@@ -789,7 +789,7 @@ static struct kobj_type kset_ktype = {
67215 * If the kset was not able to be created, NULL will be returned.
67216 */
67217 static struct kset *kset_create(const char *name,
67218- struct kset_uevent_ops *uevent_ops,
67219+ const struct kset_uevent_ops *uevent_ops,
67220 struct kobject *parent_kobj)
67221 {
67222 struct kset *kset;
67223@@ -832,7 +832,7 @@ static struct kset *kset_create(const ch
67224 * If the kset was not able to be created, NULL will be returned.
67225 */
67226 struct kset *kset_create_and_add(const char *name,
67227- struct kset_uevent_ops *uevent_ops,
67228+ const struct kset_uevent_ops *uevent_ops,
67229 struct kobject *parent_kobj)
67230 {
67231 struct kset *kset;
67232diff -urNp linux-2.6.32.43/lib/kobject_uevent.c linux-2.6.32.43/lib/kobject_uevent.c
67233--- linux-2.6.32.43/lib/kobject_uevent.c 2011-03-27 14:31:47.000000000 -0400
67234+++ linux-2.6.32.43/lib/kobject_uevent.c 2011-04-17 15:56:46.000000000 -0400
67235@@ -95,7 +95,7 @@ int kobject_uevent_env(struct kobject *k
67236 const char *subsystem;
67237 struct kobject *top_kobj;
67238 struct kset *kset;
67239- struct kset_uevent_ops *uevent_ops;
67240+ const struct kset_uevent_ops *uevent_ops;
67241 u64 seq;
67242 int i = 0;
67243 int retval = 0;
67244diff -urNp linux-2.6.32.43/lib/kref.c linux-2.6.32.43/lib/kref.c
67245--- linux-2.6.32.43/lib/kref.c 2011-03-27 14:31:47.000000000 -0400
67246+++ linux-2.6.32.43/lib/kref.c 2011-04-17 15:56:46.000000000 -0400
67247@@ -61,7 +61,7 @@ void kref_get(struct kref *kref)
67248 */
67249 int kref_put(struct kref *kref, void (*release)(struct kref *kref))
67250 {
67251- WARN_ON(release == NULL);
67252+ BUG_ON(release == NULL);
67253 WARN_ON(release == (void (*)(struct kref *))kfree);
67254
67255 if (atomic_dec_and_test(&kref->refcount)) {
67256diff -urNp linux-2.6.32.43/lib/Makefile linux-2.6.32.43/lib/Makefile
67257--- linux-2.6.32.43/lib/Makefile 2011-03-27 14:31:47.000000000 -0400
67258+++ linux-2.6.32.43/lib/Makefile 2011-08-07 19:48:09.000000000 -0400
67259@@ -10,7 +10,7 @@ endif
67260 lib-y := ctype.o string.o vsprintf.o cmdline.o \
67261 rbtree.o radix-tree.o dump_stack.o \
67262 idr.o int_sqrt.o extable.o prio_tree.o \
67263- sha1.o irq_regs.o reciprocal_div.o argv_split.o \
67264+ sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
67265 proportions.o prio_heap.o ratelimit.o show_mem.o \
67266 is_single_threaded.o plist.o decompress.o flex_array.o
67267
67268diff -urNp linux-2.6.32.43/lib/md5.c linux-2.6.32.43/lib/md5.c
67269--- linux-2.6.32.43/lib/md5.c 1969-12-31 19:00:00.000000000 -0500
67270+++ linux-2.6.32.43/lib/md5.c 2011-08-07 19:48:09.000000000 -0400
67271@@ -0,0 +1,95 @@
67272+#include <linux/kernel.h>
67273+#include <linux/module.h>
67274+#include <linux/cryptohash.h>
67275+
67276+#define F1(x, y, z) (z ^ (x & (y ^ z)))
67277+#define F2(x, y, z) F1(z, x, y)
67278+#define F3(x, y, z) (x ^ y ^ z)
67279+#define F4(x, y, z) (y ^ (x | ~z))
67280+
67281+#define MD5STEP(f, w, x, y, z, in, s) \
67282+ (w += f(x, y, z) + in, w = (w<<s | w>>(32-s)) + x)
67283+
67284+void md5_transform(__u32 *hash, __u32 const *in)
67285+{
67286+ u32 a, b, c, d;
67287+
67288+ a = hash[0];
67289+ b = hash[1];
67290+ c = hash[2];
67291+ d = hash[3];
67292+
67293+ MD5STEP(F1, a, b, c, d, in[0] + 0xd76aa478, 7);
67294+ MD5STEP(F1, d, a, b, c, in[1] + 0xe8c7b756, 12);
67295+ MD5STEP(F1, c, d, a, b, in[2] + 0x242070db, 17);
67296+ MD5STEP(F1, b, c, d, a, in[3] + 0xc1bdceee, 22);
67297+ MD5STEP(F1, a, b, c, d, in[4] + 0xf57c0faf, 7);
67298+ MD5STEP(F1, d, a, b, c, in[5] + 0x4787c62a, 12);
67299+ MD5STEP(F1, c, d, a, b, in[6] + 0xa8304613, 17);
67300+ MD5STEP(F1, b, c, d, a, in[7] + 0xfd469501, 22);
67301+ MD5STEP(F1, a, b, c, d, in[8] + 0x698098d8, 7);
67302+ MD5STEP(F1, d, a, b, c, in[9] + 0x8b44f7af, 12);
67303+ MD5STEP(F1, c, d, a, b, in[10] + 0xffff5bb1, 17);
67304+ MD5STEP(F1, b, c, d, a, in[11] + 0x895cd7be, 22);
67305+ MD5STEP(F1, a, b, c, d, in[12] + 0x6b901122, 7);
67306+ MD5STEP(F1, d, a, b, c, in[13] + 0xfd987193, 12);
67307+ MD5STEP(F1, c, d, a, b, in[14] + 0xa679438e, 17);
67308+ MD5STEP(F1, b, c, d, a, in[15] + 0x49b40821, 22);
67309+
67310+ MD5STEP(F2, a, b, c, d, in[1] + 0xf61e2562, 5);
67311+ MD5STEP(F2, d, a, b, c, in[6] + 0xc040b340, 9);
67312+ MD5STEP(F2, c, d, a, b, in[11] + 0x265e5a51, 14);
67313+ MD5STEP(F2, b, c, d, a, in[0] + 0xe9b6c7aa, 20);
67314+ MD5STEP(F2, a, b, c, d, in[5] + 0xd62f105d, 5);
67315+ MD5STEP(F2, d, a, b, c, in[10] + 0x02441453, 9);
67316+ MD5STEP(F2, c, d, a, b, in[15] + 0xd8a1e681, 14);
67317+ MD5STEP(F2, b, c, d, a, in[4] + 0xe7d3fbc8, 20);
67318+ MD5STEP(F2, a, b, c, d, in[9] + 0x21e1cde6, 5);
67319+ MD5STEP(F2, d, a, b, c, in[14] + 0xc33707d6, 9);
67320+ MD5STEP(F2, c, d, a, b, in[3] + 0xf4d50d87, 14);
67321+ MD5STEP(F2, b, c, d, a, in[8] + 0x455a14ed, 20);
67322+ MD5STEP(F2, a, b, c, d, in[13] + 0xa9e3e905, 5);
67323+ MD5STEP(F2, d, a, b, c, in[2] + 0xfcefa3f8, 9);
67324+ MD5STEP(F2, c, d, a, b, in[7] + 0x676f02d9, 14);
67325+ MD5STEP(F2, b, c, d, a, in[12] + 0x8d2a4c8a, 20);
67326+
67327+ MD5STEP(F3, a, b, c, d, in[5] + 0xfffa3942, 4);
67328+ MD5STEP(F3, d, a, b, c, in[8] + 0x8771f681, 11);
67329+ MD5STEP(F3, c, d, a, b, in[11] + 0x6d9d6122, 16);
67330+ MD5STEP(F3, b, c, d, a, in[14] + 0xfde5380c, 23);
67331+ MD5STEP(F3, a, b, c, d, in[1] + 0xa4beea44, 4);
67332+ MD5STEP(F3, d, a, b, c, in[4] + 0x4bdecfa9, 11);
67333+ MD5STEP(F3, c, d, a, b, in[7] + 0xf6bb4b60, 16);
67334+ MD5STEP(F3, b, c, d, a, in[10] + 0xbebfbc70, 23);
67335+ MD5STEP(F3, a, b, c, d, in[13] + 0x289b7ec6, 4);
67336+ MD5STEP(F3, d, a, b, c, in[0] + 0xeaa127fa, 11);
67337+ MD5STEP(F3, c, d, a, b, in[3] + 0xd4ef3085, 16);
67338+ MD5STEP(F3, b, c, d, a, in[6] + 0x04881d05, 23);
67339+ MD5STEP(F3, a, b, c, d, in[9] + 0xd9d4d039, 4);
67340+ MD5STEP(F3, d, a, b, c, in[12] + 0xe6db99e5, 11);
67341+ MD5STEP(F3, c, d, a, b, in[15] + 0x1fa27cf8, 16);
67342+ MD5STEP(F3, b, c, d, a, in[2] + 0xc4ac5665, 23);
67343+
67344+ MD5STEP(F4, a, b, c, d, in[0] + 0xf4292244, 6);
67345+ MD5STEP(F4, d, a, b, c, in[7] + 0x432aff97, 10);
67346+ MD5STEP(F4, c, d, a, b, in[14] + 0xab9423a7, 15);
67347+ MD5STEP(F4, b, c, d, a, in[5] + 0xfc93a039, 21);
67348+ MD5STEP(F4, a, b, c, d, in[12] + 0x655b59c3, 6);
67349+ MD5STEP(F4, d, a, b, c, in[3] + 0x8f0ccc92, 10);
67350+ MD5STEP(F4, c, d, a, b, in[10] + 0xffeff47d, 15);
67351+ MD5STEP(F4, b, c, d, a, in[1] + 0x85845dd1, 21);
67352+ MD5STEP(F4, a, b, c, d, in[8] + 0x6fa87e4f, 6);
67353+ MD5STEP(F4, d, a, b, c, in[15] + 0xfe2ce6e0, 10);
67354+ MD5STEP(F4, c, d, a, b, in[6] + 0xa3014314, 15);
67355+ MD5STEP(F4, b, c, d, a, in[13] + 0x4e0811a1, 21);
67356+ MD5STEP(F4, a, b, c, d, in[4] + 0xf7537e82, 6);
67357+ MD5STEP(F4, d, a, b, c, in[11] + 0xbd3af235, 10);
67358+ MD5STEP(F4, c, d, a, b, in[2] + 0x2ad7d2bb, 15);
67359+ MD5STEP(F4, b, c, d, a, in[9] + 0xeb86d391, 21);
67360+
67361+ hash[0] += a;
67362+ hash[1] += b;
67363+ hash[2] += c;
67364+ hash[3] += d;
67365+}
67366+EXPORT_SYMBOL(md5_transform);
67367diff -urNp linux-2.6.32.43/lib/parser.c linux-2.6.32.43/lib/parser.c
67368--- linux-2.6.32.43/lib/parser.c 2011-03-27 14:31:47.000000000 -0400
67369+++ linux-2.6.32.43/lib/parser.c 2011-04-17 15:56:46.000000000 -0400
67370@@ -126,7 +126,7 @@ static int match_number(substring_t *s,
67371 char *buf;
67372 int ret;
67373
67374- buf = kmalloc(s->to - s->from + 1, GFP_KERNEL);
67375+ buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL);
67376 if (!buf)
67377 return -ENOMEM;
67378 memcpy(buf, s->from, s->to - s->from);
67379diff -urNp linux-2.6.32.43/lib/radix-tree.c linux-2.6.32.43/lib/radix-tree.c
67380--- linux-2.6.32.43/lib/radix-tree.c 2011-03-27 14:31:47.000000000 -0400
67381+++ linux-2.6.32.43/lib/radix-tree.c 2011-04-17 15:56:46.000000000 -0400
67382@@ -81,7 +81,7 @@ struct radix_tree_preload {
67383 int nr;
67384 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH];
67385 };
67386-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
67387+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
67388
67389 static inline gfp_t root_gfp_mask(struct radix_tree_root *root)
67390 {
67391diff -urNp linux-2.6.32.43/lib/random32.c linux-2.6.32.43/lib/random32.c
67392--- linux-2.6.32.43/lib/random32.c 2011-03-27 14:31:47.000000000 -0400
67393+++ linux-2.6.32.43/lib/random32.c 2011-04-17 15:56:46.000000000 -0400
67394@@ -61,7 +61,7 @@ static u32 __random32(struct rnd_state *
67395 */
67396 static inline u32 __seed(u32 x, u32 m)
67397 {
67398- return (x < m) ? x + m : x;
67399+ return (x <= m) ? x + m + 1 : x;
67400 }
67401
67402 /**
67403diff -urNp linux-2.6.32.43/lib/vsprintf.c linux-2.6.32.43/lib/vsprintf.c
67404--- linux-2.6.32.43/lib/vsprintf.c 2011-03-27 14:31:47.000000000 -0400
67405+++ linux-2.6.32.43/lib/vsprintf.c 2011-04-17 15:56:46.000000000 -0400
67406@@ -16,6 +16,9 @@
67407 * - scnprintf and vscnprintf
67408 */
67409
67410+#ifdef CONFIG_GRKERNSEC_HIDESYM
67411+#define __INCLUDED_BY_HIDESYM 1
67412+#endif
67413 #include <stdarg.h>
67414 #include <linux/module.h>
67415 #include <linux/types.h>
67416@@ -546,12 +549,12 @@ static char *number(char *buf, char *end
67417 return buf;
67418 }
67419
67420-static char *string(char *buf, char *end, char *s, struct printf_spec spec)
67421+static char *string(char *buf, char *end, const char *s, struct printf_spec spec)
67422 {
67423 int len, i;
67424
67425 if ((unsigned long)s < PAGE_SIZE)
67426- s = "<NULL>";
67427+ s = "(null)";
67428
67429 len = strnlen(s, spec.precision);
67430
67431@@ -581,7 +584,7 @@ static char *symbol_string(char *buf, ch
67432 unsigned long value = (unsigned long) ptr;
67433 #ifdef CONFIG_KALLSYMS
67434 char sym[KSYM_SYMBOL_LEN];
67435- if (ext != 'f' && ext != 's')
67436+ if (ext != 'f' && ext != 's' && ext != 'a')
67437 sprint_symbol(sym, value);
67438 else
67439 kallsyms_lookup(value, NULL, NULL, NULL, sym);
67440@@ -801,6 +804,8 @@ static char *ip4_addr_string(char *buf,
67441 * - 'f' For simple symbolic function names without offset
67442 * - 'S' For symbolic direct pointers with offset
67443 * - 's' For symbolic direct pointers without offset
67444+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
67445+ * - 'a' For symbolic direct pointers without offset approved for use with GRKERNSEC_HIDESYM
67446 * - 'R' For a struct resource pointer, it prints the range of
67447 * addresses (not the name nor the flags)
67448 * - 'M' For a 6-byte MAC address, it prints the address in the
67449@@ -822,7 +827,7 @@ static char *pointer(const char *fmt, ch
67450 struct printf_spec spec)
67451 {
67452 if (!ptr)
67453- return string(buf, end, "(null)", spec);
67454+ return string(buf, end, "(nil)", spec);
67455
67456 switch (*fmt) {
67457 case 'F':
67458@@ -831,6 +836,14 @@ static char *pointer(const char *fmt, ch
67459 case 's':
67460 /* Fallthrough */
67461 case 'S':
67462+#ifdef CONFIG_GRKERNSEC_HIDESYM
67463+ break;
67464+#else
67465+ return symbol_string(buf, end, ptr, spec, *fmt);
67466+#endif
67467+ case 'a':
67468+ /* Fallthrough */
67469+ case 'A':
67470 return symbol_string(buf, end, ptr, spec, *fmt);
67471 case 'R':
67472 return resource_string(buf, end, ptr, spec);
67473@@ -1445,7 +1458,7 @@ do { \
67474 size_t len;
67475 if ((unsigned long)save_str > (unsigned long)-PAGE_SIZE
67476 || (unsigned long)save_str < PAGE_SIZE)
67477- save_str = "<NULL>";
67478+ save_str = "(null)";
67479 len = strlen(save_str);
67480 if (str + len + 1 < end)
67481 memcpy(str, save_str, len + 1);
67482@@ -1555,11 +1568,11 @@ int bstr_printf(char *buf, size_t size,
67483 typeof(type) value; \
67484 if (sizeof(type) == 8) { \
67485 args = PTR_ALIGN(args, sizeof(u32)); \
67486- *(u32 *)&value = *(u32 *)args; \
67487- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
67488+ *(u32 *)&value = *(const u32 *)args; \
67489+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
67490 } else { \
67491 args = PTR_ALIGN(args, sizeof(type)); \
67492- value = *(typeof(type) *)args; \
67493+ value = *(const typeof(type) *)args; \
67494 } \
67495 args += sizeof(type); \
67496 value; \
67497@@ -1622,7 +1635,7 @@ int bstr_printf(char *buf, size_t size,
67498 const char *str_arg = args;
67499 size_t len = strlen(str_arg);
67500 args += len + 1;
67501- str = string(str, end, (char *)str_arg, spec);
67502+ str = string(str, end, str_arg, spec);
67503 break;
67504 }
67505
67506diff -urNp linux-2.6.32.43/localversion-grsec linux-2.6.32.43/localversion-grsec
67507--- linux-2.6.32.43/localversion-grsec 1969-12-31 19:00:00.000000000 -0500
67508+++ linux-2.6.32.43/localversion-grsec 2011-04-17 15:56:46.000000000 -0400
67509@@ -0,0 +1 @@
67510+-grsec
67511diff -urNp linux-2.6.32.43/Makefile linux-2.6.32.43/Makefile
67512--- linux-2.6.32.43/Makefile 2011-07-13 17:23:04.000000000 -0400
67513+++ linux-2.6.32.43/Makefile 2011-08-07 14:32:43.000000000 -0400
67514@@ -221,8 +221,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH"
67515
67516 HOSTCC = gcc
67517 HOSTCXX = g++
67518-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
67519-HOSTCXXFLAGS = -O2
67520+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
67521+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
67522+HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks
67523
67524 # Decide whether to build built-in, modular, or both.
67525 # Normally, just do built-in.
67526@@ -342,10 +343,12 @@ LINUXINCLUDE := -Iinclude \
67527 KBUILD_CPPFLAGS := -D__KERNEL__
67528
67529 KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
67530+ -W -Wno-unused-parameter -Wno-missing-field-initializers \
67531 -fno-strict-aliasing -fno-common \
67532 -Werror-implicit-function-declaration \
67533 -Wno-format-security \
67534 -fno-delete-null-pointer-checks
67535+KBUILD_CFLAGS += $(call cc-option, -Wno-empty-body)
67536 KBUILD_AFLAGS := -D__ASSEMBLY__
67537
67538 # Read KERNELRELEASE from include/config/kernel.release (if it exists)
67539@@ -376,8 +379,8 @@ export RCS_TAR_IGNORE := --exclude SCCS
67540 # Rules shared between *config targets and build targets
67541
67542 # Basic helpers built in scripts/
67543-PHONY += scripts_basic
67544-scripts_basic:
67545+PHONY += scripts_basic gcc-plugins
67546+scripts_basic: gcc-plugins
67547 $(Q)$(MAKE) $(build)=scripts/basic
67548
67549 # To avoid any implicit rule to kick in, define an empty command.
67550@@ -403,7 +406,7 @@ endif
67551 # of make so .config is not included in this case either (for *config).
67552
67553 no-dot-config-targets := clean mrproper distclean \
67554- cscope TAGS tags help %docs check% \
67555+ cscope gtags TAGS tags help %docs check% \
67556 include/linux/version.h headers_% \
67557 kernelrelease kernelversion
67558
67559@@ -526,6 +529,25 @@ else
67560 KBUILD_CFLAGS += -O2
67561 endif
67562
67563+ifeq ($(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh $(HOSTCC)), y)
67564+CONSTIFY_PLUGIN := -fplugin=$(objtree)/tools/gcc/constify_plugin.so
67565+ifdef CONFIG_PAX_MEMORY_STACKLEAK
67566+STACKLEAK_PLUGIN := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -fplugin-arg-stackleak_plugin-track-lowest-sp=100
67567+endif
67568+KBUILD_CFLAGS += $(CONSTIFY_PLUGIN) $(STACKLEAK_PLUGIN)
67569+export CONSTIFY_PLUGIN STACKLEAK_PLUGIN
67570+gcc-plugins:
67571+ $(Q)$(MAKE) $(build)=tools/gcc
67572+else
67573+gcc-plugins:
67574+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
67575+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev.)
67576+else
67577+ $(Q)echo "warning: your gcc version does not support plugins; you should upgrade to gcc 4.5 or newer"
67578+endif
67579+ $(Q)echo "PAX_MEMORY_STACKLEAK and constification will be less secure"
67580+endif
67581+
67582 include $(srctree)/arch/$(SRCARCH)/Makefile
67583
67584 ifneq ($(CONFIG_FRAME_WARN),0)
67585@@ -644,7 +666,7 @@ export mod_strip_cmd
67586
67587
67588 ifeq ($(KBUILD_EXTMOD),)
67589-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
67590+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
67591
67592 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
67593 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
67594@@ -970,7 +992,7 @@ ifneq ($(KBUILD_SRC),)
67595 endif
67596
67597 # prepare2 creates a makefile if using a separate output directory
67598-prepare2: prepare3 outputmakefile
67599+prepare2: prepare3 outputmakefile gcc-plugins
67600
67601 prepare1: prepare2 include/linux/version.h include/linux/utsrelease.h \
67602 include/asm include/config/auto.conf
67603@@ -1198,7 +1220,7 @@ MRPROPER_FILES += .config .config.old in
67604 include/linux/autoconf.h include/linux/version.h \
67605 include/linux/utsrelease.h \
67606 include/linux/bounds.h include/asm*/asm-offsets.h \
67607- Module.symvers Module.markers tags TAGS cscope*
67608+ Module.symvers Module.markers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS
67609
67610 # clean - Delete most, but leave enough to build external modules
67611 #
67612@@ -1289,6 +1311,7 @@ help:
67613 @echo ' modules_prepare - Set up for building external modules'
67614 @echo ' tags/TAGS - Generate tags file for editors'
67615 @echo ' cscope - Generate cscope index'
67616+ @echo ' gtags - Generate GNU GLOBAL index'
67617 @echo ' kernelrelease - Output the release version string'
67618 @echo ' kernelversion - Output the version stored in Makefile'
67619 @echo ' headers_install - Install sanitised kernel headers to INSTALL_HDR_PATH'; \
67620@@ -1421,7 +1444,7 @@ clean: $(clean-dirs)
67621 $(call cmd,rmdirs)
67622 $(call cmd,rmfiles)
67623 @find $(KBUILD_EXTMOD) $(RCS_FIND_IGNORE) \
67624- \( -name '*.[oas]' -o -name '*.ko' -o -name '.*.cmd' \
67625+ \( -name '*.[oas]' -o -name '*.[ks]o' -o -name '.*.cmd' \
67626 -o -name '.*.d' -o -name '.*.tmp' -o -name '*.mod.c' \
67627 -o -name '*.gcno' \) -type f -print | xargs rm -f
67628
67629@@ -1445,7 +1468,7 @@ endif # KBUILD_EXTMOD
67630 quiet_cmd_tags = GEN $@
67631 cmd_tags = $(CONFIG_SHELL) $(srctree)/scripts/tags.sh $@
67632
67633-tags TAGS cscope: FORCE
67634+tags TAGS cscope gtags: FORCE
67635 $(call cmd,tags)
67636
67637 # Scripts to check various things for consistency
67638diff -urNp linux-2.6.32.43/mm/backing-dev.c linux-2.6.32.43/mm/backing-dev.c
67639--- linux-2.6.32.43/mm/backing-dev.c 2011-03-27 14:31:47.000000000 -0400
67640+++ linux-2.6.32.43/mm/backing-dev.c 2011-05-04 17:56:28.000000000 -0400
67641@@ -484,7 +484,7 @@ static void bdi_add_to_pending(struct rc
67642 * Add the default flusher task that gets created for any bdi
67643 * that has dirty data pending writeout
67644 */
67645-void static bdi_add_default_flusher_task(struct backing_dev_info *bdi)
67646+static void bdi_add_default_flusher_task(struct backing_dev_info *bdi)
67647 {
67648 if (!bdi_cap_writeback_dirty(bdi))
67649 return;
67650diff -urNp linux-2.6.32.43/mm/filemap.c linux-2.6.32.43/mm/filemap.c
67651--- linux-2.6.32.43/mm/filemap.c 2011-03-27 14:31:47.000000000 -0400
67652+++ linux-2.6.32.43/mm/filemap.c 2011-04-17 15:56:46.000000000 -0400
67653@@ -1631,7 +1631,7 @@ int generic_file_mmap(struct file * file
67654 struct address_space *mapping = file->f_mapping;
67655
67656 if (!mapping->a_ops->readpage)
67657- return -ENOEXEC;
67658+ return -ENODEV;
67659 file_accessed(file);
67660 vma->vm_ops = &generic_file_vm_ops;
67661 vma->vm_flags |= VM_CAN_NONLINEAR;
67662@@ -2027,6 +2027,7 @@ inline int generic_write_checks(struct f
67663 *pos = i_size_read(inode);
67664
67665 if (limit != RLIM_INFINITY) {
67666+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
67667 if (*pos >= limit) {
67668 send_sig(SIGXFSZ, current, 0);
67669 return -EFBIG;
67670diff -urNp linux-2.6.32.43/mm/fremap.c linux-2.6.32.43/mm/fremap.c
67671--- linux-2.6.32.43/mm/fremap.c 2011-03-27 14:31:47.000000000 -0400
67672+++ linux-2.6.32.43/mm/fremap.c 2011-04-17 15:56:46.000000000 -0400
67673@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
67674 retry:
67675 vma = find_vma(mm, start);
67676
67677+#ifdef CONFIG_PAX_SEGMEXEC
67678+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
67679+ goto out;
67680+#endif
67681+
67682 /*
67683 * Make sure the vma is shared, that it supports prefaulting,
67684 * and that the remapped range is valid and fully within
67685@@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign
67686 /*
67687 * drop PG_Mlocked flag for over-mapped range
67688 */
67689- unsigned int saved_flags = vma->vm_flags;
67690+ unsigned long saved_flags = vma->vm_flags;
67691 munlock_vma_pages_range(vma, start, start + size);
67692 vma->vm_flags = saved_flags;
67693 }
67694diff -urNp linux-2.6.32.43/mm/highmem.c linux-2.6.32.43/mm/highmem.c
67695--- linux-2.6.32.43/mm/highmem.c 2011-03-27 14:31:47.000000000 -0400
67696+++ linux-2.6.32.43/mm/highmem.c 2011-04-17 15:56:46.000000000 -0400
67697@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void)
67698 * So no dangers, even with speculative execution.
67699 */
67700 page = pte_page(pkmap_page_table[i]);
67701+ pax_open_kernel();
67702 pte_clear(&init_mm, (unsigned long)page_address(page),
67703 &pkmap_page_table[i]);
67704-
67705+ pax_close_kernel();
67706 set_page_address(page, NULL);
67707 need_flush = 1;
67708 }
67709@@ -177,9 +178,11 @@ start:
67710 }
67711 }
67712 vaddr = PKMAP_ADDR(last_pkmap_nr);
67713+
67714+ pax_open_kernel();
67715 set_pte_at(&init_mm, vaddr,
67716 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
67717-
67718+ pax_close_kernel();
67719 pkmap_count[last_pkmap_nr] = 1;
67720 set_page_address(page, (void *)vaddr);
67721
67722diff -urNp linux-2.6.32.43/mm/hugetlb.c linux-2.6.32.43/mm/hugetlb.c
67723--- linux-2.6.32.43/mm/hugetlb.c 2011-07-13 17:23:04.000000000 -0400
67724+++ linux-2.6.32.43/mm/hugetlb.c 2011-07-13 17:23:19.000000000 -0400
67725@@ -1933,6 +1933,26 @@ static int unmap_ref_private(struct mm_s
67726 return 1;
67727 }
67728
67729+#ifdef CONFIG_PAX_SEGMEXEC
67730+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
67731+{
67732+ struct mm_struct *mm = vma->vm_mm;
67733+ struct vm_area_struct *vma_m;
67734+ unsigned long address_m;
67735+ pte_t *ptep_m;
67736+
67737+ vma_m = pax_find_mirror_vma(vma);
67738+ if (!vma_m)
67739+ return;
67740+
67741+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
67742+ address_m = address + SEGMEXEC_TASK_SIZE;
67743+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
67744+ get_page(page_m);
67745+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
67746+}
67747+#endif
67748+
67749 static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
67750 unsigned long address, pte_t *ptep, pte_t pte,
67751 struct page *pagecache_page)
67752@@ -2004,6 +2024,11 @@ retry_avoidcopy:
67753 huge_ptep_clear_flush(vma, address, ptep);
67754 set_huge_pte_at(mm, address, ptep,
67755 make_huge_pte(vma, new_page, 1));
67756+
67757+#ifdef CONFIG_PAX_SEGMEXEC
67758+ pax_mirror_huge_pte(vma, address, new_page);
67759+#endif
67760+
67761 /* Make the old page be freed below */
67762 new_page = old_page;
67763 }
67764@@ -2135,6 +2160,10 @@ retry:
67765 && (vma->vm_flags & VM_SHARED)));
67766 set_huge_pte_at(mm, address, ptep, new_pte);
67767
67768+#ifdef CONFIG_PAX_SEGMEXEC
67769+ pax_mirror_huge_pte(vma, address, page);
67770+#endif
67771+
67772 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
67773 /* Optimization, do the COW without a second fault */
67774 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
67775@@ -2163,6 +2192,28 @@ int hugetlb_fault(struct mm_struct *mm,
67776 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
67777 struct hstate *h = hstate_vma(vma);
67778
67779+#ifdef CONFIG_PAX_SEGMEXEC
67780+ struct vm_area_struct *vma_m;
67781+
67782+ vma_m = pax_find_mirror_vma(vma);
67783+ if (vma_m) {
67784+ unsigned long address_m;
67785+
67786+ if (vma->vm_start > vma_m->vm_start) {
67787+ address_m = address;
67788+ address -= SEGMEXEC_TASK_SIZE;
67789+ vma = vma_m;
67790+ h = hstate_vma(vma);
67791+ } else
67792+ address_m = address + SEGMEXEC_TASK_SIZE;
67793+
67794+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
67795+ return VM_FAULT_OOM;
67796+ address_m &= HPAGE_MASK;
67797+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
67798+ }
67799+#endif
67800+
67801 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
67802 if (!ptep)
67803 return VM_FAULT_OOM;
67804diff -urNp linux-2.6.32.43/mm/internal.h linux-2.6.32.43/mm/internal.h
67805--- linux-2.6.32.43/mm/internal.h 2011-03-27 14:31:47.000000000 -0400
67806+++ linux-2.6.32.43/mm/internal.h 2011-07-09 09:13:08.000000000 -0400
67807@@ -49,6 +49,7 @@ extern void putback_lru_page(struct page
67808 * in mm/page_alloc.c
67809 */
67810 extern void __free_pages_bootmem(struct page *page, unsigned int order);
67811+extern void free_compound_page(struct page *page);
67812 extern void prep_compound_page(struct page *page, unsigned long order);
67813
67814
67815diff -urNp linux-2.6.32.43/mm/Kconfig linux-2.6.32.43/mm/Kconfig
67816--- linux-2.6.32.43/mm/Kconfig 2011-03-27 14:31:47.000000000 -0400
67817+++ linux-2.6.32.43/mm/Kconfig 2011-04-17 15:56:46.000000000 -0400
67818@@ -228,7 +228,7 @@ config KSM
67819 config DEFAULT_MMAP_MIN_ADDR
67820 int "Low address space to protect from user allocation"
67821 depends on MMU
67822- default 4096
67823+ default 65536
67824 help
67825 This is the portion of low virtual memory which should be protected
67826 from userspace allocation. Keeping a user from writing to low pages
67827diff -urNp linux-2.6.32.43/mm/kmemleak.c linux-2.6.32.43/mm/kmemleak.c
67828--- linux-2.6.32.43/mm/kmemleak.c 2011-06-25 12:55:35.000000000 -0400
67829+++ linux-2.6.32.43/mm/kmemleak.c 2011-06-25 12:56:37.000000000 -0400
67830@@ -358,7 +358,7 @@ static void print_unreferenced(struct se
67831
67832 for (i = 0; i < object->trace_len; i++) {
67833 void *ptr = (void *)object->trace[i];
67834- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
67835+ seq_printf(seq, " [<%p>] %pA\n", ptr, ptr);
67836 }
67837 }
67838
67839diff -urNp linux-2.6.32.43/mm/maccess.c linux-2.6.32.43/mm/maccess.c
67840--- linux-2.6.32.43/mm/maccess.c 2011-03-27 14:31:47.000000000 -0400
67841+++ linux-2.6.32.43/mm/maccess.c 2011-04-17 15:56:46.000000000 -0400
67842@@ -14,7 +14,7 @@
67843 * Safely read from address @src to the buffer at @dst. If a kernel fault
67844 * happens, handle that and return -EFAULT.
67845 */
67846-long probe_kernel_read(void *dst, void *src, size_t size)
67847+long probe_kernel_read(void *dst, const void *src, size_t size)
67848 {
67849 long ret;
67850 mm_segment_t old_fs = get_fs();
67851@@ -39,7 +39,7 @@ EXPORT_SYMBOL_GPL(probe_kernel_read);
67852 * Safely write to address @dst from the buffer at @src. If a kernel fault
67853 * happens, handle that and return -EFAULT.
67854 */
67855-long notrace __weak probe_kernel_write(void *dst, void *src, size_t size)
67856+long notrace __weak probe_kernel_write(void *dst, const void *src, size_t size)
67857 {
67858 long ret;
67859 mm_segment_t old_fs = get_fs();
67860diff -urNp linux-2.6.32.43/mm/madvise.c linux-2.6.32.43/mm/madvise.c
67861--- linux-2.6.32.43/mm/madvise.c 2011-03-27 14:31:47.000000000 -0400
67862+++ linux-2.6.32.43/mm/madvise.c 2011-04-17 15:56:46.000000000 -0400
67863@@ -44,6 +44,10 @@ static long madvise_behavior(struct vm_a
67864 pgoff_t pgoff;
67865 unsigned long new_flags = vma->vm_flags;
67866
67867+#ifdef CONFIG_PAX_SEGMEXEC
67868+ struct vm_area_struct *vma_m;
67869+#endif
67870+
67871 switch (behavior) {
67872 case MADV_NORMAL:
67873 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
67874@@ -103,6 +107,13 @@ success:
67875 /*
67876 * vm_flags is protected by the mmap_sem held in write mode.
67877 */
67878+
67879+#ifdef CONFIG_PAX_SEGMEXEC
67880+ vma_m = pax_find_mirror_vma(vma);
67881+ if (vma_m)
67882+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
67883+#endif
67884+
67885 vma->vm_flags = new_flags;
67886
67887 out:
67888@@ -161,6 +172,11 @@ static long madvise_dontneed(struct vm_a
67889 struct vm_area_struct ** prev,
67890 unsigned long start, unsigned long end)
67891 {
67892+
67893+#ifdef CONFIG_PAX_SEGMEXEC
67894+ struct vm_area_struct *vma_m;
67895+#endif
67896+
67897 *prev = vma;
67898 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
67899 return -EINVAL;
67900@@ -173,6 +189,21 @@ static long madvise_dontneed(struct vm_a
67901 zap_page_range(vma, start, end - start, &details);
67902 } else
67903 zap_page_range(vma, start, end - start, NULL);
67904+
67905+#ifdef CONFIG_PAX_SEGMEXEC
67906+ vma_m = pax_find_mirror_vma(vma);
67907+ if (vma_m) {
67908+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
67909+ struct zap_details details = {
67910+ .nonlinear_vma = vma_m,
67911+ .last_index = ULONG_MAX,
67912+ };
67913+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
67914+ } else
67915+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
67916+ }
67917+#endif
67918+
67919 return 0;
67920 }
67921
67922@@ -359,6 +390,16 @@ SYSCALL_DEFINE3(madvise, unsigned long,
67923 if (end < start)
67924 goto out;
67925
67926+#ifdef CONFIG_PAX_SEGMEXEC
67927+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
67928+ if (end > SEGMEXEC_TASK_SIZE)
67929+ goto out;
67930+ } else
67931+#endif
67932+
67933+ if (end > TASK_SIZE)
67934+ goto out;
67935+
67936 error = 0;
67937 if (end == start)
67938 goto out;
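
The madvise() hunks above replay the operation on the SEGMEXEC mirror: flag changes are copied to the mirror vma with the write/accounting bits stripped, zaps are repeated at start + SEGMEXEC_TASK_SIZE, and the range check caps end at SEGMEXEC_TASK_SIZE instead of TASK_SIZE. A small runnable sketch of that address arithmetic; both constants are illustrative i386 values, not taken from this patch:

#include <stdio.h>

#define SEGMEXEC_TASK_SIZE	0x60000000UL	/* illustrative i386 value */
#define TASK_SIZE		0xC0000000UL	/* illustrative i386 value */

/* Under SEGMEXEC only the lower half of the address space is usable;
 * work done on [start, end) is repeated on the range shifted up by
 * SEGMEXEC_TASK_SIZE, where the executable mirror lives. */
static int range_ok(unsigned long end, int segmexec)
{
	return end <= (segmexec ? SEGMEXEC_TASK_SIZE : TASK_SIZE);
}

int main(void)
{
	unsigned long start = 0x08050000UL, end = 0x08060000UL;

	if (range_ok(end, 1))
		printf("zap [%#lx,%#lx) and mirror [%#lx,%#lx)\n",
		       start, end,
		       start + SEGMEXEC_TASK_SIZE, end + SEGMEXEC_TASK_SIZE);
	return 0;
}
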
67939diff -urNp linux-2.6.32.43/mm/memory.c linux-2.6.32.43/mm/memory.c
67940--- linux-2.6.32.43/mm/memory.c 2011-07-13 17:23:04.000000000 -0400
67941+++ linux-2.6.32.43/mm/memory.c 2011-07-13 17:23:23.000000000 -0400
67942@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct
67943 return;
67944
67945 pmd = pmd_offset(pud, start);
67946+
67947+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
67948 pud_clear(pud);
67949 pmd_free_tlb(tlb, pmd, start);
67950+#endif
67951+
67952 }
67953
67954 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
67955@@ -219,9 +223,12 @@ static inline void free_pud_range(struct
67956 if (end - 1 > ceiling - 1)
67957 return;
67958
67959+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
67960 pud = pud_offset(pgd, start);
67961 pgd_clear(pgd);
67962 pud_free_tlb(tlb, pud, start);
67963+#endif
67964+
67965 }
67966
67967 /*
67968@@ -1251,10 +1258,10 @@ int __get_user_pages(struct task_struct
67969 (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
67970 i = 0;
67971
67972- do {
67973+ while (nr_pages) {
67974 struct vm_area_struct *vma;
67975
67976- vma = find_extend_vma(mm, start);
67977+ vma = find_vma(mm, start);
67978 if (!vma && in_gate_area(tsk, start)) {
67979 unsigned long pg = start & PAGE_MASK;
67980 struct vm_area_struct *gate_vma = get_gate_vma(tsk);
67981@@ -1306,7 +1313,7 @@ int __get_user_pages(struct task_struct
67982 continue;
67983 }
67984
67985- if (!vma ||
67986+ if (!vma || start < vma->vm_start ||
67987 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
67988 !(vm_flags & vma->vm_flags))
67989 return i ? : -EFAULT;
67990@@ -1381,7 +1388,7 @@ int __get_user_pages(struct task_struct
67991 start += PAGE_SIZE;
67992 nr_pages--;
67993 } while (nr_pages && start < vma->vm_end);
67994- } while (nr_pages);
67995+ }
67996 return i;
67997 }
67998
67999@@ -1526,6 +1533,10 @@ static int insert_page(struct vm_area_st
68000 page_add_file_rmap(page);
68001 set_pte_at(mm, addr, pte, mk_pte(page, prot));
68002
68003+#ifdef CONFIG_PAX_SEGMEXEC
68004+ pax_mirror_file_pte(vma, addr, page, ptl);
68005+#endif
68006+
68007 retval = 0;
68008 pte_unmap_unlock(pte, ptl);
68009 return retval;
68010@@ -1560,10 +1571,22 @@ out:
68011 int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
68012 struct page *page)
68013 {
68014+
68015+#ifdef CONFIG_PAX_SEGMEXEC
68016+ struct vm_area_struct *vma_m;
68017+#endif
68018+
68019 if (addr < vma->vm_start || addr >= vma->vm_end)
68020 return -EFAULT;
68021 if (!page_count(page))
68022 return -EINVAL;
68023+
68024+#ifdef CONFIG_PAX_SEGMEXEC
68025+ vma_m = pax_find_mirror_vma(vma);
68026+ if (vma_m)
68027+ vma_m->vm_flags |= VM_INSERTPAGE;
68028+#endif
68029+
68030 vma->vm_flags |= VM_INSERTPAGE;
68031 return insert_page(vma, addr, page, vma->vm_page_prot);
68032 }
68033@@ -1649,6 +1672,7 @@ int vm_insert_mixed(struct vm_area_struc
68034 unsigned long pfn)
68035 {
68036 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
68037+ BUG_ON(vma->vm_mirror);
68038
68039 if (addr < vma->vm_start || addr >= vma->vm_end)
68040 return -EFAULT;
68041@@ -1977,6 +2001,186 @@ static inline void cow_user_page(struct
68042 copy_user_highpage(dst, src, va, vma);
68043 }
68044
68045+#ifdef CONFIG_PAX_SEGMEXEC
68046+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
68047+{
68048+ struct mm_struct *mm = vma->vm_mm;
68049+ spinlock_t *ptl;
68050+ pte_t *pte, entry;
68051+
68052+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
68053+ entry = *pte;
68054+ if (!pte_present(entry)) {
68055+ if (!pte_none(entry)) {
68056+ BUG_ON(pte_file(entry));
68057+ free_swap_and_cache(pte_to_swp_entry(entry));
68058+ pte_clear_not_present_full(mm, address, pte, 0);
68059+ }
68060+ } else {
68061+ struct page *page;
68062+
68063+ flush_cache_page(vma, address, pte_pfn(entry));
68064+ entry = ptep_clear_flush(vma, address, pte);
68065+ BUG_ON(pte_dirty(entry));
68066+ page = vm_normal_page(vma, address, entry);
68067+ if (page) {
68068+ update_hiwater_rss(mm);
68069+ if (PageAnon(page))
68070+ dec_mm_counter(mm, anon_rss);
68071+ else
68072+ dec_mm_counter(mm, file_rss);
68073+ page_remove_rmap(page);
68074+ page_cache_release(page);
68075+ }
68076+ }
68077+ pte_unmap_unlock(pte, ptl);
68078+}
68079+
68080+/* PaX: if vma is mirrored, synchronize the mirror's PTE
68081+ *
68082+ * the ptl of the lower mapped page is held on entry and is not released on exit
68083+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
68084+ */
68085+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68086+{
68087+ struct mm_struct *mm = vma->vm_mm;
68088+ unsigned long address_m;
68089+ spinlock_t *ptl_m;
68090+ struct vm_area_struct *vma_m;
68091+ pmd_t *pmd_m;
68092+ pte_t *pte_m, entry_m;
68093+
68094+ BUG_ON(!page_m || !PageAnon(page_m));
68095+
68096+ vma_m = pax_find_mirror_vma(vma);
68097+ if (!vma_m)
68098+ return;
68099+
68100+ BUG_ON(!PageLocked(page_m));
68101+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68102+ address_m = address + SEGMEXEC_TASK_SIZE;
68103+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68104+ pte_m = pte_offset_map_nested(pmd_m, address_m);
68105+ ptl_m = pte_lockptr(mm, pmd_m);
68106+ if (ptl != ptl_m) {
68107+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68108+ if (!pte_none(*pte_m))
68109+ goto out;
68110+ }
68111+
68112+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68113+ page_cache_get(page_m);
68114+ page_add_anon_rmap(page_m, vma_m, address_m);
68115+ inc_mm_counter(mm, anon_rss);
68116+ set_pte_at(mm, address_m, pte_m, entry_m);
68117+ update_mmu_cache(vma_m, address_m, entry_m);
68118+out:
68119+ if (ptl != ptl_m)
68120+ spin_unlock(ptl_m);
68121+ pte_unmap_nested(pte_m);
68122+ unlock_page(page_m);
68123+}
68124+
68125+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
68126+{
68127+ struct mm_struct *mm = vma->vm_mm;
68128+ unsigned long address_m;
68129+ spinlock_t *ptl_m;
68130+ struct vm_area_struct *vma_m;
68131+ pmd_t *pmd_m;
68132+ pte_t *pte_m, entry_m;
68133+
68134+ BUG_ON(!page_m || PageAnon(page_m));
68135+
68136+ vma_m = pax_find_mirror_vma(vma);
68137+ if (!vma_m)
68138+ return;
68139+
68140+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68141+ address_m = address + SEGMEXEC_TASK_SIZE;
68142+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68143+ pte_m = pte_offset_map_nested(pmd_m, address_m);
68144+ ptl_m = pte_lockptr(mm, pmd_m);
68145+ if (ptl != ptl_m) {
68146+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68147+ if (!pte_none(*pte_m))
68148+ goto out;
68149+ }
68150+
68151+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
68152+ page_cache_get(page_m);
68153+ page_add_file_rmap(page_m);
68154+ inc_mm_counter(mm, file_rss);
68155+ set_pte_at(mm, address_m, pte_m, entry_m);
68156+ update_mmu_cache(vma_m, address_m, entry_m);
68157+out:
68158+ if (ptl != ptl_m)
68159+ spin_unlock(ptl_m);
68160+ pte_unmap_nested(pte_m);
68161+}
68162+
68163+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
68164+{
68165+ struct mm_struct *mm = vma->vm_mm;
68166+ unsigned long address_m;
68167+ spinlock_t *ptl_m;
68168+ struct vm_area_struct *vma_m;
68169+ pmd_t *pmd_m;
68170+ pte_t *pte_m, entry_m;
68171+
68172+ vma_m = pax_find_mirror_vma(vma);
68173+ if (!vma_m)
68174+ return;
68175+
68176+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
68177+ address_m = address + SEGMEXEC_TASK_SIZE;
68178+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
68179+ pte_m = pte_offset_map_nested(pmd_m, address_m);
68180+ ptl_m = pte_lockptr(mm, pmd_m);
68181+ if (ptl != ptl_m) {
68182+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
68183+ if (!pte_none(*pte_m))
68184+ goto out;
68185+ }
68186+
68187+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
68188+ set_pte_at(mm, address_m, pte_m, entry_m);
68189+out:
68190+ if (ptl != ptl_m)
68191+ spin_unlock(ptl_m);
68192+ pte_unmap_nested(pte_m);
68193+}
68194+
68195+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
68196+{
68197+ struct page *page_m;
68198+ pte_t entry;
68199+
68200+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
68201+ goto out;
68202+
68203+ entry = *pte;
68204+ page_m = vm_normal_page(vma, address, entry);
68205+ if (!page_m)
68206+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
68207+ else if (PageAnon(page_m)) {
68208+ if (pax_find_mirror_vma(vma)) {
68209+ pte_unmap_unlock(pte, ptl);
68210+ lock_page(page_m);
68211+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
68212+ if (pte_same(entry, *pte))
68213+ pax_mirror_anon_pte(vma, address, page_m, ptl);
68214+ else
68215+ unlock_page(page_m);
68216+ }
68217+ } else
68218+ pax_mirror_file_pte(vma, address, page_m, ptl);
68219+
68220+out:
68221+ pte_unmap_unlock(pte, ptl);
68222+}
68223+#endif
68224+
68225 /*
68226 * This routine handles present pages, when users try to write
68227 * to a shared page. It is done by copying the page to a new address
68228@@ -2156,6 +2360,12 @@ gotten:
68229 */
68230 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68231 if (likely(pte_same(*page_table, orig_pte))) {
68232+
68233+#ifdef CONFIG_PAX_SEGMEXEC
68234+ if (pax_find_mirror_vma(vma))
68235+ BUG_ON(!trylock_page(new_page));
68236+#endif
68237+
68238 if (old_page) {
68239 if (!PageAnon(old_page)) {
68240 dec_mm_counter(mm, file_rss);
68241@@ -2207,6 +2417,10 @@ gotten:
68242 page_remove_rmap(old_page);
68243 }
68244
68245+#ifdef CONFIG_PAX_SEGMEXEC
68246+ pax_mirror_anon_pte(vma, address, new_page, ptl);
68247+#endif
68248+
68249 /* Free the old page.. */
68250 new_page = old_page;
68251 ret |= VM_FAULT_WRITE;
68252@@ -2606,6 +2820,11 @@ static int do_swap_page(struct mm_struct
68253 swap_free(entry);
68254 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
68255 try_to_free_swap(page);
68256+
68257+#ifdef CONFIG_PAX_SEGMEXEC
68258+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
68259+#endif
68260+
68261 unlock_page(page);
68262
68263 if (flags & FAULT_FLAG_WRITE) {
68264@@ -2617,6 +2836,11 @@ static int do_swap_page(struct mm_struct
68265
68266 /* No need to invalidate - it was non-present before */
68267 update_mmu_cache(vma, address, pte);
68268+
68269+#ifdef CONFIG_PAX_SEGMEXEC
68270+ pax_mirror_anon_pte(vma, address, page, ptl);
68271+#endif
68272+
68273 unlock:
68274 pte_unmap_unlock(page_table, ptl);
68275 out:
68276@@ -2632,40 +2856,6 @@ out_release:
68277 }
68278
68279 /*
68280- * This is like a special single-page "expand_{down|up}wards()",
68281- * except we must first make sure that 'address{-|+}PAGE_SIZE'
68282- * doesn't hit another vma.
68283- */
68284-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
68285-{
68286- address &= PAGE_MASK;
68287- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
68288- struct vm_area_struct *prev = vma->vm_prev;
68289-
68290- /*
68291- * Is there a mapping abutting this one below?
68292- *
68293- * That's only ok if it's the same stack mapping
68294- * that has gotten split..
68295- */
68296- if (prev && prev->vm_end == address)
68297- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
68298-
68299- expand_stack(vma, address - PAGE_SIZE);
68300- }
68301- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
68302- struct vm_area_struct *next = vma->vm_next;
68303-
68304- /* As VM_GROWSDOWN but s/below/above/ */
68305- if (next && next->vm_start == address + PAGE_SIZE)
68306- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
68307-
68308- expand_upwards(vma, address + PAGE_SIZE);
68309- }
68310- return 0;
68311-}
68312-
68313-/*
68314 * We enter with non-exclusive mmap_sem (to exclude vma changes,
68315 * but allow concurrent faults), and pte mapped but not yet locked.
68316 * We return with mmap_sem still held, but pte unmapped and unlocked.
68317@@ -2674,27 +2864,23 @@ static int do_anonymous_page(struct mm_s
68318 unsigned long address, pte_t *page_table, pmd_t *pmd,
68319 unsigned int flags)
68320 {
68321- struct page *page;
68322+ struct page *page = NULL;
68323 spinlock_t *ptl;
68324 pte_t entry;
68325
68326- pte_unmap(page_table);
68327-
68328- /* Check if we need to add a guard page to the stack */
68329- if (check_stack_guard_page(vma, address) < 0)
68330- return VM_FAULT_SIGBUS;
68331-
68332- /* Use the zero-page for reads */
68333 if (!(flags & FAULT_FLAG_WRITE)) {
68334 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
68335 vma->vm_page_prot));
68336- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
68337+ ptl = pte_lockptr(mm, pmd);
68338+ spin_lock(ptl);
68339 if (!pte_none(*page_table))
68340 goto unlock;
68341 goto setpte;
68342 }
68343
68344 /* Allocate our own private page. */
68345+ pte_unmap(page_table);
68346+
68347 if (unlikely(anon_vma_prepare(vma)))
68348 goto oom;
68349 page = alloc_zeroed_user_highpage_movable(vma, address);
68350@@ -2713,6 +2899,11 @@ static int do_anonymous_page(struct mm_s
68351 if (!pte_none(*page_table))
68352 goto release;
68353
68354+#ifdef CONFIG_PAX_SEGMEXEC
68355+ if (pax_find_mirror_vma(vma))
68356+ BUG_ON(!trylock_page(page));
68357+#endif
68358+
68359 inc_mm_counter(mm, anon_rss);
68360 page_add_new_anon_rmap(page, vma, address);
68361 setpte:
68362@@ -2720,6 +2911,12 @@ setpte:
68363
68364 /* No need to invalidate - it was non-present before */
68365 update_mmu_cache(vma, address, entry);
68366+
68367+#ifdef CONFIG_PAX_SEGMEXEC
68368+ if (page)
68369+ pax_mirror_anon_pte(vma, address, page, ptl);
68370+#endif
68371+
68372 unlock:
68373 pte_unmap_unlock(page_table, ptl);
68374 return 0;
68375@@ -2862,6 +3059,12 @@ static int __do_fault(struct mm_struct *
68376 */
68377 /* Only go through if we didn't race with anybody else... */
68378 if (likely(pte_same(*page_table, orig_pte))) {
68379+
68380+#ifdef CONFIG_PAX_SEGMEXEC
68381+ if (anon && pax_find_mirror_vma(vma))
68382+ BUG_ON(!trylock_page(page));
68383+#endif
68384+
68385 flush_icache_page(vma, page);
68386 entry = mk_pte(page, vma->vm_page_prot);
68387 if (flags & FAULT_FLAG_WRITE)
68388@@ -2881,6 +3084,14 @@ static int __do_fault(struct mm_struct *
68389
68390 /* no need to invalidate: a not-present page won't be cached */
68391 update_mmu_cache(vma, address, entry);
68392+
68393+#ifdef CONFIG_PAX_SEGMEXEC
68394+ if (anon)
68395+ pax_mirror_anon_pte(vma, address, page, ptl);
68396+ else
68397+ pax_mirror_file_pte(vma, address, page, ptl);
68398+#endif
68399+
68400 } else {
68401 if (charged)
68402 mem_cgroup_uncharge_page(page);
68403@@ -3028,6 +3239,12 @@ static inline int handle_pte_fault(struc
68404 if (flags & FAULT_FLAG_WRITE)
68405 flush_tlb_page(vma, address);
68406 }
68407+
68408+#ifdef CONFIG_PAX_SEGMEXEC
68409+ pax_mirror_pte(vma, address, pte, pmd, ptl);
68410+ return 0;
68411+#endif
68412+
68413 unlock:
68414 pte_unmap_unlock(pte, ptl);
68415 return 0;
68416@@ -3044,6 +3261,10 @@ int handle_mm_fault(struct mm_struct *mm
68417 pmd_t *pmd;
68418 pte_t *pte;
68419
68420+#ifdef CONFIG_PAX_SEGMEXEC
68421+ struct vm_area_struct *vma_m;
68422+#endif
68423+
68424 __set_current_state(TASK_RUNNING);
68425
68426 count_vm_event(PGFAULT);
68427@@ -3051,6 +3272,34 @@ int handle_mm_fault(struct mm_struct *mm
68428 if (unlikely(is_vm_hugetlb_page(vma)))
68429 return hugetlb_fault(mm, vma, address, flags);
68430
68431+#ifdef CONFIG_PAX_SEGMEXEC
68432+ vma_m = pax_find_mirror_vma(vma);
68433+ if (vma_m) {
68434+ unsigned long address_m;
68435+ pgd_t *pgd_m;
68436+ pud_t *pud_m;
68437+ pmd_t *pmd_m;
68438+
68439+ if (vma->vm_start > vma_m->vm_start) {
68440+ address_m = address;
68441+ address -= SEGMEXEC_TASK_SIZE;
68442+ vma = vma_m;
68443+ } else
68444+ address_m = address + SEGMEXEC_TASK_SIZE;
68445+
68446+ pgd_m = pgd_offset(mm, address_m);
68447+ pud_m = pud_alloc(mm, pgd_m, address_m);
68448+ if (!pud_m)
68449+ return VM_FAULT_OOM;
68450+ pmd_m = pmd_alloc(mm, pud_m, address_m);
68451+ if (!pmd_m)
68452+ return VM_FAULT_OOM;
68453+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m))
68454+ return VM_FAULT_OOM;
68455+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
68456+ }
68457+#endif
68458+
68459 pgd = pgd_offset(mm, address);
68460 pud = pud_alloc(mm, pgd, address);
68461 if (!pud)
68462@@ -3148,7 +3397,7 @@ static int __init gate_vma_init(void)
68463 gate_vma.vm_start = FIXADDR_USER_START;
68464 gate_vma.vm_end = FIXADDR_USER_END;
68465 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
68466- gate_vma.vm_page_prot = __P101;
68467+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
68468 /*
68469 * Make sure the vDSO gets into every core dump.
68470 * Dumping its contents makes post-mortem fully interpretable later
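
The memory.c additions above keep the two halves of a SEGMEXEC address space in sync at fault time: handle_mm_fault() redirects a fault in the upper (mirror) half to the lower vma and pre-allocates the mirror's page tables, and the pax_mirror_*_pte() helpers then install the same page into the mirror slot with the mirror vma's executable protection, but only if that slot is still empty. A runnable sketch of just the address normalisation; SEGMEXEC_TASK_SIZE is an illustrative value and normalize_fault() is a hypothetical name:

#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x60000000UL	/* illustrative value */

/* Faults are always serviced on the lower vma; the companion address in
 * the other half is derived from whichever address actually faulted. */
static void normalize_fault(unsigned long addr,
			    unsigned long *lower, unsigned long *mirror)
{
	if (addr >= SEGMEXEC_TASK_SIZE) {	/* fault hit the mirror half */
		*mirror = addr;
		*lower = addr - SEGMEXEC_TASK_SIZE;
	} else {
		*lower = addr;
		*mirror = addr + SEGMEXEC_TASK_SIZE;
	}
}

int main(void)
{
	unsigned long lower, mirror;

	normalize_fault(0x68048123UL, &lower, &mirror);
	printf("service fault at %#lx, mirror PTE at %#lx\n", lower, mirror);
	return 0;
}
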
68471diff -urNp linux-2.6.32.43/mm/memory-failure.c linux-2.6.32.43/mm/memory-failure.c
68472--- linux-2.6.32.43/mm/memory-failure.c 2011-03-27 14:31:47.000000000 -0400
68473+++ linux-2.6.32.43/mm/memory-failure.c 2011-04-17 15:56:46.000000000 -0400
68474@@ -46,7 +46,7 @@ int sysctl_memory_failure_early_kill __r
68475
68476 int sysctl_memory_failure_recovery __read_mostly = 1;
68477
68478-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
68479+atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0);
68480
68481 /*
68482 * Send all the processes who have the page mapped an ``action optional''
68483@@ -745,7 +745,7 @@ int __memory_failure(unsigned long pfn,
68484 return 0;
68485 }
68486
68487- atomic_long_add(1, &mce_bad_pages);
68488+ atomic_long_add_unchecked(1, &mce_bad_pages);
68489
68490 /*
68491 * We need/can do nothing about count=0 pages.
68492diff -urNp linux-2.6.32.43/mm/mempolicy.c linux-2.6.32.43/mm/mempolicy.c
68493--- linux-2.6.32.43/mm/mempolicy.c 2011-03-27 14:31:47.000000000 -0400
68494+++ linux-2.6.32.43/mm/mempolicy.c 2011-04-17 15:56:46.000000000 -0400
68495@@ -573,6 +573,10 @@ static int mbind_range(struct vm_area_st
68496 struct vm_area_struct *next;
68497 int err;
68498
68499+#ifdef CONFIG_PAX_SEGMEXEC
68500+ struct vm_area_struct *vma_m;
68501+#endif
68502+
68503 err = 0;
68504 for (; vma && vma->vm_start < end; vma = next) {
68505 next = vma->vm_next;
68506@@ -584,6 +588,16 @@ static int mbind_range(struct vm_area_st
68507 err = policy_vma(vma, new);
68508 if (err)
68509 break;
68510+
68511+#ifdef CONFIG_PAX_SEGMEXEC
68512+ vma_m = pax_find_mirror_vma(vma);
68513+ if (vma_m) {
68514+ err = policy_vma(vma_m, new);
68515+ if (err)
68516+ break;
68517+ }
68518+#endif
68519+
68520 }
68521 return err;
68522 }
68523@@ -1002,6 +1016,17 @@ static long do_mbind(unsigned long start
68524
68525 if (end < start)
68526 return -EINVAL;
68527+
68528+#ifdef CONFIG_PAX_SEGMEXEC
68529+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
68530+ if (end > SEGMEXEC_TASK_SIZE)
68531+ return -EINVAL;
68532+ } else
68533+#endif
68534+
68535+ if (end > TASK_SIZE)
68536+ return -EINVAL;
68537+
68538 if (end == start)
68539 return 0;
68540
68541@@ -1207,6 +1232,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
68542 if (!mm)
68543 return -EINVAL;
68544
68545+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68546+ if (mm != current->mm &&
68547+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68548+ err = -EPERM;
68549+ goto out;
68550+ }
68551+#endif
68552+
68553 /*
68554 * Check if this process has the right to modify the specified
68555 * process. The right exists if the process has administrative
68556@@ -1216,8 +1249,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi
68557 rcu_read_lock();
68558 tcred = __task_cred(task);
68559 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68560- cred->uid != tcred->suid && cred->uid != tcred->uid &&
68561- !capable(CAP_SYS_NICE)) {
68562+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68563 rcu_read_unlock();
68564 err = -EPERM;
68565 goto out;
68566@@ -2396,7 +2428,7 @@ int show_numa_map(struct seq_file *m, vo
68567
68568 if (file) {
68569 seq_printf(m, " file=");
68570- seq_path(m, &file->f_path, "\n\t= ");
68571+ seq_path(m, &file->f_path, "\n\t\\= ");
68572 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
68573 seq_printf(m, " heap");
68574 } else if (vma->vm_start <= mm->start_stack &&
68575diff -urNp linux-2.6.32.43/mm/migrate.c linux-2.6.32.43/mm/migrate.c
68576--- linux-2.6.32.43/mm/migrate.c 2011-07-13 17:23:04.000000000 -0400
68577+++ linux-2.6.32.43/mm/migrate.c 2011-07-13 17:23:23.000000000 -0400
68578@@ -916,6 +916,8 @@ static int do_pages_move(struct mm_struc
68579 unsigned long chunk_start;
68580 int err;
68581
68582+ pax_track_stack();
68583+
68584 task_nodes = cpuset_mems_allowed(task);
68585
68586 err = -ENOMEM;
68587@@ -1106,6 +1108,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
68588 if (!mm)
68589 return -EINVAL;
68590
68591+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
68592+ if (mm != current->mm &&
68593+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
68594+ err = -EPERM;
68595+ goto out;
68596+ }
68597+#endif
68598+
68599 /*
68600 * Check if this process has the right to modify the specified
68601 * process. The right exists if the process has administrative
68602@@ -1115,8 +1125,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid,
68603 rcu_read_lock();
68604 tcred = __task_cred(task);
68605 if (cred->euid != tcred->suid && cred->euid != tcred->uid &&
68606- cred->uid != tcred->suid && cred->uid != tcred->uid &&
68607- !capable(CAP_SYS_NICE)) {
68608+ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) {
68609 rcu_read_unlock();
68610 err = -EPERM;
68611 goto out;
68612diff -urNp linux-2.6.32.43/mm/mlock.c linux-2.6.32.43/mm/mlock.c
68613--- linux-2.6.32.43/mm/mlock.c 2011-03-27 14:31:47.000000000 -0400
68614+++ linux-2.6.32.43/mm/mlock.c 2011-04-17 15:56:46.000000000 -0400
68615@@ -13,6 +13,7 @@
68616 #include <linux/pagemap.h>
68617 #include <linux/mempolicy.h>
68618 #include <linux/syscalls.h>
68619+#include <linux/security.h>
68620 #include <linux/sched.h>
68621 #include <linux/module.h>
68622 #include <linux/rmap.h>
68623@@ -138,13 +139,6 @@ void munlock_vma_page(struct page *page)
68624 }
68625 }
68626
68627-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
68628-{
68629- return (vma->vm_flags & VM_GROWSDOWN) &&
68630- (vma->vm_start == addr) &&
68631- !vma_stack_continue(vma->vm_prev, addr);
68632-}
68633-
68634 /**
68635 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
68636 * @vma: target vma
68637@@ -177,12 +171,6 @@ static long __mlock_vma_pages_range(stru
68638 if (vma->vm_flags & VM_WRITE)
68639 gup_flags |= FOLL_WRITE;
68640
68641- /* We don't try to access the guard page of a stack vma */
68642- if (stack_guard_page(vma, start)) {
68643- addr += PAGE_SIZE;
68644- nr_pages--;
68645- }
68646-
68647 while (nr_pages > 0) {
68648 int i;
68649
68650@@ -440,7 +428,7 @@ static int do_mlock(unsigned long start,
68651 {
68652 unsigned long nstart, end, tmp;
68653 struct vm_area_struct * vma, * prev;
68654- int error;
68655+ int error = -EINVAL;
68656
68657 len = PAGE_ALIGN(len);
68658 end = start + len;
68659@@ -448,6 +436,9 @@ static int do_mlock(unsigned long start,
68660 return -EINVAL;
68661 if (end == start)
68662 return 0;
68663+ if (end > TASK_SIZE)
68664+ return -EINVAL;
68665+
68666 vma = find_vma_prev(current->mm, start, &prev);
68667 if (!vma || vma->vm_start > start)
68668 return -ENOMEM;
68669@@ -458,6 +449,11 @@ static int do_mlock(unsigned long start,
68670 for (nstart = start ; ; ) {
68671 unsigned int newflags;
68672
68673+#ifdef CONFIG_PAX_SEGMEXEC
68674+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68675+ break;
68676+#endif
68677+
68678 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
68679
68680 newflags = vma->vm_flags | VM_LOCKED;
68681@@ -507,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
68682 lock_limit >>= PAGE_SHIFT;
68683
68684 /* check against resource limits */
68685+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
68686 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
68687 error = do_mlock(start, len, 1);
68688 up_write(&current->mm->mmap_sem);
68689@@ -528,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
68690 static int do_mlockall(int flags)
68691 {
68692 struct vm_area_struct * vma, * prev = NULL;
68693- unsigned int def_flags = 0;
68694
68695 if (flags & MCL_FUTURE)
68696- def_flags = VM_LOCKED;
68697- current->mm->def_flags = def_flags;
68698+ current->mm->def_flags |= VM_LOCKED;
68699+ else
68700+ current->mm->def_flags &= ~VM_LOCKED;
68701 if (flags == MCL_FUTURE)
68702 goto out;
68703
68704 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
68705- unsigned int newflags;
68706+ unsigned long newflags;
68707+
68708+#ifdef CONFIG_PAX_SEGMEXEC
68709+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
68710+ break;
68711+#endif
68712
68713+ BUG_ON(vma->vm_end > TASK_SIZE);
68714 newflags = vma->vm_flags | VM_LOCKED;
68715 if (!(flags & MCL_CURRENT))
68716 newflags &= ~VM_LOCKED;
68717@@ -570,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
68718 lock_limit >>= PAGE_SHIFT;
68719
68720 ret = -ENOMEM;
68721+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
68722 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
68723 capable(CAP_IPC_LOCK))
68724 ret = do_mlockall(flags);
68725diff -urNp linux-2.6.32.43/mm/mmap.c linux-2.6.32.43/mm/mmap.c
68726--- linux-2.6.32.43/mm/mmap.c 2011-03-27 14:31:47.000000000 -0400
68727+++ linux-2.6.32.43/mm/mmap.c 2011-04-17 15:56:46.000000000 -0400
68728@@ -45,6 +45,16 @@
68729 #define arch_rebalance_pgtables(addr, len) (addr)
68730 #endif
68731
68732+static inline void verify_mm_writelocked(struct mm_struct *mm)
68733+{
68734+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
68735+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
68736+ up_read(&mm->mmap_sem);
68737+ BUG();
68738+ }
68739+#endif
68740+}
68741+
68742 static void unmap_region(struct mm_struct *mm,
68743 struct vm_area_struct *vma, struct vm_area_struct *prev,
68744 unsigned long start, unsigned long end);
68745@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc
68746 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68747 *
68748 */
68749-pgprot_t protection_map[16] = {
68750+pgprot_t protection_map[16] __read_only = {
68751 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
68752 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
68753 };
68754
68755 pgprot_t vm_get_page_prot(unsigned long vm_flags)
68756 {
68757- return __pgprot(pgprot_val(protection_map[vm_flags &
68758+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
68759 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
68760 pgprot_val(arch_vm_get_page_prot(vm_flags)));
68761+
68762+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68763+ if (!nx_enabled &&
68764+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
68765+ (vm_flags & (VM_READ | VM_WRITE)))
68766+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
68767+#endif
68768+
68769+ return prot;
68770 }
68771 EXPORT_SYMBOL(vm_get_page_prot);
68772
68773 int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */
68774 int sysctl_overcommit_ratio = 50; /* default is 50% */
68775 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
68776+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
68777 struct percpu_counter vm_committed_as;
68778
68779 /*
68780@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma
68781 struct vm_area_struct *next = vma->vm_next;
68782
68783 might_sleep();
68784+ BUG_ON(vma->vm_mirror);
68785 if (vma->vm_ops && vma->vm_ops->close)
68786 vma->vm_ops->close(vma);
68787 if (vma->vm_file) {
68788@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
68789 * not page aligned -Ram Gupta
68790 */
68791 rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur;
68792+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
68793 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
68794 (mm->end_data - mm->start_data) > rlim)
68795 goto out;
68796@@ -704,6 +726,12 @@ static int
68797 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
68798 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68799 {
68800+
68801+#ifdef CONFIG_PAX_SEGMEXEC
68802+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
68803+ return 0;
68804+#endif
68805+
68806 if (is_mergeable_vma(vma, file, vm_flags) &&
68807 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
68808 if (vma->vm_pgoff == vm_pgoff)
68809@@ -723,6 +751,12 @@ static int
68810 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
68811 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
68812 {
68813+
68814+#ifdef CONFIG_PAX_SEGMEXEC
68815+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
68816+ return 0;
68817+#endif
68818+
68819 if (is_mergeable_vma(vma, file, vm_flags) &&
68820 is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
68821 pgoff_t vm_pglen;
68822@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc
68823 struct vm_area_struct *vma_merge(struct mm_struct *mm,
68824 struct vm_area_struct *prev, unsigned long addr,
68825 unsigned long end, unsigned long vm_flags,
68826- struct anon_vma *anon_vma, struct file *file,
68827+ struct anon_vma *anon_vma, struct file *file,
68828 pgoff_t pgoff, struct mempolicy *policy)
68829 {
68830 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
68831 struct vm_area_struct *area, *next;
68832
68833+#ifdef CONFIG_PAX_SEGMEXEC
68834+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
68835+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
68836+
68837+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
68838+#endif
68839+
68840 /*
68841 * We later require that vma->vm_flags == vm_flags,
68842 * so this tests vma->vm_flags & VM_SPECIAL, too.
68843@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct
68844 if (next && next->vm_end == end) /* cases 6, 7, 8 */
68845 next = next->vm_next;
68846
68847+#ifdef CONFIG_PAX_SEGMEXEC
68848+ if (prev)
68849+ prev_m = pax_find_mirror_vma(prev);
68850+ if (area)
68851+ area_m = pax_find_mirror_vma(area);
68852+ if (next)
68853+ next_m = pax_find_mirror_vma(next);
68854+#endif
68855+
68856 /*
68857 * Can it merge with the predecessor?
68858 */
68859@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct
68860 /* cases 1, 6 */
68861 vma_adjust(prev, prev->vm_start,
68862 next->vm_end, prev->vm_pgoff, NULL);
68863- } else /* cases 2, 5, 7 */
68864+
68865+#ifdef CONFIG_PAX_SEGMEXEC
68866+ if (prev_m)
68867+ vma_adjust(prev_m, prev_m->vm_start,
68868+ next_m->vm_end, prev_m->vm_pgoff, NULL);
68869+#endif
68870+
68871+ } else { /* cases 2, 5, 7 */
68872 vma_adjust(prev, prev->vm_start,
68873 end, prev->vm_pgoff, NULL);
68874+
68875+#ifdef CONFIG_PAX_SEGMEXEC
68876+ if (prev_m)
68877+ vma_adjust(prev_m, prev_m->vm_start,
68878+ end_m, prev_m->vm_pgoff, NULL);
68879+#endif
68880+
68881+ }
68882 return prev;
68883 }
68884
68885@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct
68886 mpol_equal(policy, vma_policy(next)) &&
68887 can_vma_merge_before(next, vm_flags,
68888 anon_vma, file, pgoff+pglen)) {
68889- if (prev && addr < prev->vm_end) /* case 4 */
68890+ if (prev && addr < prev->vm_end) { /* case 4 */
68891 vma_adjust(prev, prev->vm_start,
68892 addr, prev->vm_pgoff, NULL);
68893- else /* cases 3, 8 */
68894+
68895+#ifdef CONFIG_PAX_SEGMEXEC
68896+ if (prev_m)
68897+ vma_adjust(prev_m, prev_m->vm_start,
68898+ addr_m, prev_m->vm_pgoff, NULL);
68899+#endif
68900+
68901+ } else { /* cases 3, 8 */
68902 vma_adjust(area, addr, next->vm_end,
68903 next->vm_pgoff - pglen, NULL);
68904+
68905+#ifdef CONFIG_PAX_SEGMEXEC
68906+ if (area_m)
68907+ vma_adjust(area_m, addr_m, next_m->vm_end,
68908+ next_m->vm_pgoff - pglen, NULL);
68909+#endif
68910+
68911+ }
68912 return area;
68913 }
68914
68915@@ -898,14 +978,11 @@ none:
68916 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
68917 struct file *file, long pages)
68918 {
68919- const unsigned long stack_flags
68920- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
68921-
68922 if (file) {
68923 mm->shared_vm += pages;
68924 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
68925 mm->exec_vm += pages;
68926- } else if (flags & stack_flags)
68927+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
68928 mm->stack_vm += pages;
68929 if (flags & (VM_RESERVED|VM_IO))
68930 mm->reserved_vm += pages;
68931@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file
68932 * (the exception is when the underlying filesystem is noexec
68933 * mounted, in which case we dont add PROT_EXEC.)
68934 */
68935- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
68936+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
68937 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
68938 prot |= PROT_EXEC;
68939
68940@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file
68941 /* Obtain the address to map to. we verify (or select) it and ensure
68942 * that it represents a valid section of the address space.
68943 */
68944- addr = get_unmapped_area(file, addr, len, pgoff, flags);
68945+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
68946 if (addr & ~PAGE_MASK)
68947 return addr;
68948
68949@@ -969,6 +1046,36 @@ unsigned long do_mmap_pgoff(struct file
68950 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
68951 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
68952
68953+#ifdef CONFIG_PAX_MPROTECT
68954+ if (mm->pax_flags & MF_PAX_MPROTECT) {
68955+#ifndef CONFIG_PAX_MPROTECT_COMPAT
68956+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
68957+ gr_log_rwxmmap(file);
68958+
68959+#ifdef CONFIG_PAX_EMUPLT
68960+ vm_flags &= ~VM_EXEC;
68961+#else
68962+ return -EPERM;
68963+#endif
68964+
68965+ }
68966+
68967+ if (!(vm_flags & VM_EXEC))
68968+ vm_flags &= ~VM_MAYEXEC;
68969+#else
68970+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
68971+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
68972+#endif
68973+ else
68974+ vm_flags &= ~VM_MAYWRITE;
68975+ }
68976+#endif
68977+
68978+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
68979+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
68980+ vm_flags &= ~VM_PAGEEXEC;
68981+#endif
68982+
68983 if (flags & MAP_LOCKED)
68984 if (!can_do_mlock())
68985 return -EPERM;
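
The do_mmap_pgoff() hunk above enforces the MPROTECT policy at map time: in the strict configuration a request for both VM_WRITE and VM_EXEC either loses VM_EXEC (with EMUPLT) or is rejected, a non-executable mapping loses VM_MAYEXEC, and an executable one loses VM_MAYWRITE, so no mapping can later be turned writable and executable at once. A condensed userspace model of that decision; the flag values are illustrative, and gr_log_rwxmmap(), EMUPLT and the MPROTECT_COMPAT variant are omitted:

#include <stdio.h>

#define VM_WRITE	0x2UL
#define VM_EXEC		0x4UL
#define VM_MAYWRITE	0x20UL
#define VM_MAYEXEC	0x40UL	/* illustrative bit values */

/* Strict rule without EMUPLT: refuse W+X outright, then make sure the
 * surviving mapping can never be mprotect()ed into the missing right. */
static long enforce_mprotect(unsigned long *vm_flags)
{
	if ((*vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
		return -1;			/* -EPERM in the kernel */
	if (!(*vm_flags & VM_EXEC))
		*vm_flags &= ~VM_MAYEXEC;
	else
		*vm_flags &= ~VM_MAYWRITE;
	return 0;
}

int main(void)
{
	unsigned long flags = VM_WRITE | VM_MAYWRITE | VM_MAYEXEC;

	if (enforce_mprotect(&flags) == 0)
		printf("mapping allowed, flags now %#lx\n", flags);
	else
		printf("W+X mapping rejected\n");
	return 0;
}
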
68986@@ -980,6 +1087,7 @@ unsigned long do_mmap_pgoff(struct file
68987 locked += mm->locked_vm;
68988 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
68989 lock_limit >>= PAGE_SHIFT;
68990+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
68991 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
68992 return -EAGAIN;
68993 }
68994@@ -1053,6 +1161,9 @@ unsigned long do_mmap_pgoff(struct file
68995 if (error)
68996 return error;
68997
68998+ if (!gr_acl_handle_mmap(file, prot))
68999+ return -EACCES;
69000+
69001 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
69002 }
69003 EXPORT_SYMBOL(do_mmap_pgoff);
69004@@ -1065,10 +1176,10 @@ EXPORT_SYMBOL(do_mmap_pgoff);
69005 */
69006 int vma_wants_writenotify(struct vm_area_struct *vma)
69007 {
69008- unsigned int vm_flags = vma->vm_flags;
69009+ unsigned long vm_flags = vma->vm_flags;
69010
69011 /* If it was private or non-writable, the write bit is already clear */
69012- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
69013+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
69014 return 0;
69015
69016 /* The backer wishes to know when pages are first written to? */
69017@@ -1117,14 +1228,24 @@ unsigned long mmap_region(struct file *f
69018 unsigned long charged = 0;
69019 struct inode *inode = file ? file->f_path.dentry->d_inode : NULL;
69020
69021+#ifdef CONFIG_PAX_SEGMEXEC
69022+ struct vm_area_struct *vma_m = NULL;
69023+#endif
69024+
69025+ /*
69026+ * mm->mmap_sem is required to protect against another thread
69027+ * changing the mappings in case we sleep.
69028+ */
69029+ verify_mm_writelocked(mm);
69030+
69031 /* Clear old maps */
69032 error = -ENOMEM;
69033-munmap_back:
69034 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69035 if (vma && vma->vm_start < addr + len) {
69036 if (do_munmap(mm, addr, len))
69037 return -ENOMEM;
69038- goto munmap_back;
69039+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69040+ BUG_ON(vma && vma->vm_start < addr + len);
69041 }
69042
69043 /* Check against address space limit. */
69044@@ -1173,6 +1294,16 @@ munmap_back:
69045 goto unacct_error;
69046 }
69047
69048+#ifdef CONFIG_PAX_SEGMEXEC
69049+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
69050+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69051+ if (!vma_m) {
69052+ error = -ENOMEM;
69053+ goto free_vma;
69054+ }
69055+ }
69056+#endif
69057+
69058 vma->vm_mm = mm;
69059 vma->vm_start = addr;
69060 vma->vm_end = addr + len;
69061@@ -1195,6 +1326,19 @@ munmap_back:
69062 error = file->f_op->mmap(file, vma);
69063 if (error)
69064 goto unmap_and_free_vma;
69065+
69066+#ifdef CONFIG_PAX_SEGMEXEC
69067+ if (vma_m && (vm_flags & VM_EXECUTABLE))
69068+ added_exe_file_vma(mm);
69069+#endif
69070+
69071+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
69072+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
69073+ vma->vm_flags |= VM_PAGEEXEC;
69074+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69075+ }
69076+#endif
69077+
69078 if (vm_flags & VM_EXECUTABLE)
69079 added_exe_file_vma(mm);
69080
69081@@ -1218,6 +1362,11 @@ munmap_back:
69082 vma_link(mm, vma, prev, rb_link, rb_parent);
69083 file = vma->vm_file;
69084
69085+#ifdef CONFIG_PAX_SEGMEXEC
69086+ if (vma_m)
69087+ pax_mirror_vma(vma_m, vma);
69088+#endif
69089+
69090 /* Once vma denies write, undo our temporary denial count */
69091 if (correct_wcount)
69092 atomic_inc(&inode->i_writecount);
69093@@ -1226,6 +1375,7 @@ out:
69094
69095 mm->total_vm += len >> PAGE_SHIFT;
69096 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
69097+ track_exec_limit(mm, addr, addr + len, vm_flags);
69098 if (vm_flags & VM_LOCKED) {
69099 /*
69100 * makes pages present; downgrades, drops, reacquires mmap_sem
69101@@ -1248,6 +1398,12 @@ unmap_and_free_vma:
69102 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
69103 charged = 0;
69104 free_vma:
69105+
69106+#ifdef CONFIG_PAX_SEGMEXEC
69107+ if (vma_m)
69108+ kmem_cache_free(vm_area_cachep, vma_m);
69109+#endif
69110+
69111 kmem_cache_free(vm_area_cachep, vma);
69112 unacct_error:
69113 if (charged)
69114@@ -1255,6 +1411,44 @@ unacct_error:
69115 return error;
69116 }
69117
69118+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len)
69119+{
69120+ if (!vma) {
69121+#ifdef CONFIG_STACK_GROWSUP
69122+ if (addr > sysctl_heap_stack_gap)
69123+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
69124+ else
69125+ vma = find_vma(current->mm, 0);
69126+ if (vma && (vma->vm_flags & VM_GROWSUP))
69127+ return false;
69128+#endif
69129+ return true;
69130+ }
69131+
69132+ if (addr + len > vma->vm_start)
69133+ return false;
69134+
69135+ if (vma->vm_flags & VM_GROWSDOWN)
69136+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
69137+#ifdef CONFIG_STACK_GROWSUP
69138+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
69139+ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
69140+#endif
69141+
69142+ return true;
69143+}
69144+
69145+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len)
69146+{
69147+ if (vma->vm_start < len)
69148+ return -ENOMEM;
69149+ if (!(vma->vm_flags & VM_GROWSDOWN))
69150+ return vma->vm_start - len;
69151+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
69152+ return vma->vm_start - len - sysctl_heap_stack_gap;
69153+ return -ENOMEM;
69154+}
69155+
69156 /* Get an address range which is currently unmapped.
69157 * For shmat() with addr=0.
69158 *
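
check_heap_stack_gap() above is the predicate the get_unmapped_area() paths below switch to: a candidate range is acceptable only if it does not overlap the next vma and, when that vma is a downward-growing stack, leaves at least sysctl_heap_stack_gap bytes below it; skip_heap_stack_gap() computes the highest address that still satisfies this during a top-down search. A runnable userspace model of the core test, with the growing-up architecture cases left out and a hypothetical struct standing in for vm_area_struct:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the relevant vm_area_struct fields. */
struct vma {
	unsigned long vm_start;
	bool growsdown;		/* VM_GROWSDOWN */
};

static unsigned long heap_stack_gap = 64 * 1024;	/* default from the patch */

/* Core of check_heap_stack_gap(): no overlap, and keep the gap below a stack. */
static bool gap_ok(const struct vma *next, unsigned long addr, unsigned long len)
{
	if (!next)
		return true;
	if (addr + len > next->vm_start)
		return false;
	if (next->growsdown)
		return heap_stack_gap <= next->vm_start - addr - len;
	return true;
}

int main(void)
{
	struct vma stack = { 0xbf800000UL, true };

	printf("right below the stack: %s\n",
	       gap_ok(&stack, stack.vm_start - 0x1000, 0x1000) ? "ok" : "rejected");
	printf("64k below the stack:   %s\n",
	       gap_ok(&stack, stack.vm_start - 0x11000, 0x1000) ? "ok" : "rejected");
	return 0;
}
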
69159@@ -1281,18 +1475,23 @@ arch_get_unmapped_area(struct file *filp
69160 if (flags & MAP_FIXED)
69161 return addr;
69162
69163+#ifdef CONFIG_PAX_RANDMMAP
69164+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69165+#endif
69166+
69167 if (addr) {
69168 addr = PAGE_ALIGN(addr);
69169- vma = find_vma(mm, addr);
69170- if (TASK_SIZE - len >= addr &&
69171- (!vma || addr + len <= vma->vm_start))
69172- return addr;
69173+ if (TASK_SIZE - len >= addr) {
69174+ vma = find_vma(mm, addr);
69175+ if (check_heap_stack_gap(vma, addr, len))
69176+ return addr;
69177+ }
69178 }
69179 if (len > mm->cached_hole_size) {
69180- start_addr = addr = mm->free_area_cache;
69181+ start_addr = addr = mm->free_area_cache;
69182 } else {
69183- start_addr = addr = TASK_UNMAPPED_BASE;
69184- mm->cached_hole_size = 0;
69185+ start_addr = addr = mm->mmap_base;
69186+ mm->cached_hole_size = 0;
69187 }
69188
69189 full_search:
69190@@ -1303,34 +1502,40 @@ full_search:
69191 * Start a new search - just in case we missed
69192 * some holes.
69193 */
69194- if (start_addr != TASK_UNMAPPED_BASE) {
69195- addr = TASK_UNMAPPED_BASE;
69196- start_addr = addr;
69197+ if (start_addr != mm->mmap_base) {
69198+ start_addr = addr = mm->mmap_base;
69199 mm->cached_hole_size = 0;
69200 goto full_search;
69201 }
69202 return -ENOMEM;
69203 }
69204- if (!vma || addr + len <= vma->vm_start) {
69205- /*
69206- * Remember the place where we stopped the search:
69207- */
69208- mm->free_area_cache = addr + len;
69209- return addr;
69210- }
69211+ if (check_heap_stack_gap(vma, addr, len))
69212+ break;
69213 if (addr + mm->cached_hole_size < vma->vm_start)
69214 mm->cached_hole_size = vma->vm_start - addr;
69215 addr = vma->vm_end;
69216 }
69217+
69218+ /*
69219+ * Remember the place where we stopped the search:
69220+ */
69221+ mm->free_area_cache = addr + len;
69222+ return addr;
69223 }
69224 #endif
69225
69226 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
69227 {
69228+
69229+#ifdef CONFIG_PAX_SEGMEXEC
69230+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69231+ return;
69232+#endif
69233+
69234 /*
69235 * Is this a new hole at the lowest possible address?
69236 */
69237- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) {
69238+ if (addr >= mm->mmap_base && addr < mm->free_area_cache) {
69239 mm->free_area_cache = addr;
69240 mm->cached_hole_size = ~0UL;
69241 }
69242@@ -1348,7 +1553,7 @@ arch_get_unmapped_area_topdown(struct fi
69243 {
69244 struct vm_area_struct *vma;
69245 struct mm_struct *mm = current->mm;
69246- unsigned long addr = addr0;
69247+ unsigned long base = mm->mmap_base, addr = addr0;
69248
69249 /* requested length too big for entire address space */
69250 if (len > TASK_SIZE)
69251@@ -1357,13 +1562,18 @@ arch_get_unmapped_area_topdown(struct fi
69252 if (flags & MAP_FIXED)
69253 return addr;
69254
69255+#ifdef CONFIG_PAX_RANDMMAP
69256+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
69257+#endif
69258+
69259 /* requesting a specific address */
69260 if (addr) {
69261 addr = PAGE_ALIGN(addr);
69262- vma = find_vma(mm, addr);
69263- if (TASK_SIZE - len >= addr &&
69264- (!vma || addr + len <= vma->vm_start))
69265- return addr;
69266+ if (TASK_SIZE - len >= addr) {
69267+ vma = find_vma(mm, addr);
69268+ if (check_heap_stack_gap(vma, addr, len))
69269+ return addr;
69270+ }
69271 }
69272
69273 /* check if free_area_cache is useful for us */
69274@@ -1378,7 +1588,7 @@ arch_get_unmapped_area_topdown(struct fi
69275 /* make sure it can fit in the remaining address space */
69276 if (addr > len) {
69277 vma = find_vma(mm, addr-len);
69278- if (!vma || addr <= vma->vm_start)
69279+ if (check_heap_stack_gap(vma, addr - len, len))
69280 /* remember the address as a hint for next time */
69281 return (mm->free_area_cache = addr-len);
69282 }
69283@@ -1395,7 +1605,7 @@ arch_get_unmapped_area_topdown(struct fi
69284 * return with success:
69285 */
69286 vma = find_vma(mm, addr);
69287- if (!vma || addr+len <= vma->vm_start)
69288+ if (check_heap_stack_gap(vma, addr, len))
69289 /* remember the address as a hint for next time */
69290 return (mm->free_area_cache = addr);
69291
69292@@ -1404,8 +1614,8 @@ arch_get_unmapped_area_topdown(struct fi
69293 mm->cached_hole_size = vma->vm_start - addr;
69294
69295 /* try just below the current vma->vm_start */
69296- addr = vma->vm_start-len;
69297- } while (len < vma->vm_start);
69298+ addr = skip_heap_stack_gap(vma, len);
69299+ } while (!IS_ERR_VALUE(addr));
69300
69301 bottomup:
69302 /*
69303@@ -1414,13 +1624,21 @@ bottomup:
69304 * can happen with large stack limits and large mmap()
69305 * allocations.
69306 */
69307+ mm->mmap_base = TASK_UNMAPPED_BASE;
69308+
69309+#ifdef CONFIG_PAX_RANDMMAP
69310+ if (mm->pax_flags & MF_PAX_RANDMMAP)
69311+ mm->mmap_base += mm->delta_mmap;
69312+#endif
69313+
69314+ mm->free_area_cache = mm->mmap_base;
69315 mm->cached_hole_size = ~0UL;
69316- mm->free_area_cache = TASK_UNMAPPED_BASE;
69317 addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
69318 /*
69319 * Restore the topdown base:
69320 */
69321- mm->free_area_cache = mm->mmap_base;
69322+ mm->mmap_base = base;
69323+ mm->free_area_cache = base;
69324 mm->cached_hole_size = ~0UL;
69325
69326 return addr;
69327@@ -1429,6 +1647,12 @@ bottomup:
69328
69329 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
69330 {
69331+
69332+#ifdef CONFIG_PAX_SEGMEXEC
69333+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
69334+ return;
69335+#endif
69336+
69337 /*
69338 * Is this a new hole at the highest possible address?
69339 */
69340@@ -1436,8 +1660,10 @@ void arch_unmap_area_topdown(struct mm_s
69341 mm->free_area_cache = addr;
69342
69343 /* dont allow allocations above current base */
69344- if (mm->free_area_cache > mm->mmap_base)
69345+ if (mm->free_area_cache > mm->mmap_base) {
69346 mm->free_area_cache = mm->mmap_base;
69347+ mm->cached_hole_size = ~0UL;
69348+ }
69349 }
69350
69351 unsigned long
69352@@ -1545,6 +1771,27 @@ out:
69353 return prev ? prev->vm_next : vma;
69354 }
69355
69356+#ifdef CONFIG_PAX_SEGMEXEC
69357+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
69358+{
69359+ struct vm_area_struct *vma_m;
69360+
69361+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
69362+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
69363+ BUG_ON(vma->vm_mirror);
69364+ return NULL;
69365+ }
69366+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
69367+ vma_m = vma->vm_mirror;
69368+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
69369+ BUG_ON(vma->vm_file != vma_m->vm_file);
69370+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
69371+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff || vma->anon_vma != vma_m->anon_vma);
69372+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED | VM_RESERVED));
69373+ return vma_m;
69374+}
69375+#endif
69376+
69377 /*
69378 * Verify that the stack growth is acceptable and
69379 * update accounting. This is shared with both the
69380@@ -1561,6 +1808,7 @@ static int acct_stack_growth(struct vm_a
69381 return -ENOMEM;
69382
69383 /* Stack limit test */
69384+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
69385 if (size > rlim[RLIMIT_STACK].rlim_cur)
69386 return -ENOMEM;
69387
69388@@ -1570,6 +1818,7 @@ static int acct_stack_growth(struct vm_a
69389 unsigned long limit;
69390 locked = mm->locked_vm + grow;
69391 limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
69392+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
69393 if (locked > limit && !capable(CAP_IPC_LOCK))
69394 return -ENOMEM;
69395 }
69396@@ -1600,37 +1849,48 @@ static int acct_stack_growth(struct vm_a
69397 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
69398 * vma is the last one with address > vma->vm_end. Have to extend vma.
69399 */
69400+#ifndef CONFIG_IA64
69401+static
69402+#endif
69403 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
69404 {
69405 int error;
69406+ bool locknext;
69407
69408 if (!(vma->vm_flags & VM_GROWSUP))
69409 return -EFAULT;
69410
69411+ /* Also guard against wrapping around to address 0. */
69412+ if (address < PAGE_ALIGN(address+1))
69413+ address = PAGE_ALIGN(address+1);
69414+ else
69415+ return -ENOMEM;
69416+
69417 /*
69418 * We must make sure the anon_vma is allocated
69419 * so that the anon_vma locking is not a noop.
69420 */
69421 if (unlikely(anon_vma_prepare(vma)))
69422 return -ENOMEM;
69423+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
69424+ if (locknext && anon_vma_prepare(vma->vm_next))
69425+ return -ENOMEM;
69426 anon_vma_lock(vma);
69427+ if (locknext)
69428+ anon_vma_lock(vma->vm_next);
69429
69430 /*
69431 * vma->vm_start/vm_end cannot change under us because the caller
69432 * is required to hold the mmap_sem in read mode. We need the
69433- * anon_vma lock to serialize against concurrent expand_stacks.
69434- * Also guard against wrapping around to address 0.
69435+ * anon_vma locks to serialize against concurrent expand_stacks
69436+ * and expand_upwards.
69437 */
69438- if (address < PAGE_ALIGN(address+4))
69439- address = PAGE_ALIGN(address+4);
69440- else {
69441- anon_vma_unlock(vma);
69442- return -ENOMEM;
69443- }
69444 error = 0;
69445
69446 /* Somebody else might have raced and expanded it already */
69447- if (address > vma->vm_end) {
69448+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
69449+ error = -ENOMEM;
69450+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
69451 unsigned long size, grow;
69452
69453 size = address - vma->vm_start;
69454@@ -1640,6 +1900,8 @@ int expand_upwards(struct vm_area_struct
69455 if (!error)
69456 vma->vm_end = address;
69457 }
69458+ if (locknext)
69459+ anon_vma_unlock(vma->vm_next);
69460 anon_vma_unlock(vma);
69461 return error;
69462 }
69463@@ -1652,6 +1914,8 @@ static int expand_downwards(struct vm_ar
69464 unsigned long address)
69465 {
69466 int error;
69467+ bool lockprev = false;
69468+ struct vm_area_struct *prev;
69469
69470 /*
69471 * We must make sure the anon_vma is allocated
69472@@ -1665,6 +1929,15 @@ static int expand_downwards(struct vm_ar
69473 if (error)
69474 return error;
69475
69476+ prev = vma->vm_prev;
69477+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
69478+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
69479+#endif
69480+ if (lockprev && anon_vma_prepare(prev))
69481+ return -ENOMEM;
69482+ if (lockprev)
69483+ anon_vma_lock(prev);
69484+
69485 anon_vma_lock(vma);
69486
69487 /*
69488@@ -1674,9 +1947,17 @@ static int expand_downwards(struct vm_ar
69489 */
69490
69491 /* Somebody else might have raced and expanded it already */
69492- if (address < vma->vm_start) {
69493+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
69494+ error = -ENOMEM;
69495+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
69496 unsigned long size, grow;
69497
69498+#ifdef CONFIG_PAX_SEGMEXEC
69499+ struct vm_area_struct *vma_m;
69500+
69501+ vma_m = pax_find_mirror_vma(vma);
69502+#endif
69503+
69504 size = vma->vm_end - address;
69505 grow = (vma->vm_start - address) >> PAGE_SHIFT;
69506
69507@@ -1684,9 +1965,20 @@ static int expand_downwards(struct vm_ar
69508 if (!error) {
69509 vma->vm_start = address;
69510 vma->vm_pgoff -= grow;
69511+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
69512+
69513+#ifdef CONFIG_PAX_SEGMEXEC
69514+ if (vma_m) {
69515+ vma_m->vm_start -= grow << PAGE_SHIFT;
69516+ vma_m->vm_pgoff -= grow;
69517+ }
69518+#endif
69519+
69520 }
69521 }
69522 anon_vma_unlock(vma);
69523+ if (lockprev)
69524+ anon_vma_unlock(prev);
69525 return error;
69526 }
69527
69528@@ -1762,6 +2054,13 @@ static void remove_vma_list(struct mm_st
69529 do {
69530 long nrpages = vma_pages(vma);
69531
69532+#ifdef CONFIG_PAX_SEGMEXEC
69533+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
69534+ vma = remove_vma(vma);
69535+ continue;
69536+ }
69537+#endif
69538+
69539 mm->total_vm -= nrpages;
69540 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
69541 vma = remove_vma(vma);
69542@@ -1807,6 +2106,16 @@ detach_vmas_to_be_unmapped(struct mm_str
69543 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
69544 vma->vm_prev = NULL;
69545 do {
69546+
69547+#ifdef CONFIG_PAX_SEGMEXEC
69548+ if (vma->vm_mirror) {
69549+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
69550+ vma->vm_mirror->vm_mirror = NULL;
69551+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
69552+ vma->vm_mirror = NULL;
69553+ }
69554+#endif
69555+
69556 rb_erase(&vma->vm_rb, &mm->mm_rb);
69557 mm->map_count--;
69558 tail_vma = vma;
69559@@ -1834,10 +2143,25 @@ int split_vma(struct mm_struct * mm, str
69560 struct mempolicy *pol;
69561 struct vm_area_struct *new;
69562
69563+#ifdef CONFIG_PAX_SEGMEXEC
69564+ struct vm_area_struct *vma_m, *new_m = NULL;
69565+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
69566+#endif
69567+
69568 if (is_vm_hugetlb_page(vma) && (addr &
69569 ~(huge_page_mask(hstate_vma(vma)))))
69570 return -EINVAL;
69571
69572+#ifdef CONFIG_PAX_SEGMEXEC
69573+ vma_m = pax_find_mirror_vma(vma);
69574+
69575+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
69576+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
69577+ if (mm->map_count >= sysctl_max_map_count-1)
69578+ return -ENOMEM;
69579+ } else
69580+#endif
69581+
69582 if (mm->map_count >= sysctl_max_map_count)
69583 return -ENOMEM;
69584
69585@@ -1845,6 +2169,16 @@ int split_vma(struct mm_struct * mm, str
69586 if (!new)
69587 return -ENOMEM;
69588
69589+#ifdef CONFIG_PAX_SEGMEXEC
69590+ if (vma_m) {
69591+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
69592+ if (!new_m) {
69593+ kmem_cache_free(vm_area_cachep, new);
69594+ return -ENOMEM;
69595+ }
69596+ }
69597+#endif
69598+
69599 /* most fields are the same, copy all, and then fixup */
69600 *new = *vma;
69601
69602@@ -1855,8 +2189,29 @@ int split_vma(struct mm_struct * mm, str
69603 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
69604 }
69605
69606+#ifdef CONFIG_PAX_SEGMEXEC
69607+ if (vma_m) {
69608+ *new_m = *vma_m;
69609+ new_m->vm_mirror = new;
69610+ new->vm_mirror = new_m;
69611+
69612+ if (new_below)
69613+ new_m->vm_end = addr_m;
69614+ else {
69615+ new_m->vm_start = addr_m;
69616+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
69617+ }
69618+ }
69619+#endif
69620+
69621 pol = mpol_dup(vma_policy(vma));
69622 if (IS_ERR(pol)) {
69623+
69624+#ifdef CONFIG_PAX_SEGMEXEC
69625+ if (new_m)
69626+ kmem_cache_free(vm_area_cachep, new_m);
69627+#endif
69628+
69629 kmem_cache_free(vm_area_cachep, new);
69630 return PTR_ERR(pol);
69631 }
69632@@ -1877,6 +2232,28 @@ int split_vma(struct mm_struct * mm, str
69633 else
69634 vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
69635
69636+#ifdef CONFIG_PAX_SEGMEXEC
69637+ if (vma_m) {
69638+ mpol_get(pol);
69639+ vma_set_policy(new_m, pol);
69640+
69641+ if (new_m->vm_file) {
69642+ get_file(new_m->vm_file);
69643+ if (vma_m->vm_flags & VM_EXECUTABLE)
69644+ added_exe_file_vma(mm);
69645+ }
69646+
69647+ if (new_m->vm_ops && new_m->vm_ops->open)
69648+ new_m->vm_ops->open(new_m);
69649+
69650+ if (new_below)
69651+ vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
69652+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
69653+ else
69654+ vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
69655+ }
69656+#endif
69657+
69658 return 0;
69659 }
69660
69661@@ -1885,11 +2262,30 @@ int split_vma(struct mm_struct * mm, str
69662 * work. This now handles partial unmappings.
69663 * Jeremy Fitzhardinge <jeremy@goop.org>
69664 */
69665+#ifdef CONFIG_PAX_SEGMEXEC
69666+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69667+{
69668+ int ret = __do_munmap(mm, start, len);
69669+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
69670+ return ret;
69671+
69672+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
69673+}
69674+
69675+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69676+#else
69677 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
69678+#endif
69679 {
69680 unsigned long end;
69681 struct vm_area_struct *vma, *prev, *last;
69682
69683+ /*
69684+ * mm->mmap_sem is required to protect against another thread
69685+ * changing the mappings in case we sleep.
69686+ */
69687+ verify_mm_writelocked(mm);
69688+
69689 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
69690 return -EINVAL;
69691
69692@@ -1953,6 +2349,8 @@ int do_munmap(struct mm_struct *mm, unsi
69693 /* Fix up all other VM information */
69694 remove_vma_list(mm, vma);
69695
69696+ track_exec_limit(mm, start, end, 0UL);
69697+
69698 return 0;
69699 }
69700
69701@@ -1965,22 +2363,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
69702
69703 profile_munmap(addr);
69704
69705+#ifdef CONFIG_PAX_SEGMEXEC
69706+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
69707+ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
69708+ return -EINVAL;
69709+#endif
69710+
69711 down_write(&mm->mmap_sem);
69712 ret = do_munmap(mm, addr, len);
69713 up_write(&mm->mmap_sem);
69714 return ret;
69715 }
69716
69717-static inline void verify_mm_writelocked(struct mm_struct *mm)
69718-{
69719-#ifdef CONFIG_DEBUG_VM
69720- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
69721- WARN_ON(1);
69722- up_read(&mm->mmap_sem);
69723- }
69724-#endif
69725-}
69726-
69727 /*
69728 * this is really a simplified "do_mmap". it only handles
69729 * anonymous maps. eventually we may be able to do some
69730@@ -1994,6 +2388,7 @@ unsigned long do_brk(unsigned long addr,
69731 struct rb_node ** rb_link, * rb_parent;
69732 pgoff_t pgoff = addr >> PAGE_SHIFT;
69733 int error;
69734+ unsigned long charged;
69735
69736 len = PAGE_ALIGN(len);
69737 if (!len)
69738@@ -2005,16 +2400,30 @@ unsigned long do_brk(unsigned long addr,
69739
69740 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
69741
69742+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
69743+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
69744+ flags &= ~VM_EXEC;
69745+
69746+#ifdef CONFIG_PAX_MPROTECT
69747+ if (mm->pax_flags & MF_PAX_MPROTECT)
69748+ flags &= ~VM_MAYEXEC;
69749+#endif
69750+
69751+ }
69752+#endif
69753+
69754 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
69755 if (error & ~PAGE_MASK)
69756 return error;
69757
69758+ charged = len >> PAGE_SHIFT;
69759+
69760 /*
69761 * mlock MCL_FUTURE?
69762 */
69763 if (mm->def_flags & VM_LOCKED) {
69764 unsigned long locked, lock_limit;
69765- locked = len >> PAGE_SHIFT;
69766+ locked = charged;
69767 locked += mm->locked_vm;
69768 lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
69769 lock_limit >>= PAGE_SHIFT;
69770@@ -2031,22 +2440,22 @@ unsigned long do_brk(unsigned long addr,
69771 /*
69772 * Clear old maps. this also does some error checking for us
69773 */
69774- munmap_back:
69775 vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69776 if (vma && vma->vm_start < addr + len) {
69777 if (do_munmap(mm, addr, len))
69778 return -ENOMEM;
69779- goto munmap_back;
69780+ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent);
69781+ BUG_ON(vma && vma->vm_start < addr + len);
69782 }
69783
69784 /* Check against address space limits *after* clearing old maps... */
69785- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
69786+ if (!may_expand_vm(mm, charged))
69787 return -ENOMEM;
69788
69789 if (mm->map_count > sysctl_max_map_count)
69790 return -ENOMEM;
69791
69792- if (security_vm_enough_memory(len >> PAGE_SHIFT))
69793+ if (security_vm_enough_memory(charged))
69794 return -ENOMEM;
69795
69796 /* Can we just expand an old private anonymous mapping? */
69797@@ -2060,7 +2469,7 @@ unsigned long do_brk(unsigned long addr,
69798 */
69799 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69800 if (!vma) {
69801- vm_unacct_memory(len >> PAGE_SHIFT);
69802+ vm_unacct_memory(charged);
69803 return -ENOMEM;
69804 }
69805
69806@@ -2072,11 +2481,12 @@ unsigned long do_brk(unsigned long addr,
69807 vma->vm_page_prot = vm_get_page_prot(flags);
69808 vma_link(mm, vma, prev, rb_link, rb_parent);
69809 out:
69810- mm->total_vm += len >> PAGE_SHIFT;
69811+ mm->total_vm += charged;
69812 if (flags & VM_LOCKED) {
69813 if (!mlock_vma_pages_range(vma, addr, addr + len))
69814- mm->locked_vm += (len >> PAGE_SHIFT);
69815+ mm->locked_vm += charged;
69816 }
69817+ track_exec_limit(mm, addr, addr + len, flags);
69818 return addr;
69819 }
69820
69821@@ -2123,8 +2533,10 @@ void exit_mmap(struct mm_struct *mm)
69822 * Walk the list again, actually closing and freeing it,
69823 * with preemption enabled, without holding any MM locks.
69824 */
69825- while (vma)
69826+ while (vma) {
69827+ vma->vm_mirror = NULL;
69828 vma = remove_vma(vma);
69829+ }
69830
69831 BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
69832 }
69833@@ -2138,6 +2550,10 @@ int insert_vm_struct(struct mm_struct *
69834 struct vm_area_struct * __vma, * prev;
69835 struct rb_node ** rb_link, * rb_parent;
69836
69837+#ifdef CONFIG_PAX_SEGMEXEC
69838+ struct vm_area_struct *vma_m = NULL;
69839+#endif
69840+
69841 /*
69842 * The vm_pgoff of a purely anonymous vma should be irrelevant
69843 * until its first write fault, when page's anon_vma and index
69844@@ -2160,7 +2576,22 @@ int insert_vm_struct(struct mm_struct *
69845 if ((vma->vm_flags & VM_ACCOUNT) &&
69846 security_vm_enough_memory_mm(mm, vma_pages(vma)))
69847 return -ENOMEM;
69848+
69849+#ifdef CONFIG_PAX_SEGMEXEC
69850+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
69851+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
69852+ if (!vma_m)
69853+ return -ENOMEM;
69854+ }
69855+#endif
69856+
69857 vma_link(mm, vma, prev, rb_link, rb_parent);
69858+
69859+#ifdef CONFIG_PAX_SEGMEXEC
69860+ if (vma_m)
69861+ pax_mirror_vma(vma_m, vma);
69862+#endif
69863+
69864 return 0;
69865 }
69866
69867@@ -2178,6 +2609,8 @@ struct vm_area_struct *copy_vma(struct v
69868 struct rb_node **rb_link, *rb_parent;
69869 struct mempolicy *pol;
69870
69871+ BUG_ON(vma->vm_mirror);
69872+
69873 /*
69874 * If anonymous vma has not yet been faulted, update new pgoff
69875 * to match new location, to increase its chance of merging.
69876@@ -2221,6 +2654,35 @@ struct vm_area_struct *copy_vma(struct v
69877 return new_vma;
69878 }
69879
69880+#ifdef CONFIG_PAX_SEGMEXEC
69881+void pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
69882+{
69883+ struct vm_area_struct *prev_m;
69884+ struct rb_node **rb_link_m, *rb_parent_m;
69885+ struct mempolicy *pol_m;
69886+
69887+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
69888+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
69889+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
69890+ *vma_m = *vma;
69891+ pol_m = vma_policy(vma_m);
69892+ mpol_get(pol_m);
69893+ vma_set_policy(vma_m, pol_m);
69894+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
69895+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
69896+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
69897+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
69898+ if (vma_m->vm_file)
69899+ get_file(vma_m->vm_file);
69900+ if (vma_m->vm_ops && vma_m->vm_ops->open)
69901+ vma_m->vm_ops->open(vma_m);
69902+ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m);
69903+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
69904+ vma_m->vm_mirror = vma;
69905+ vma->vm_mirror = vma_m;
69906+}
69907+#endif
69908+
69909 /*
69910 * Return true if the calling process may expand its vm space by the passed
69911 * number of pages
69912@@ -2231,7 +2693,7 @@ int may_expand_vm(struct mm_struct *mm,
69913 unsigned long lim;
69914
69915 lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
69916-
69917+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
69918 if (cur + npages > lim)
69919 return 0;
69920 return 1;
69921@@ -2301,6 +2763,22 @@ int install_special_mapping(struct mm_st
69922 vma->vm_start = addr;
69923 vma->vm_end = addr + len;
69924
69925+#ifdef CONFIG_PAX_MPROTECT
69926+ if (mm->pax_flags & MF_PAX_MPROTECT) {
69927+#ifndef CONFIG_PAX_MPROTECT_COMPAT
69928+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
69929+ return -EPERM;
69930+ if (!(vm_flags & VM_EXEC))
69931+ vm_flags &= ~VM_MAYEXEC;
69932+#else
69933+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
69934+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
69935+#endif
69936+ else
69937+ vm_flags &= ~VM_MAYWRITE;
69938+ }
69939+#endif
69940+
69941 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
69942 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
69943
69944diff -urNp linux-2.6.32.43/mm/mprotect.c linux-2.6.32.43/mm/mprotect.c
69945--- linux-2.6.32.43/mm/mprotect.c 2011-03-27 14:31:47.000000000 -0400
69946+++ linux-2.6.32.43/mm/mprotect.c 2011-04-17 15:56:46.000000000 -0400
69947@@ -24,10 +24,16 @@
69948 #include <linux/mmu_notifier.h>
69949 #include <linux/migrate.h>
69950 #include <linux/perf_event.h>
69951+
69952+#ifdef CONFIG_PAX_MPROTECT
69953+#include <linux/elf.h>
69954+#endif
69955+
69956 #include <asm/uaccess.h>
69957 #include <asm/pgtable.h>
69958 #include <asm/cacheflush.h>
69959 #include <asm/tlbflush.h>
69960+#include <asm/mmu_context.h>
69961
69962 #ifndef pgprot_modify
69963 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
69964@@ -132,6 +138,48 @@ static void change_protection(struct vm_
69965 flush_tlb_range(vma, start, end);
69966 }
69967
69968+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
69969+/* called while holding the mmap semaphore for writing, except during stack expansion */
69970+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
69971+{
69972+ unsigned long oldlimit, newlimit = 0UL;
69973+
69974+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || nx_enabled)
69975+ return;
69976+
69977+ spin_lock(&mm->page_table_lock);
69978+ oldlimit = mm->context.user_cs_limit;
69979+ if ((prot & VM_EXEC) && oldlimit < end)
69980+ /* USER_CS limit moved up */
69981+ newlimit = end;
69982+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
69983+ /* USER_CS limit moved down */
69984+ newlimit = start;
69985+
69986+ if (newlimit) {
69987+ mm->context.user_cs_limit = newlimit;
69988+
69989+#ifdef CONFIG_SMP
69990+ wmb();
69991+ cpus_clear(mm->context.cpu_user_cs_mask);
69992+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
69993+#endif
69994+
69995+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
69996+ }
69997+ spin_unlock(&mm->page_table_lock);
69998+ if (newlimit == end) {
69999+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
70000+
70001+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
70002+ if (is_vm_hugetlb_page(vma))
70003+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
70004+ else
70005+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma));
70006+ }
70007+}
70008+#endif
70009+
70010 int
70011 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
70012 unsigned long start, unsigned long end, unsigned long newflags)
70013@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm
70014 int error;
70015 int dirty_accountable = 0;
70016
70017+#ifdef CONFIG_PAX_SEGMEXEC
70018+ struct vm_area_struct *vma_m = NULL;
70019+ unsigned long start_m, end_m;
70020+
70021+ start_m = start + SEGMEXEC_TASK_SIZE;
70022+ end_m = end + SEGMEXEC_TASK_SIZE;
70023+#endif
70024+
70025 if (newflags == oldflags) {
70026 *pprev = vma;
70027 return 0;
70028 }
70029
70030+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
70031+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
70032+
70033+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
70034+ return -ENOMEM;
70035+
70036+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
70037+ return -ENOMEM;
70038+ }
70039+
70040 /*
70041 * If we make a private mapping writable we increase our commit;
70042 * but (without finer accounting) cannot reduce our commit if we
70043@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm
70044 }
70045 }
70046
70047+#ifdef CONFIG_PAX_SEGMEXEC
70048+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
70049+ if (start != vma->vm_start) {
70050+ error = split_vma(mm, vma, start, 1);
70051+ if (error)
70052+ goto fail;
70053+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
70054+ *pprev = (*pprev)->vm_next;
70055+ }
70056+
70057+ if (end != vma->vm_end) {
70058+ error = split_vma(mm, vma, end, 0);
70059+ if (error)
70060+ goto fail;
70061+ }
70062+
70063+ if (pax_find_mirror_vma(vma)) {
70064+ error = __do_munmap(mm, start_m, end_m - start_m);
70065+ if (error)
70066+ goto fail;
70067+ } else {
70068+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
70069+ if (!vma_m) {
70070+ error = -ENOMEM;
70071+ goto fail;
70072+ }
70073+ vma->vm_flags = newflags;
70074+ pax_mirror_vma(vma_m, vma);
70075+ }
70076+ }
70077+#endif
70078+
70079 /*
70080 * First try to merge with previous and/or next vma.
70081 */
70082@@ -195,9 +293,21 @@ success:
70083 * vm_flags and vm_page_prot are protected by the mmap_sem
70084 * held in write mode.
70085 */
70086+
70087+#ifdef CONFIG_PAX_SEGMEXEC
70088+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
70089+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
70090+#endif
70091+
70092 vma->vm_flags = newflags;
70093+
70094+#ifdef CONFIG_PAX_MPROTECT
70095+ if (mm->binfmt && mm->binfmt->handle_mprotect)
70096+ mm->binfmt->handle_mprotect(vma, newflags);
70097+#endif
70098+
70099 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
70100- vm_get_page_prot(newflags));
70101+ vm_get_page_prot(vma->vm_flags));
70102
70103 if (vma_wants_writenotify(vma)) {
70104 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
70105@@ -239,6 +349,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
70106 end = start + len;
70107 if (end <= start)
70108 return -ENOMEM;
70109+
70110+#ifdef CONFIG_PAX_SEGMEXEC
70111+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
70112+ if (end > SEGMEXEC_TASK_SIZE)
70113+ return -EINVAL;
70114+ } else
70115+#endif
70116+
70117+ if (end > TASK_SIZE)
70118+ return -EINVAL;
70119+
70120 if (!arch_validate_prot(prot))
70121 return -EINVAL;
70122
70123@@ -246,7 +367,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
70124 /*
70125 * Does the application expect PROT_READ to imply PROT_EXEC:
70126 */
70127- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
70128+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
70129 prot |= PROT_EXEC;
70130
70131 vm_flags = calc_vm_prot_bits(prot);
70132@@ -278,6 +399,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
70133 if (start > vma->vm_start)
70134 prev = vma;
70135
70136+#ifdef CONFIG_PAX_MPROTECT
70137+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
70138+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
70139+#endif
70140+
70141 for (nstart = start ; ; ) {
70142 unsigned long newflags;
70143
70144@@ -287,6 +413,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
70145
70146 /* newflags >> 4 shift VM_MAY% in place of VM_% */
70147 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
70148+ if (prot & (PROT_WRITE | PROT_EXEC))
70149+ gr_log_rwxmprotect(vma->vm_file);
70150+
70151+ error = -EACCES;
70152+ goto out;
70153+ }
70154+
70155+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
70156 error = -EACCES;
70157 goto out;
70158 }
70159@@ -301,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
70160 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
70161 if (error)
70162 goto out;
70163+
70164+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
70165+
70166 nstart = tmp;
70167
70168 if (nstart < prev->vm_end)
70169diff -urNp linux-2.6.32.43/mm/mremap.c linux-2.6.32.43/mm/mremap.c
70170--- linux-2.6.32.43/mm/mremap.c 2011-04-17 17:00:52.000000000 -0400
70171+++ linux-2.6.32.43/mm/mremap.c 2011-04-17 17:03:58.000000000 -0400
70172@@ -112,6 +112,12 @@ static void move_ptes(struct vm_area_str
70173 continue;
70174 pte = ptep_clear_flush(vma, old_addr, old_pte);
70175 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
70176+
70177+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70178+ if (!nx_enabled && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
70179+ pte = pte_exprotect(pte);
70180+#endif
70181+
70182 set_pte_at(mm, new_addr, new_pte, pte);
70183 }
70184
70185@@ -271,6 +277,11 @@ static struct vm_area_struct *vma_to_res
70186 if (is_vm_hugetlb_page(vma))
70187 goto Einval;
70188
70189+#ifdef CONFIG_PAX_SEGMEXEC
70190+ if (pax_find_mirror_vma(vma))
70191+ goto Einval;
70192+#endif
70193+
70194 /* We can't remap across vm area boundaries */
70195 if (old_len > vma->vm_end - addr)
70196 goto Efault;
70197@@ -327,20 +338,25 @@ static unsigned long mremap_to(unsigned
70198 unsigned long ret = -EINVAL;
70199 unsigned long charged = 0;
70200 unsigned long map_flags;
70201+ unsigned long pax_task_size = TASK_SIZE;
70202
70203 if (new_addr & ~PAGE_MASK)
70204 goto out;
70205
70206- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
70207+#ifdef CONFIG_PAX_SEGMEXEC
70208+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
70209+ pax_task_size = SEGMEXEC_TASK_SIZE;
70210+#endif
70211+
70212+ pax_task_size -= PAGE_SIZE;
70213+
70214+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
70215 goto out;
70216
70217 /* Check if the location we're moving into overlaps the
70218 * old location at all, and fail if it does.
70219 */
70220- if ((new_addr <= addr) && (new_addr+new_len) > addr)
70221- goto out;
70222-
70223- if ((addr <= new_addr) && (addr+old_len) > new_addr)
70224+ if (addr + old_len > new_addr && new_addr + new_len > addr)
70225 goto out;
70226
70227 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70228@@ -412,6 +428,7 @@ unsigned long do_mremap(unsigned long ad
70229 struct vm_area_struct *vma;
70230 unsigned long ret = -EINVAL;
70231 unsigned long charged = 0;
70232+ unsigned long pax_task_size = TASK_SIZE;
70233
70234 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
70235 goto out;
70236@@ -430,6 +447,17 @@ unsigned long do_mremap(unsigned long ad
70237 if (!new_len)
70238 goto out;
70239
70240+#ifdef CONFIG_PAX_SEGMEXEC
70241+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
70242+ pax_task_size = SEGMEXEC_TASK_SIZE;
70243+#endif
70244+
70245+ pax_task_size -= PAGE_SIZE;
70246+
70247+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
70248+ old_len > pax_task_size || addr > pax_task_size-old_len)
70249+ goto out;
70250+
70251 if (flags & MREMAP_FIXED) {
70252 if (flags & MREMAP_MAYMOVE)
70253 ret = mremap_to(addr, old_len, new_addr, new_len);
70254@@ -476,6 +504,7 @@ unsigned long do_mremap(unsigned long ad
70255 addr + new_len);
70256 }
70257 ret = addr;
70258+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
70259 goto out;
70260 }
70261 }
70262@@ -502,7 +531,13 @@ unsigned long do_mremap(unsigned long ad
70263 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
70264 if (ret)
70265 goto out;
70266+
70267+ map_flags = vma->vm_flags;
70268 ret = move_vma(vma, addr, old_len, new_len, new_addr);
70269+ if (!(ret & ~PAGE_MASK)) {
70270+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
70271+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
70272+ }
70273 }
70274 out:
70275 if (ret & ~PAGE_MASK)
70276diff -urNp linux-2.6.32.43/mm/nommu.c linux-2.6.32.43/mm/nommu.c
70277--- linux-2.6.32.43/mm/nommu.c 2011-03-27 14:31:47.000000000 -0400
70278+++ linux-2.6.32.43/mm/nommu.c 2011-04-17 15:56:46.000000000 -0400
70279@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
70280 int sysctl_overcommit_ratio = 50; /* default is 50% */
70281 int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
70282 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
70283-int heap_stack_gap = 0;
70284
70285 atomic_long_t mmap_pages_allocated;
70286
70287@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m
70288 EXPORT_SYMBOL(find_vma);
70289
70290 /*
70291- * find a VMA
70292- * - we don't extend stack VMAs under NOMMU conditions
70293- */
70294-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
70295-{
70296- return find_vma(mm, addr);
70297-}
70298-
70299-/*
70300 * expand a stack to a given address
70301 * - not supported under NOMMU conditions
70302 */
70303diff -urNp linux-2.6.32.43/mm/page_alloc.c linux-2.6.32.43/mm/page_alloc.c
70304--- linux-2.6.32.43/mm/page_alloc.c 2011-06-25 12:55:35.000000000 -0400
70305+++ linux-2.6.32.43/mm/page_alloc.c 2011-07-09 09:13:08.000000000 -0400
70306@@ -289,7 +289,7 @@ out:
70307 * This usage means that zero-order pages may not be compound.
70308 */
70309
70310-static void free_compound_page(struct page *page)
70311+void free_compound_page(struct page *page)
70312 {
70313 __free_pages_ok(page, compound_order(page));
70314 }
70315@@ -587,6 +587,10 @@ static void __free_pages_ok(struct page
70316 int bad = 0;
70317 int wasMlocked = __TestClearPageMlocked(page);
70318
70319+#ifdef CONFIG_PAX_MEMORY_SANITIZE
70320+ unsigned long index = 1UL << order;
70321+#endif
70322+
70323 kmemcheck_free_shadow(page, order);
70324
70325 for (i = 0 ; i < (1 << order) ; ++i)
70326@@ -599,6 +603,12 @@ static void __free_pages_ok(struct page
70327 debug_check_no_obj_freed(page_address(page),
70328 PAGE_SIZE << order);
70329 }
70330+
70331+#ifdef CONFIG_PAX_MEMORY_SANITIZE
70332+ for (; index; --index)
70333+ sanitize_highpage(page + index - 1);
70334+#endif
70335+
70336 arch_free_page(page, order);
70337 kernel_map_pages(page, 1 << order, 0);
70338
70339@@ -702,8 +712,10 @@ static int prep_new_page(struct page *pa
70340 arch_alloc_page(page, order);
70341 kernel_map_pages(page, 1 << order, 1);
70342
70343+#ifndef CONFIG_PAX_MEMORY_SANITIZE
70344 if (gfp_flags & __GFP_ZERO)
70345 prep_zero_page(page, order, gfp_flags);
70346+#endif
70347
70348 if (order && (gfp_flags & __GFP_COMP))
70349 prep_compound_page(page, order);
70350@@ -1097,6 +1109,11 @@ static void free_hot_cold_page(struct pa
70351 debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
70352 debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
70353 }
70354+
70355+#ifdef CONFIG_PAX_MEMORY_SANITIZE
70356+ sanitize_highpage(page);
70357+#endif
70358+
70359 arch_free_page(page, 0);
70360 kernel_map_pages(page, 1, 0);
70361
70362@@ -2179,6 +2196,8 @@ void show_free_areas(void)
70363 int cpu;
70364 struct zone *zone;
70365
70366+ pax_track_stack();
70367+
70368 for_each_populated_zone(zone) {
70369 show_node(zone);
70370 printk("%s per-cpu:\n", zone->name);
70371@@ -3736,7 +3755,7 @@ static void __init setup_usemap(struct p
70372 zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
70373 }
70374 #else
70375-static void inline setup_usemap(struct pglist_data *pgdat,
70376+static inline void setup_usemap(struct pglist_data *pgdat,
70377 struct zone *zone, unsigned long zonesize) {}
70378 #endif /* CONFIG_SPARSEMEM */
70379
70380diff -urNp linux-2.6.32.43/mm/percpu.c linux-2.6.32.43/mm/percpu.c
70381--- linux-2.6.32.43/mm/percpu.c 2011-03-27 14:31:47.000000000 -0400
70382+++ linux-2.6.32.43/mm/percpu.c 2011-04-17 15:56:46.000000000 -0400
70383@@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu
70384 static unsigned int pcpu_last_unit_cpu __read_mostly;
70385
70386 /* the address of the first chunk which starts with the kernel static area */
70387-void *pcpu_base_addr __read_mostly;
70388+void *pcpu_base_addr __read_only;
70389 EXPORT_SYMBOL_GPL(pcpu_base_addr);
70390
70391 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
70392diff -urNp linux-2.6.32.43/mm/rmap.c linux-2.6.32.43/mm/rmap.c
70393--- linux-2.6.32.43/mm/rmap.c 2011-03-27 14:31:47.000000000 -0400
70394+++ linux-2.6.32.43/mm/rmap.c 2011-04-17 15:56:46.000000000 -0400
70395@@ -121,6 +121,17 @@ int anon_vma_prepare(struct vm_area_stru
70396 /* page_table_lock to protect against threads */
70397 spin_lock(&mm->page_table_lock);
70398 if (likely(!vma->anon_vma)) {
70399+
70400+#ifdef CONFIG_PAX_SEGMEXEC
70401+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
70402+
70403+ if (vma_m) {
70404+ BUG_ON(vma_m->anon_vma);
70405+ vma_m->anon_vma = anon_vma;
70406+ list_add_tail(&vma_m->anon_vma_node, &anon_vma->head);
70407+ }
70408+#endif
70409+
70410 vma->anon_vma = anon_vma;
70411 list_add_tail(&vma->anon_vma_node, &anon_vma->head);
70412 allocated = NULL;
70413diff -urNp linux-2.6.32.43/mm/shmem.c linux-2.6.32.43/mm/shmem.c
70414--- linux-2.6.32.43/mm/shmem.c 2011-03-27 14:31:47.000000000 -0400
70415+++ linux-2.6.32.43/mm/shmem.c 2011-05-18 20:09:37.000000000 -0400
70416@@ -31,7 +31,7 @@
70417 #include <linux/swap.h>
70418 #include <linux/ima.h>
70419
70420-static struct vfsmount *shm_mnt;
70421+struct vfsmount *shm_mnt;
70422
70423 #ifdef CONFIG_SHMEM
70424 /*
70425@@ -1061,6 +1061,8 @@ static int shmem_writepage(struct page *
70426 goto unlock;
70427 }
70428 entry = shmem_swp_entry(info, index, NULL);
70429+ if (!entry)
70430+ goto unlock;
70431 if (entry->val) {
70432 /*
70433 * The more uptodate page coming down from a stacked
70434@@ -1144,6 +1146,8 @@ static struct page *shmem_swapin(swp_ent
70435 struct vm_area_struct pvma;
70436 struct page *page;
70437
70438+ pax_track_stack();
70439+
70440 spol = mpol_cond_copy(&mpol,
70441 mpol_shared_policy_lookup(&info->policy, idx));
70442
70443@@ -1962,7 +1966,7 @@ static int shmem_symlink(struct inode *d
70444
70445 info = SHMEM_I(inode);
70446 inode->i_size = len-1;
70447- if (len <= (char *)inode - (char *)info) {
70448+ if (len <= (char *)inode - (char *)info && len <= 64) {
70449 /* do it inline */
70450 memcpy(info, symname, len);
70451 inode->i_op = &shmem_symlink_inline_operations;
70452@@ -2310,8 +2314,7 @@ int shmem_fill_super(struct super_block
70453 int err = -ENOMEM;
70454
70455 /* Round up to L1_CACHE_BYTES to resist false sharing */
70456- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
70457- L1_CACHE_BYTES), GFP_KERNEL);
70458+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
70459 if (!sbinfo)
70460 return -ENOMEM;
70461
70462diff -urNp linux-2.6.32.43/mm/slab.c linux-2.6.32.43/mm/slab.c
70463--- linux-2.6.32.43/mm/slab.c 2011-03-27 14:31:47.000000000 -0400
70464+++ linux-2.6.32.43/mm/slab.c 2011-05-04 17:56:20.000000000 -0400
70465@@ -174,7 +174,7 @@
70466
70467 /* Legal flag mask for kmem_cache_create(). */
70468 #if DEBUG
70469-# define CREATE_MASK (SLAB_RED_ZONE | \
70470+# define CREATE_MASK (SLAB_USERCOPY | SLAB_RED_ZONE | \
70471 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
70472 SLAB_CACHE_DMA | \
70473 SLAB_STORE_USER | \
70474@@ -182,7 +182,7 @@
70475 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70476 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
70477 #else
70478-# define CREATE_MASK (SLAB_HWCACHE_ALIGN | \
70479+# define CREATE_MASK (SLAB_USERCOPY | SLAB_HWCACHE_ALIGN | \
70480 SLAB_CACHE_DMA | \
70481 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
70482 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
70483@@ -308,7 +308,7 @@ struct kmem_list3 {
70484 * Need this for bootstrapping a per node allocator.
70485 */
70486 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
70487-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
70488+struct kmem_list3 initkmem_list3[NUM_INIT_LISTS];
70489 #define CACHE_CACHE 0
70490 #define SIZE_AC MAX_NUMNODES
70491 #define SIZE_L3 (2 * MAX_NUMNODES)
70492@@ -409,10 +409,10 @@ static void kmem_list3_init(struct kmem_
70493 if ((x)->max_freeable < i) \
70494 (x)->max_freeable = i; \
70495 } while (0)
70496-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
70497-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
70498-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
70499-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
70500+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
70501+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
70502+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
70503+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
70504 #else
70505 #define STATS_INC_ACTIVE(x) do { } while (0)
70506 #define STATS_DEC_ACTIVE(x) do { } while (0)
70507@@ -558,7 +558,7 @@ static inline void *index_to_obj(struct
70508 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
70509 */
70510 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
70511- const struct slab *slab, void *obj)
70512+ const struct slab *slab, const void *obj)
70513 {
70514 u32 offset = (obj - slab->s_mem);
70515 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
70516@@ -1453,7 +1453,7 @@ void __init kmem_cache_init(void)
70517 sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
70518 sizes[INDEX_AC].cs_size,
70519 ARCH_KMALLOC_MINALIGN,
70520- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70521+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70522 NULL);
70523
70524 if (INDEX_AC != INDEX_L3) {
70525@@ -1461,7 +1461,7 @@ void __init kmem_cache_init(void)
70526 kmem_cache_create(names[INDEX_L3].name,
70527 sizes[INDEX_L3].cs_size,
70528 ARCH_KMALLOC_MINALIGN,
70529- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70530+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70531 NULL);
70532 }
70533
70534@@ -1479,7 +1479,7 @@ void __init kmem_cache_init(void)
70535 sizes->cs_cachep = kmem_cache_create(names->name,
70536 sizes->cs_size,
70537 ARCH_KMALLOC_MINALIGN,
70538- ARCH_KMALLOC_FLAGS|SLAB_PANIC,
70539+ ARCH_KMALLOC_FLAGS|SLAB_PANIC|SLAB_USERCOPY,
70540 NULL);
70541 }
70542 #ifdef CONFIG_ZONE_DMA
70543@@ -4211,10 +4211,10 @@ static int s_show(struct seq_file *m, vo
70544 }
70545 /* cpu stats */
70546 {
70547- unsigned long allochit = atomic_read(&cachep->allochit);
70548- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
70549- unsigned long freehit = atomic_read(&cachep->freehit);
70550- unsigned long freemiss = atomic_read(&cachep->freemiss);
70551+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
70552+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
70553+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
70554+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
70555
70556 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
70557 allochit, allocmiss, freehit, freemiss);
70558@@ -4471,15 +4471,66 @@ static const struct file_operations proc
70559
70560 static int __init slab_proc_init(void)
70561 {
70562- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
70563+ mode_t gr_mode = S_IRUGO;
70564+
70565+#ifdef CONFIG_GRKERNSEC_PROC_ADD
70566+ gr_mode = S_IRUSR;
70567+#endif
70568+
70569+ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations);
70570 #ifdef CONFIG_DEBUG_SLAB_LEAK
70571- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
70572+ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations);
70573 #endif
70574 return 0;
70575 }
70576 module_init(slab_proc_init);
70577 #endif
70578
70579+void check_object_size(const void *ptr, unsigned long n, bool to)
70580+{
70581+
70582+#ifdef CONFIG_PAX_USERCOPY
70583+ struct page *page;
70584+ struct kmem_cache *cachep = NULL;
70585+ struct slab *slabp;
70586+ unsigned int objnr;
70587+ unsigned long offset;
70588+
70589+ if (!n)
70590+ return;
70591+
70592+ if (ZERO_OR_NULL_PTR(ptr))
70593+ goto report;
70594+
70595+ if (!virt_addr_valid(ptr))
70596+ return;
70597+
70598+ page = virt_to_head_page(ptr);
70599+
70600+ if (!PageSlab(page)) {
70601+ if (object_is_on_stack(ptr, n) == -1)
70602+ goto report;
70603+ return;
70604+ }
70605+
70606+ cachep = page_get_cache(page);
70607+ if (!(cachep->flags & SLAB_USERCOPY))
70608+ goto report;
70609+
70610+ slabp = page_get_slab(page);
70611+ objnr = obj_to_index(cachep, slabp, ptr);
70612+ BUG_ON(objnr >= cachep->num);
70613+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
70614+ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset)
70615+ return;
70616+
70617+report:
70618+ pax_report_usercopy(ptr, n, to, cachep ? cachep->name : NULL);
70619+#endif
70620+
70621+}
70622+EXPORT_SYMBOL(check_object_size);
70623+
70624 /**
70625 * ksize - get the actual amount of memory allocated for a given object
70626 * @objp: Pointer to the object
70627diff -urNp linux-2.6.32.43/mm/slob.c linux-2.6.32.43/mm/slob.c
70628--- linux-2.6.32.43/mm/slob.c 2011-03-27 14:31:47.000000000 -0400
70629+++ linux-2.6.32.43/mm/slob.c 2011-07-06 19:53:33.000000000 -0400
70630@@ -29,7 +29,7 @@
70631 * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls
70632 * alloc_pages() directly, allocating compound pages so the page order
70633 * does not have to be separately tracked, and also stores the exact
70634- * allocation size in page->private so that it can be used to accurately
70635+ * allocation size in slob_page->size so that it can be used to accurately
70636 * provide ksize(). These objects are detected in kfree() because slob_page()
70637 * is false for them.
70638 *
70639@@ -58,6 +58,7 @@
70640 */
70641
70642 #include <linux/kernel.h>
70643+#include <linux/sched.h>
70644 #include <linux/slab.h>
70645 #include <linux/mm.h>
70646 #include <linux/swap.h> /* struct reclaim_state */
70647@@ -100,7 +101,8 @@ struct slob_page {
70648 unsigned long flags; /* mandatory */
70649 atomic_t _count; /* mandatory */
70650 slobidx_t units; /* free units left in page */
70651- unsigned long pad[2];
70652+ unsigned long pad[1];
70653+ unsigned long size; /* size when >=PAGE_SIZE */
70654 slob_t *free; /* first free slob_t in page */
70655 struct list_head list; /* linked list of free pages */
70656 };
70657@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large);
70658 */
70659 static inline int is_slob_page(struct slob_page *sp)
70660 {
70661- return PageSlab((struct page *)sp);
70662+ return PageSlab((struct page *)sp) && !sp->size;
70663 }
70664
70665 static inline void set_slob_page(struct slob_page *sp)
70666@@ -148,7 +150,7 @@ static inline void clear_slob_page(struc
70667
70668 static inline struct slob_page *slob_page(const void *addr)
70669 {
70670- return (struct slob_page *)virt_to_page(addr);
70671+ return (struct slob_page *)virt_to_head_page(addr);
70672 }
70673
70674 /*
70675@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_
70676 /*
70677 * Return the size of a slob block.
70678 */
70679-static slobidx_t slob_units(slob_t *s)
70680+static slobidx_t slob_units(const slob_t *s)
70681 {
70682 if (s->units > 0)
70683 return s->units;
70684@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s)
70685 /*
70686 * Return the next free slob block pointer after this one.
70687 */
70688-static slob_t *slob_next(slob_t *s)
70689+static slob_t *slob_next(const slob_t *s)
70690 {
70691 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
70692 slobidx_t next;
70693@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s)
70694 /*
70695 * Returns true if s is the last free block in its page.
70696 */
70697-static int slob_last(slob_t *s)
70698+static int slob_last(const slob_t *s)
70699 {
70700 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
70701 }
70702@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i
70703 if (!page)
70704 return NULL;
70705
70706+ set_slob_page(page);
70707 return page_address(page);
70708 }
70709
70710@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp
70711 if (!b)
70712 return NULL;
70713 sp = slob_page(b);
70714- set_slob_page(sp);
70715
70716 spin_lock_irqsave(&slob_lock, flags);
70717 sp->units = SLOB_UNITS(PAGE_SIZE);
70718 sp->free = b;
70719+ sp->size = 0;
70720 INIT_LIST_HEAD(&sp->list);
70721 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
70722 set_slob_page_free(sp, slob_list);
70723@@ -475,10 +478,9 @@ out:
70724 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long)
70725 #endif
70726
70727-void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70728+static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align)
70729 {
70730- unsigned int *m;
70731- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70732+ slob_t *m;
70733 void *ret;
70734
70735 lockdep_trace_alloc(gfp);
70736@@ -491,7 +493,10 @@ void *__kmalloc_node(size_t size, gfp_t
70737
70738 if (!m)
70739 return NULL;
70740- *m = size;
70741+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
70742+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
70743+ m[0].units = size;
70744+ m[1].units = align;
70745 ret = (void *)m + align;
70746
70747 trace_kmalloc_node(_RET_IP_, ret,
70748@@ -501,16 +506,25 @@ void *__kmalloc_node(size_t size, gfp_t
70749
70750 ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
70751 if (ret) {
70752- struct page *page;
70753- page = virt_to_page(ret);
70754- page->private = size;
70755+ struct slob_page *sp;
70756+ sp = slob_page(ret);
70757+ sp->size = size;
70758 }
70759
70760 trace_kmalloc_node(_RET_IP_, ret,
70761 size, PAGE_SIZE << order, gfp, node);
70762 }
70763
70764- kmemleak_alloc(ret, size, 1, gfp);
70765+ return ret;
70766+}
70767+
70768+void *__kmalloc_node(size_t size, gfp_t gfp, int node)
70769+{
70770+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70771+ void *ret = __kmalloc_node_align(size, gfp, node, align);
70772+
70773+ if (!ZERO_OR_NULL_PTR(ret))
70774+ kmemleak_alloc(ret, size, 1, gfp);
70775 return ret;
70776 }
70777 EXPORT_SYMBOL(__kmalloc_node);
70778@@ -528,13 +542,88 @@ void kfree(const void *block)
70779 sp = slob_page(block);
70780 if (is_slob_page(sp)) {
70781 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70782- unsigned int *m = (unsigned int *)(block - align);
70783- slob_free(m, *m + align);
70784- } else
70785+ slob_t *m = (slob_t *)(block - align);
70786+ slob_free(m, m[0].units + align);
70787+ } else {
70788+ clear_slob_page(sp);
70789+ free_slob_page(sp);
70790+ sp->size = 0;
70791 put_page(&sp->page);
70792+ }
70793 }
70794 EXPORT_SYMBOL(kfree);
70795
70796+void check_object_size(const void *ptr, unsigned long n, bool to)
70797+{
70798+
70799+#ifdef CONFIG_PAX_USERCOPY
70800+ struct slob_page *sp;
70801+ const slob_t *free;
70802+ const void *base;
70803+ unsigned long flags;
70804+
70805+ if (!n)
70806+ return;
70807+
70808+ if (ZERO_OR_NULL_PTR(ptr))
70809+ goto report;
70810+
70811+ if (!virt_addr_valid(ptr))
70812+ return;
70813+
70814+ sp = slob_page(ptr);
70815+ if (!PageSlab((struct page*)sp)) {
70816+ if (object_is_on_stack(ptr, n) == -1)
70817+ goto report;
70818+ return;
70819+ }
70820+
70821+ if (sp->size) {
70822+ base = page_address(&sp->page);
70823+ if (base <= ptr && n <= sp->size - (ptr - base))
70824+ return;
70825+ goto report;
70826+ }
70827+
70828+ /* some tricky double walking to find the chunk */
70829+ spin_lock_irqsave(&slob_lock, flags);
70830+ base = (void *)((unsigned long)ptr & PAGE_MASK);
70831+ free = sp->free;
70832+
70833+ while (!slob_last(free) && (void *)free <= ptr) {
70834+ base = free + slob_units(free);
70835+ free = slob_next(free);
70836+ }
70837+
70838+ while (base < (void *)free) {
70839+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
70840+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
70841+ int offset;
70842+
70843+ if (ptr < base + align)
70844+ break;
70845+
70846+ offset = ptr - base - align;
70847+ if (offset >= m) {
70848+ base += size;
70849+ continue;
70850+ }
70851+
70852+ if (n > m - offset)
70853+ break;
70854+
70855+ spin_unlock_irqrestore(&slob_lock, flags);
70856+ return;
70857+ }
70858+
70859+ spin_unlock_irqrestore(&slob_lock, flags);
70860+report:
70861+ pax_report_usercopy(ptr, n, to, NULL);
70862+#endif
70863+
70864+}
70865+EXPORT_SYMBOL(check_object_size);
70866+
70867 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
70868 size_t ksize(const void *block)
70869 {
70870@@ -547,10 +636,10 @@ size_t ksize(const void *block)
70871 sp = slob_page(block);
70872 if (is_slob_page(sp)) {
70873 int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
70874- unsigned int *m = (unsigned int *)(block - align);
70875- return SLOB_UNITS(*m) * SLOB_UNIT;
70876+ slob_t *m = (slob_t *)(block - align);
70877+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
70878 } else
70879- return sp->page.private;
70880+ return sp->size;
70881 }
70882 EXPORT_SYMBOL(ksize);
70883
70884@@ -566,8 +655,13 @@ struct kmem_cache *kmem_cache_create(con
70885 {
70886 struct kmem_cache *c;
70887
70888+#ifdef CONFIG_PAX_USERCOPY
70889+ c = __kmalloc_node_align(sizeof(struct kmem_cache),
70890+ GFP_KERNEL, -1, ARCH_KMALLOC_MINALIGN);
70891+#else
70892 c = slob_alloc(sizeof(struct kmem_cache),
70893 GFP_KERNEL, ARCH_KMALLOC_MINALIGN, -1);
70894+#endif
70895
70896 if (c) {
70897 c->name = name;
70898@@ -605,17 +699,25 @@ void *kmem_cache_alloc_node(struct kmem_
70899 {
70900 void *b;
70901
70902+#ifdef CONFIG_PAX_USERCOPY
70903+ b = __kmalloc_node_align(c->size, flags, node, c->align);
70904+#else
70905 if (c->size < PAGE_SIZE) {
70906 b = slob_alloc(c->size, flags, c->align, node);
70907 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70908 SLOB_UNITS(c->size) * SLOB_UNIT,
70909 flags, node);
70910 } else {
70911+ struct slob_page *sp;
70912+
70913 b = slob_new_pages(flags, get_order(c->size), node);
70914+ sp = slob_page(b);
70915+ sp->size = c->size;
70916 trace_kmem_cache_alloc_node(_RET_IP_, b, c->size,
70917 PAGE_SIZE << get_order(c->size),
70918 flags, node);
70919 }
70920+#endif
70921
70922 if (c->ctor)
70923 c->ctor(b);
70924@@ -627,10 +729,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
70925
70926 static void __kmem_cache_free(void *b, int size)
70927 {
70928- if (size < PAGE_SIZE)
70929+ struct slob_page *sp = slob_page(b);
70930+
70931+ if (is_slob_page(sp))
70932 slob_free(b, size);
70933- else
70934+ else {
70935+ clear_slob_page(sp);
70936+ free_slob_page(sp);
70937+ sp->size = 0;
70938 slob_free_pages(b, get_order(size));
70939+ }
70940 }
70941
70942 static void kmem_rcu_free(struct rcu_head *head)
70943@@ -643,18 +751,32 @@ static void kmem_rcu_free(struct rcu_hea
70944
70945 void kmem_cache_free(struct kmem_cache *c, void *b)
70946 {
70947+ int size = c->size;
70948+
70949+#ifdef CONFIG_PAX_USERCOPY
70950+ if (size + c->align < PAGE_SIZE) {
70951+ size += c->align;
70952+ b -= c->align;
70953+ }
70954+#endif
70955+
70956 kmemleak_free_recursive(b, c->flags);
70957 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
70958 struct slob_rcu *slob_rcu;
70959- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
70960+ slob_rcu = b + (size - sizeof(struct slob_rcu));
70961 INIT_RCU_HEAD(&slob_rcu->head);
70962- slob_rcu->size = c->size;
70963+ slob_rcu->size = size;
70964 call_rcu(&slob_rcu->head, kmem_rcu_free);
70965 } else {
70966- __kmem_cache_free(b, c->size);
70967+ __kmem_cache_free(b, size);
70968 }
70969
70970+#ifdef CONFIG_PAX_USERCOPY
70971+ trace_kfree(_RET_IP_, b);
70972+#else
70973 trace_kmem_cache_free(_RET_IP_, b);
70974+#endif
70975+
70976 }
70977 EXPORT_SYMBOL(kmem_cache_free);
70978
70979diff -urNp linux-2.6.32.43/mm/slub.c linux-2.6.32.43/mm/slub.c
70980--- linux-2.6.32.43/mm/slub.c 2011-03-27 14:31:47.000000000 -0400
70981+++ linux-2.6.32.43/mm/slub.c 2011-04-17 15:56:46.000000000 -0400
70982@@ -410,7 +410,7 @@ static void print_track(const char *s, s
70983 if (!t->addr)
70984 return;
70985
70986- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
70987+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
70988 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
70989 }
70990
70991@@ -1893,6 +1893,8 @@ void kmem_cache_free(struct kmem_cache *
70992
70993 page = virt_to_head_page(x);
70994
70995+ BUG_ON(!PageSlab(page));
70996+
70997 slab_free(s, page, x, _RET_IP_);
70998
70999 trace_kmem_cache_free(_RET_IP_, x);
71000@@ -1937,7 +1939,7 @@ static int slub_min_objects;
71001 * Merge control. If this is set then no merging of slab caches will occur.
71002 * (Could be removed. This was introduced to pacify the merge skeptics.)
71003 */
71004-static int slub_nomerge;
71005+static int slub_nomerge = 1;
71006
71007 /*
71008 * Calculate the order of allocation given an slab object size.
71009@@ -2493,7 +2495,7 @@ static int kmem_cache_open(struct kmem_c
71010 * list to avoid pounding the page allocator excessively.
71011 */
71012 set_min_partial(s, ilog2(s->size));
71013- s->refcount = 1;
71014+ atomic_set(&s->refcount, 1);
71015 #ifdef CONFIG_NUMA
71016 s->remote_node_defrag_ratio = 1000;
71017 #endif
71018@@ -2630,8 +2632,7 @@ static inline int kmem_cache_close(struc
71019 void kmem_cache_destroy(struct kmem_cache *s)
71020 {
71021 down_write(&slub_lock);
71022- s->refcount--;
71023- if (!s->refcount) {
71024+ if (atomic_dec_and_test(&s->refcount)) {
71025 list_del(&s->list);
71026 up_write(&slub_lock);
71027 if (kmem_cache_close(s)) {
71028@@ -2691,12 +2692,10 @@ static int __init setup_slub_nomerge(cha
71029 __setup("slub_nomerge", setup_slub_nomerge);
71030
71031 static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
71032- const char *name, int size, gfp_t gfp_flags)
71033+ const char *name, int size, gfp_t gfp_flags, unsigned int flags)
71034 {
71035- unsigned int flags = 0;
71036-
71037 if (gfp_flags & SLUB_DMA)
71038- flags = SLAB_CACHE_DMA;
71039+ flags |= SLAB_CACHE_DMA;
71040
71041 /*
71042 * This function is called with IRQs disabled during early-boot on
71043@@ -2915,6 +2914,46 @@ void *__kmalloc_node(size_t size, gfp_t
71044 EXPORT_SYMBOL(__kmalloc_node);
71045 #endif
71046
71047+void check_object_size(const void *ptr, unsigned long n, bool to)
71048+{
71049+
71050+#ifdef CONFIG_PAX_USERCOPY
71051+ struct page *page;
71052+ struct kmem_cache *s = NULL;
71053+ unsigned long offset;
71054+
71055+ if (!n)
71056+ return;
71057+
71058+ if (ZERO_OR_NULL_PTR(ptr))
71059+ goto report;
71060+
71061+ if (!virt_addr_valid(ptr))
71062+ return;
71063+
71064+ page = get_object_page(ptr);
71065+
71066+ if (!page) {
71067+ if (object_is_on_stack(ptr, n) == -1)
71068+ goto report;
71069+ return;
71070+ }
71071+
71072+ s = page->slab;
71073+ if (!(s->flags & SLAB_USERCOPY))
71074+ goto report;
71075+
71076+ offset = (ptr - page_address(page)) % s->size;
71077+ if (offset <= s->objsize && n <= s->objsize - offset)
71078+ return;
71079+
71080+report:
71081+ pax_report_usercopy(ptr, n, to, s ? s->name : NULL);
71082+#endif
71083+
71084+}
71085+EXPORT_SYMBOL(check_object_size);
71086+
71087 size_t ksize(const void *object)
71088 {
71089 struct page *page;
71090@@ -3185,8 +3224,8 @@ void __init kmem_cache_init(void)
71091 * kmem_cache_open for slab_state == DOWN.
71092 */
71093 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
71094- sizeof(struct kmem_cache_node), GFP_NOWAIT);
71095- kmalloc_caches[0].refcount = -1;
71096+ sizeof(struct kmem_cache_node), GFP_NOWAIT, 0);
71097+ atomic_set(&kmalloc_caches[0].refcount, -1);
71098 caches++;
71099
71100 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
71101@@ -3198,18 +3237,18 @@ void __init kmem_cache_init(void)
71102 /* Caches that are not of the two-to-the-power-of size */
71103 if (KMALLOC_MIN_SIZE <= 32) {
71104 create_kmalloc_cache(&kmalloc_caches[1],
71105- "kmalloc-96", 96, GFP_NOWAIT);
71106+ "kmalloc-96", 96, GFP_NOWAIT, SLAB_USERCOPY);
71107 caches++;
71108 }
71109 if (KMALLOC_MIN_SIZE <= 64) {
71110 create_kmalloc_cache(&kmalloc_caches[2],
71111- "kmalloc-192", 192, GFP_NOWAIT);
71112+ "kmalloc-192", 192, GFP_NOWAIT, SLAB_USERCOPY);
71113 caches++;
71114 }
71115
71116 for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
71117 create_kmalloc_cache(&kmalloc_caches[i],
71118- "kmalloc", 1 << i, GFP_NOWAIT);
71119+ "kmalloc", 1 << i, GFP_NOWAIT, SLAB_USERCOPY);
71120 caches++;
71121 }
71122
71123@@ -3293,7 +3332,7 @@ static int slab_unmergeable(struct kmem_
71124 /*
71125 * We may have set a slab to be unmergeable during bootstrap.
71126 */
71127- if (s->refcount < 0)
71128+ if (atomic_read(&s->refcount) < 0)
71129 return 1;
71130
71131 return 0;
71132@@ -3353,7 +3392,7 @@ struct kmem_cache *kmem_cache_create(con
71133 if (s) {
71134 int cpu;
71135
71136- s->refcount++;
71137+ atomic_inc(&s->refcount);
71138 /*
71139 * Adjust the object sizes so that we clear
71140 * the complete object on kzalloc.
71141@@ -3372,7 +3411,7 @@ struct kmem_cache *kmem_cache_create(con
71142
71143 if (sysfs_slab_alias(s, name)) {
71144 down_write(&slub_lock);
71145- s->refcount--;
71146+ atomic_dec(&s->refcount);
71147 up_write(&slub_lock);
71148 goto err;
71149 }
71150@@ -4101,7 +4140,7 @@ SLAB_ATTR_RO(ctor);
71151
71152 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
71153 {
71154- return sprintf(buf, "%d\n", s->refcount - 1);
71155+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
71156 }
71157 SLAB_ATTR_RO(aliases);
71158
71159@@ -4503,7 +4542,7 @@ static void kmem_cache_release(struct ko
71160 kfree(s);
71161 }
71162
71163-static struct sysfs_ops slab_sysfs_ops = {
71164+static const struct sysfs_ops slab_sysfs_ops = {
71165 .show = slab_attr_show,
71166 .store = slab_attr_store,
71167 };
71168@@ -4522,7 +4561,7 @@ static int uevent_filter(struct kset *ks
71169 return 0;
71170 }
71171
71172-static struct kset_uevent_ops slab_uevent_ops = {
71173+static const struct kset_uevent_ops slab_uevent_ops = {
71174 .filter = uevent_filter,
71175 };
71176
71177@@ -4785,7 +4824,13 @@ static const struct file_operations proc
71178
71179 static int __init slab_proc_init(void)
71180 {
71181- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations);
71182+ mode_t gr_mode = S_IRUGO;
71183+
71184+#ifdef CONFIG_GRKERNSEC_PROC_ADD
71185+ gr_mode = S_IRUSR;
71186+#endif
71187+
71188+ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations);
71189 return 0;
71190 }
71191 module_init(slab_proc_init);
71192diff -urNp linux-2.6.32.43/mm/swap.c linux-2.6.32.43/mm/swap.c
71193--- linux-2.6.32.43/mm/swap.c 2011-03-27 14:31:47.000000000 -0400
71194+++ linux-2.6.32.43/mm/swap.c 2011-07-09 09:15:19.000000000 -0400
71195@@ -30,6 +30,7 @@
71196 #include <linux/notifier.h>
71197 #include <linux/backing-dev.h>
71198 #include <linux/memcontrol.h>
71199+#include <linux/hugetlb.h>
71200
71201 #include "internal.h"
71202
71203@@ -65,6 +66,8 @@ static void put_compound_page(struct pag
71204 compound_page_dtor *dtor;
71205
71206 dtor = get_compound_page_dtor(page);
71207+ if (!PageHuge(page))
71208+ BUG_ON(dtor != free_compound_page);
71209 (*dtor)(page);
71210 }
71211 }
71212diff -urNp linux-2.6.32.43/mm/util.c linux-2.6.32.43/mm/util.c
71213--- linux-2.6.32.43/mm/util.c 2011-03-27 14:31:47.000000000 -0400
71214+++ linux-2.6.32.43/mm/util.c 2011-04-17 15:56:46.000000000 -0400
71215@@ -228,6 +228,12 @@ EXPORT_SYMBOL(strndup_user);
71216 void arch_pick_mmap_layout(struct mm_struct *mm)
71217 {
71218 mm->mmap_base = TASK_UNMAPPED_BASE;
71219+
71220+#ifdef CONFIG_PAX_RANDMMAP
71221+ if (mm->pax_flags & MF_PAX_RANDMMAP)
71222+ mm->mmap_base += mm->delta_mmap;
71223+#endif
71224+
71225 mm->get_unmapped_area = arch_get_unmapped_area;
71226 mm->unmap_area = arch_unmap_area;
71227 }
71228diff -urNp linux-2.6.32.43/mm/vmalloc.c linux-2.6.32.43/mm/vmalloc.c
71229--- linux-2.6.32.43/mm/vmalloc.c 2011-03-27 14:31:47.000000000 -0400
71230+++ linux-2.6.32.43/mm/vmalloc.c 2011-04-17 15:56:46.000000000 -0400
71231@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd,
71232
71233 pte = pte_offset_kernel(pmd, addr);
71234 do {
71235- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71236- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71237+
71238+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71239+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
71240+ BUG_ON(!pte_exec(*pte));
71241+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
71242+ continue;
71243+ }
71244+#endif
71245+
71246+ {
71247+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
71248+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
71249+ }
71250 } while (pte++, addr += PAGE_SIZE, addr != end);
71251 }
71252
71253@@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un
71254 unsigned long end, pgprot_t prot, struct page **pages, int *nr)
71255 {
71256 pte_t *pte;
71257+ int ret = -ENOMEM;
71258
71259 /*
71260 * nr is a running index into the array which helps higher level
71261@@ -101,17 +113,32 @@ static int vmap_pte_range(pmd_t *pmd, un
71262 pte = pte_alloc_kernel(pmd, addr);
71263 if (!pte)
71264 return -ENOMEM;
71265+
71266+ pax_open_kernel();
71267 do {
71268 struct page *page = pages[*nr];
71269
71270- if (WARN_ON(!pte_none(*pte)))
71271- return -EBUSY;
71272- if (WARN_ON(!page))
71273- return -ENOMEM;
71274+#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71275+ if (!(pgprot_val(prot) & _PAGE_NX))
71276+ BUG_ON(!pte_exec(*pte) || pte_pfn(*pte) != __pa(addr) >> PAGE_SHIFT);
71277+ else
71278+#endif
71279+
71280+ if (WARN_ON(!pte_none(*pte))) {
71281+ ret = -EBUSY;
71282+ goto out;
71283+ }
71284+ if (WARN_ON(!page)) {
71285+ ret = -ENOMEM;
71286+ goto out;
71287+ }
71288 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
71289 (*nr)++;
71290 } while (pte++, addr += PAGE_SIZE, addr != end);
71291- return 0;
71292+ ret = 0;
71293+out:
71294+ pax_close_kernel();
71295+ return ret;
71296 }
71297
71298 static int vmap_pmd_range(pud_t *pud, unsigned long addr,
71299@@ -192,11 +219,20 @@ int is_vmalloc_or_module_addr(const void
71300 * and fall back on vmalloc() if that fails. Others
71301 * just put it in the vmalloc space.
71302 */
71303-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
71304+#ifdef CONFIG_MODULES
71305+#ifdef MODULES_VADDR
71306 unsigned long addr = (unsigned long)x;
71307 if (addr >= MODULES_VADDR && addr < MODULES_END)
71308 return 1;
71309 #endif
71310+
71311+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
71312+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
71313+ return 1;
71314+#endif
71315+
71316+#endif
71317+
71318 return is_vmalloc_addr(x);
71319 }
71320
71321@@ -217,8 +253,14 @@ struct page *vmalloc_to_page(const void
71322
71323 if (!pgd_none(*pgd)) {
71324 pud_t *pud = pud_offset(pgd, addr);
71325+#ifdef CONFIG_X86
71326+ if (!pud_large(*pud))
71327+#endif
71328 if (!pud_none(*pud)) {
71329 pmd_t *pmd = pmd_offset(pud, addr);
71330+#ifdef CONFIG_X86
71331+ if (!pmd_large(*pmd))
71332+#endif
71333 if (!pmd_none(*pmd)) {
71334 pte_t *ptep, pte;
71335
71336@@ -292,13 +334,13 @@ static void __insert_vmap_area(struct vm
71337 struct rb_node *tmp;
71338
71339 while (*p) {
71340- struct vmap_area *tmp;
71341+ struct vmap_area *varea;
71342
71343 parent = *p;
71344- tmp = rb_entry(parent, struct vmap_area, rb_node);
71345- if (va->va_start < tmp->va_end)
71346+ varea = rb_entry(parent, struct vmap_area, rb_node);
71347+ if (va->va_start < varea->va_end)
71348 p = &(*p)->rb_left;
71349- else if (va->va_end > tmp->va_start)
71350+ else if (va->va_end > varea->va_start)
71351 p = &(*p)->rb_right;
71352 else
71353 BUG();
71354@@ -1232,6 +1274,16 @@ static struct vm_struct *__get_vm_area_n
71355 struct vm_struct *area;
71356
71357 BUG_ON(in_interrupt());
71358+
71359+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71360+ if (flags & VM_KERNEXEC) {
71361+ if (start != VMALLOC_START || end != VMALLOC_END)
71362+ return NULL;
71363+ start = (unsigned long)MODULES_EXEC_VADDR;
71364+ end = (unsigned long)MODULES_EXEC_END;
71365+ }
71366+#endif
71367+
71368 if (flags & VM_IOREMAP) {
71369 int bit = fls(size);
71370
71371@@ -1457,6 +1509,11 @@ void *vmap(struct page **pages, unsigned
71372 if (count > totalram_pages)
71373 return NULL;
71374
71375+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71376+ if (!(pgprot_val(prot) & _PAGE_NX))
71377+ flags |= VM_KERNEXEC;
71378+#endif
71379+
71380 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
71381 __builtin_return_address(0));
71382 if (!area)
71383@@ -1567,6 +1624,13 @@ static void *__vmalloc_node(unsigned lon
71384 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
71385 return NULL;
71386
71387+#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
71388+ if (!(pgprot_val(prot) & _PAGE_NX))
71389+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END,
71390+ node, gfp_mask, caller);
71391+ else
71392+#endif
71393+
71394 area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START,
71395 VMALLOC_END, node, gfp_mask, caller);
71396
71397@@ -1585,6 +1649,7 @@ static void *__vmalloc_node(unsigned lon
71398 return addr;
71399 }
71400
71401+#undef __vmalloc
71402 void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot)
71403 {
71404 return __vmalloc_node(size, 1, gfp_mask, prot, -1,
71405@@ -1601,6 +1666,7 @@ EXPORT_SYMBOL(__vmalloc);
71406 * For tight control over page level allocator and protection flags
71407 * use __vmalloc() instead.
71408 */
71409+#undef vmalloc
71410 void *vmalloc(unsigned long size)
71411 {
71412 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
71413@@ -1615,6 +1681,7 @@ EXPORT_SYMBOL(vmalloc);
71414 * The resulting memory area is zeroed so it can be mapped to userspace
71415 * without leaking data.
71416 */
71417+#undef vmalloc_user
71418 void *vmalloc_user(unsigned long size)
71419 {
71420 struct vm_struct *area;
71421@@ -1642,6 +1709,7 @@ EXPORT_SYMBOL(vmalloc_user);
71422 * For tight control over page level allocator and protection flags
71423 * use __vmalloc() instead.
71424 */
71425+#undef vmalloc_node
71426 void *vmalloc_node(unsigned long size, int node)
71427 {
71428 return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL,
71429@@ -1664,10 +1732,10 @@ EXPORT_SYMBOL(vmalloc_node);
71430 * For tight control over page level allocator and protection flags
71431 * use __vmalloc() instead.
71432 */
71433-
71434+#undef vmalloc_exec
71435 void *vmalloc_exec(unsigned long size)
71436 {
71437- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
71438+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
71439 -1, __builtin_return_address(0));
71440 }
71441
71442@@ -1686,6 +1754,7 @@ void *vmalloc_exec(unsigned long size)
71443 * Allocate enough 32bit PA addressable pages to cover @size from the
71444 * page level allocator and map them into contiguous kernel virtual space.
71445 */
71446+#undef vmalloc_32
71447 void *vmalloc_32(unsigned long size)
71448 {
71449 return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL,
71450@@ -1700,6 +1769,7 @@ EXPORT_SYMBOL(vmalloc_32);
71451 * The resulting memory area is 32bit addressable and zeroed so it can be
71452 * mapped to userspace without leaking data.
71453 */
71454+#undef vmalloc_32_user
71455 void *vmalloc_32_user(unsigned long size)
71456 {
71457 struct vm_struct *area;
71458@@ -1964,6 +2034,8 @@ int remap_vmalloc_range(struct vm_area_s
71459 unsigned long uaddr = vma->vm_start;
71460 unsigned long usize = vma->vm_end - vma->vm_start;
71461
71462+ BUG_ON(vma->vm_mirror);
71463+
71464 if ((PAGE_SIZE-1) & (unsigned long)addr)
71465 return -EINVAL;
71466
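/*
 * Editorial sketch, not part of the patch above: the KERNEXEC hunks in
 * mm/vmalloc.c funnel every executable vmalloc/vmap request into the
 * MODULES_EXEC_VADDR..MODULES_EXEC_END window by setting a VM_KERNEXEC flag
 * whenever the requested page protection lacks _PAGE_NX.  The standalone
 * program below mimics only that range-selection decision; PROT_NX,
 * FLAG_KERNEXEC and the REGION_* constants are illustrative stand-ins, not
 * kernel symbols.
 */
#include <stdio.h>

#define PROT_NX            0x1UL   /* stand-in for _PAGE_NX            */
#define FLAG_KERNEXEC      0x2UL   /* stand-in for VM_KERNEXEC         */
#define REGION_VMALLOC_LO  0x1000UL
#define REGION_VMALLOC_HI  0x9000UL
#define REGION_EXEC_LO     0x2000UL /* stand-in for MODULES_EXEC_VADDR */
#define REGION_EXEC_HI     0x3000UL /* stand-in for MODULES_EXEC_END   */

static void pick_range(unsigned long prot, unsigned long *flags,
		       unsigned long *start, unsigned long *end)
{
	*start = REGION_VMALLOC_LO;
	*end   = REGION_VMALLOC_HI;
	if (!(prot & PROT_NX)) {		/* executable mapping requested  */
		*flags |= FLAG_KERNEXEC;
		*start = REGION_EXEC_LO;	/* confine it to the exec window */
		*end   = REGION_EXEC_HI;
	}
}

int main(void)
{
	unsigned long flags = 0, start, end;

	pick_range(0 /* no NX bit -> executable */, &flags, &start, &end);
	printf("exec alloc -> flags %#lx range %#lx-%#lx\n", flags, start, end);

	flags = 0;
	pick_range(PROT_NX, &flags, &start, &end);
	printf("data alloc -> flags %#lx range %#lx-%#lx\n", flags, start, end);
	return 0;
}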
71467diff -urNp linux-2.6.32.43/mm/vmstat.c linux-2.6.32.43/mm/vmstat.c
71468--- linux-2.6.32.43/mm/vmstat.c 2011-03-27 14:31:47.000000000 -0400
71469+++ linux-2.6.32.43/mm/vmstat.c 2011-04-17 15:56:46.000000000 -0400
71470@@ -74,7 +74,7 @@ void vm_events_fold_cpu(int cpu)
71471 *
71472 * vm_stat contains the global counters
71473 */
71474-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
71475+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
71476 EXPORT_SYMBOL(vm_stat);
71477
71478 #ifdef CONFIG_SMP
71479@@ -324,7 +324,7 @@ void refresh_cpu_vm_stats(int cpu)
71480 v = p->vm_stat_diff[i];
71481 p->vm_stat_diff[i] = 0;
71482 local_irq_restore(flags);
71483- atomic_long_add(v, &zone->vm_stat[i]);
71484+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
71485 global_diff[i] += v;
71486 #ifdef CONFIG_NUMA
71487 /* 3 seconds idle till flush */
71488@@ -362,7 +362,7 @@ void refresh_cpu_vm_stats(int cpu)
71489
71490 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
71491 if (global_diff[i])
71492- atomic_long_add(global_diff[i], &vm_stat[i]);
71493+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
71494 }
71495
71496 #endif
71497@@ -953,10 +953,20 @@ static int __init setup_vmstat(void)
71498 start_cpu_timer(cpu);
71499 #endif
71500 #ifdef CONFIG_PROC_FS
71501- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
71502- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
71503- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
71504- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
71505+ {
71506+ mode_t gr_mode = S_IRUGO;
71507+#ifdef CONFIG_GRKERNSEC_PROC_ADD
71508+ gr_mode = S_IRUSR;
71509+#endif
71510+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
71511+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
71512+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
71513+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
71514+#else
71515+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
71516+#endif
71517+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
71518+ }
71519 #endif
71520 return 0;
71521 }
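/*
 * Editorial sketch, not part of the patch above: the vmstat hunk tightens the
 * /proc file modes to owner-only under CONFIG_GRKERNSEC_PROC_ADD and, with
 * CONFIG_GRKERNSEC_PROC_USERGROUP, re-opens vmstat to the trusted proc group.
 * The helper below only reproduces that mode selection with <sys/stat.h>
 * bits; the CFG_* macros are stand-ins for those config options.
 */
#include <stdio.h>
#include <sys/stat.h>

#define CFG_PROC_ADD        1	/* pretend CONFIG_GRKERNSEC_PROC_ADD=y       */
#define CFG_PROC_USERGROUP  1	/* pretend CONFIG_GRKERNSEC_PROC_USERGROUP=y */

static mode_t proc_mode(int group_readable)
{
	mode_t mode = CFG_PROC_ADD ? S_IRUSR : (S_IRUSR | S_IRGRP | S_IROTH);

	if (CFG_PROC_USERGROUP && group_readable)
		mode |= S_IRGRP;	/* vmstat stays readable by the proc group */
	return mode;
}

int main(void)
{
	printf("buddyinfo mode %04o\n", (unsigned int)proc_mode(0));
	printf("vmstat    mode %04o\n", (unsigned int)proc_mode(1));
	return 0;
}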
71522diff -urNp linux-2.6.32.43/net/8021q/vlan.c linux-2.6.32.43/net/8021q/vlan.c
71523--- linux-2.6.32.43/net/8021q/vlan.c 2011-03-27 14:31:47.000000000 -0400
71524+++ linux-2.6.32.43/net/8021q/vlan.c 2011-04-17 15:56:46.000000000 -0400
71525@@ -622,8 +622,7 @@ static int vlan_ioctl_handler(struct net
71526 err = -EPERM;
71527 if (!capable(CAP_NET_ADMIN))
71528 break;
71529- if ((args.u.name_type >= 0) &&
71530- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
71531+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
71532 struct vlan_net *vn;
71533
71534 vn = net_generic(net, vlan_net_id);
71535diff -urNp linux-2.6.32.43/net/atm/atm_misc.c linux-2.6.32.43/net/atm/atm_misc.c
71536--- linux-2.6.32.43/net/atm/atm_misc.c 2011-03-27 14:31:47.000000000 -0400
71537+++ linux-2.6.32.43/net/atm/atm_misc.c 2011-04-17 15:56:46.000000000 -0400
71538@@ -19,7 +19,7 @@ int atm_charge(struct atm_vcc *vcc,int t
71539 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
71540 return 1;
71541 atm_return(vcc,truesize);
71542- atomic_inc(&vcc->stats->rx_drop);
71543+ atomic_inc_unchecked(&vcc->stats->rx_drop);
71544 return 0;
71545 }
71546
71547@@ -41,7 +41,7 @@ struct sk_buff *atm_alloc_charge(struct
71548 }
71549 }
71550 atm_return(vcc,guess);
71551- atomic_inc(&vcc->stats->rx_drop);
71552+ atomic_inc_unchecked(&vcc->stats->rx_drop);
71553 return NULL;
71554 }
71555
71556@@ -88,7 +88,7 @@ int atm_pcr_goal(const struct atm_trafpr
71557
71558 void sonet_copy_stats(struct k_sonet_stats *from,struct sonet_stats *to)
71559 {
71560-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71561+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71562 __SONET_ITEMS
71563 #undef __HANDLE_ITEM
71564 }
71565@@ -96,7 +96,7 @@ void sonet_copy_stats(struct k_sonet_sta
71566
71567 void sonet_subtract_stats(struct k_sonet_stats *from,struct sonet_stats *to)
71568 {
71569-#define __HANDLE_ITEM(i) atomic_sub(to->i,&from->i)
71570+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
71571 __SONET_ITEMS
71572 #undef __HANDLE_ITEM
71573 }
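/*
 * Editorial sketch, not part of the patch above: the atomic_*_unchecked calls
 * that replace atomic_* throughout these networking hunks mark pure
 * statistics counters that are allowed to wrap, so the PaX REFCOUNT overflow
 * detection only guards real reference counts.  The userspace stand-ins below
 * show the intent only: a "checked" increment refuses to overflow, an
 * "unchecked" one wraps silently.  None of these names are kernel API.
 */
#include <assert.h>
#include <limits.h>
#include <stdio.h>

typedef struct { int v; } atomic_checked_t;
typedef struct { unsigned int v; } atomic_unchecked_demo_t;

static void inc_checked(atomic_checked_t *a)
{
	assert(a->v != INT_MAX);	/* a real refcount must never overflow  */
	a->v++;
}

static void inc_unchecked(atomic_unchecked_demo_t *a)
{
	a->v++;				/* statistics may wrap; that is fine    */
}

int main(void)
{
	atomic_checked_t ref = { 1 };
	atomic_unchecked_demo_t drops = { UINT_MAX };

	inc_checked(&ref);
	inc_unchecked(&drops);		/* wraps to 0, well defined for unsigned */
	printf("ref=%d drops=%u\n", ref.v, drops.v);
	return 0;
}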
71574diff -urNp linux-2.6.32.43/net/atm/lec.h linux-2.6.32.43/net/atm/lec.h
71575--- linux-2.6.32.43/net/atm/lec.h 2011-03-27 14:31:47.000000000 -0400
71576+++ linux-2.6.32.43/net/atm/lec.h 2011-08-05 20:33:55.000000000 -0400
71577@@ -48,7 +48,7 @@ struct lane2_ops {
71578 const u8 *tlvs, u32 sizeoftlvs);
71579 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
71580 const u8 *tlvs, u32 sizeoftlvs);
71581-};
71582+} __no_const;
71583
71584 /*
71585 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
71586diff -urNp linux-2.6.32.43/net/atm/mpc.c linux-2.6.32.43/net/atm/mpc.c
71587--- linux-2.6.32.43/net/atm/mpc.c 2011-03-27 14:31:47.000000000 -0400
71588+++ linux-2.6.32.43/net/atm/mpc.c 2011-08-05 20:33:55.000000000 -0400
71589@@ -291,8 +291,8 @@ static void start_mpc(struct mpoa_client
71590 printk("mpoa: (%s) start_mpc not starting\n", dev->name);
71591 else {
71592 mpc->old_ops = dev->netdev_ops;
71593- mpc->new_ops = *mpc->old_ops;
71594- mpc->new_ops.ndo_start_xmit = mpc_send_packet;
71595+ memcpy((void *)&mpc->new_ops, mpc->old_ops, sizeof(mpc->new_ops));
71596+ *(void **)&mpc->new_ops.ndo_start_xmit = mpc_send_packet;
71597 dev->netdev_ops = &mpc->new_ops;
71598 }
71599 }
71600diff -urNp linux-2.6.32.43/net/atm/mpoa_caches.c linux-2.6.32.43/net/atm/mpoa_caches.c
71601--- linux-2.6.32.43/net/atm/mpoa_caches.c 2011-03-27 14:31:47.000000000 -0400
71602+++ linux-2.6.32.43/net/atm/mpoa_caches.c 2011-05-16 21:46:57.000000000 -0400
71603@@ -498,6 +498,8 @@ static void clear_expired(struct mpoa_cl
71604 struct timeval now;
71605 struct k_message msg;
71606
71607+ pax_track_stack();
71608+
71609 do_gettimeofday(&now);
71610
71611 write_lock_irq(&client->egress_lock);
71612diff -urNp linux-2.6.32.43/net/atm/proc.c linux-2.6.32.43/net/atm/proc.c
71613--- linux-2.6.32.43/net/atm/proc.c 2011-03-27 14:31:47.000000000 -0400
71614+++ linux-2.6.32.43/net/atm/proc.c 2011-04-17 15:56:46.000000000 -0400
71615@@ -43,9 +43,9 @@ static void add_stats(struct seq_file *s
71616 const struct k_atm_aal_stats *stats)
71617 {
71618 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
71619- atomic_read(&stats->tx),atomic_read(&stats->tx_err),
71620- atomic_read(&stats->rx),atomic_read(&stats->rx_err),
71621- atomic_read(&stats->rx_drop));
71622+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
71623+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
71624+ atomic_read_unchecked(&stats->rx_drop));
71625 }
71626
71627 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
71628@@ -188,7 +188,12 @@ static void vcc_info(struct seq_file *se
71629 {
71630 struct sock *sk = sk_atm(vcc);
71631
71632+#ifdef CONFIG_GRKERNSEC_HIDESYM
71633+ seq_printf(seq, "%p ", NULL);
71634+#else
71635 seq_printf(seq, "%p ", vcc);
71636+#endif
71637+
71638 if (!vcc->dev)
71639 seq_printf(seq, "Unassigned ");
71640 else
71641@@ -214,7 +219,11 @@ static void svc_info(struct seq_file *se
71642 {
71643 if (!vcc->dev)
71644 seq_printf(seq, sizeof(void *) == 4 ?
71645+#ifdef CONFIG_GRKERNSEC_HIDESYM
71646+ "N/A@%p%10s" : "N/A@%p%2s", NULL, "");
71647+#else
71648 "N/A@%p%10s" : "N/A@%p%2s", vcc, "");
71649+#endif
71650 else
71651 seq_printf(seq, "%3d %3d %5d ",
71652 vcc->dev->number, vcc->vpi, vcc->vci);
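/*
 * Editorial sketch, not part of the patch above: under
 * CONFIG_GRKERNSEC_HIDESYM the hunks in net/atm/proc.c (and the similar ones
 * later in this patch) stop /proc and seq_file output from leaking kernel
 * object addresses by formatting a NULL pointer instead of the real one.  The
 * tiny helper below shows the same substitution in plain C; hide_kptr() is an
 * illustrative name only.
 */
#include <stdio.h>

#define CFG_HIDESYM 1	/* pretend CONFIG_GRKERNSEC_HIDESYM=y */

static const void *hide_kptr(const void *p)
{
	return CFG_HIDESYM ? NULL : p;	/* mask the address before formatting */
}

int main(void)
{
	int obj;

	/* with HIDESYM enabled this prints a nil pointer instead of &obj */
	printf("vcc %p\n", hide_kptr(&obj));
	return 0;
}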
71653diff -urNp linux-2.6.32.43/net/atm/resources.c linux-2.6.32.43/net/atm/resources.c
71654--- linux-2.6.32.43/net/atm/resources.c 2011-03-27 14:31:47.000000000 -0400
71655+++ linux-2.6.32.43/net/atm/resources.c 2011-04-17 15:56:46.000000000 -0400
71656@@ -161,7 +161,7 @@ void atm_dev_deregister(struct atm_dev *
71657 static void copy_aal_stats(struct k_atm_aal_stats *from,
71658 struct atm_aal_stats *to)
71659 {
71660-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
71661+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
71662 __AAL_STAT_ITEMS
71663 #undef __HANDLE_ITEM
71664 }
71665@@ -170,7 +170,7 @@ static void copy_aal_stats(struct k_atm_
71666 static void subtract_aal_stats(struct k_atm_aal_stats *from,
71667 struct atm_aal_stats *to)
71668 {
71669-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
71670+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
71671 __AAL_STAT_ITEMS
71672 #undef __HANDLE_ITEM
71673 }
71674diff -urNp linux-2.6.32.43/net/bluetooth/l2cap.c linux-2.6.32.43/net/bluetooth/l2cap.c
71675--- linux-2.6.32.43/net/bluetooth/l2cap.c 2011-03-27 14:31:47.000000000 -0400
71676+++ linux-2.6.32.43/net/bluetooth/l2cap.c 2011-06-25 14:36:21.000000000 -0400
71677@@ -1885,7 +1885,7 @@ static int l2cap_sock_getsockopt_old(str
71678 err = -ENOTCONN;
71679 break;
71680 }
71681-
71682+ memset(&cinfo, 0, sizeof(cinfo));
71683 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
71684 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
71685
71686@@ -2719,7 +2719,7 @@ static inline int l2cap_config_req(struc
71687
71688 /* Reject if config buffer is too small. */
71689 len = cmd_len - sizeof(*req);
71690- if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
71691+ if (len < 0 || l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
71692 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
71693 l2cap_build_conf_rsp(sk, rsp,
71694 L2CAP_CONF_REJECT, flags), rsp);
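/*
 * Editorial sketch, not part of the patch above: the l2cap_config_req hunk
 * adds "len < 0 ||" in front of the existing bound check, because a negative
 * length would slip past "conf_len + len > sizeof(conf_req)" and later be
 * used as a huge copy size.  The standalone checker below demonstrates that
 * ordering; config_ok() and CONF_BUF_SZ are illustrative, not Bluetooth API.
 */
#include <stdio.h>

#define CONF_BUF_SZ 64

static int config_ok(int conf_len, int len)
{
	/* reject negative lengths first, then check the accumulated bound */
	if (len < 0 || (size_t)(conf_len + len) > CONF_BUF_SZ)
		return 0;
	return 1;
}

int main(void)
{
	printf("len=16   -> %s\n", config_ok(8, 16)   ? "accept" : "reject");
	printf("len=-4   -> %s\n", config_ok(8, -4)   ? "accept" : "reject");
	printf("len=1000 -> %s\n", config_ok(8, 1000) ? "accept" : "reject");
	return 0;
}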
71695diff -urNp linux-2.6.32.43/net/bluetooth/rfcomm/sock.c linux-2.6.32.43/net/bluetooth/rfcomm/sock.c
71696--- linux-2.6.32.43/net/bluetooth/rfcomm/sock.c 2011-03-27 14:31:47.000000000 -0400
71697+++ linux-2.6.32.43/net/bluetooth/rfcomm/sock.c 2011-06-12 06:35:00.000000000 -0400
71698@@ -878,6 +878,7 @@ static int rfcomm_sock_getsockopt_old(st
71699
71700 l2cap_sk = rfcomm_pi(sk)->dlc->session->sock->sk;
71701
71702+ memset(&cinfo, 0, sizeof(cinfo));
71703 cinfo.hci_handle = l2cap_pi(l2cap_sk)->conn->hcon->handle;
71704 memcpy(cinfo.dev_class, l2cap_pi(l2cap_sk)->conn->hcon->dev_class, 3);
71705
71706diff -urNp linux-2.6.32.43/net/bridge/br_private.h linux-2.6.32.43/net/bridge/br_private.h
71707--- linux-2.6.32.43/net/bridge/br_private.h 2011-03-27 14:31:47.000000000 -0400
71708+++ linux-2.6.32.43/net/bridge/br_private.h 2011-04-17 15:56:46.000000000 -0400
71709@@ -254,7 +254,7 @@ extern void br_ifinfo_notify(int event,
71710
71711 #ifdef CONFIG_SYSFS
71712 /* br_sysfs_if.c */
71713-extern struct sysfs_ops brport_sysfs_ops;
71714+extern const struct sysfs_ops brport_sysfs_ops;
71715 extern int br_sysfs_addif(struct net_bridge_port *p);
71716
71717 /* br_sysfs_br.c */
71718diff -urNp linux-2.6.32.43/net/bridge/br_stp_if.c linux-2.6.32.43/net/bridge/br_stp_if.c
71719--- linux-2.6.32.43/net/bridge/br_stp_if.c 2011-03-27 14:31:47.000000000 -0400
71720+++ linux-2.6.32.43/net/bridge/br_stp_if.c 2011-04-17 15:56:46.000000000 -0400
71721@@ -146,7 +146,7 @@ static void br_stp_stop(struct net_bridg
71722 char *envp[] = { NULL };
71723
71724 if (br->stp_enabled == BR_USER_STP) {
71725- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1);
71726+ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
71727 printk(KERN_INFO "%s: userspace STP stopped, return code %d\n",
71728 br->dev->name, r);
71729
71730diff -urNp linux-2.6.32.43/net/bridge/br_sysfs_if.c linux-2.6.32.43/net/bridge/br_sysfs_if.c
71731--- linux-2.6.32.43/net/bridge/br_sysfs_if.c 2011-03-27 14:31:47.000000000 -0400
71732+++ linux-2.6.32.43/net/bridge/br_sysfs_if.c 2011-04-17 15:56:46.000000000 -0400
71733@@ -220,7 +220,7 @@ static ssize_t brport_store(struct kobje
71734 return ret;
71735 }
71736
71737-struct sysfs_ops brport_sysfs_ops = {
71738+const struct sysfs_ops brport_sysfs_ops = {
71739 .show = brport_show,
71740 .store = brport_store,
71741 };
71742diff -urNp linux-2.6.32.43/net/bridge/netfilter/ebtables.c linux-2.6.32.43/net/bridge/netfilter/ebtables.c
71743--- linux-2.6.32.43/net/bridge/netfilter/ebtables.c 2011-04-17 17:00:52.000000000 -0400
71744+++ linux-2.6.32.43/net/bridge/netfilter/ebtables.c 2011-05-16 21:46:57.000000000 -0400
71745@@ -1337,6 +1337,8 @@ static int copy_everything_to_user(struc
71746 unsigned int entries_size, nentries;
71747 char *entries;
71748
71749+ pax_track_stack();
71750+
71751 if (cmd == EBT_SO_GET_ENTRIES) {
71752 entries_size = t->private->entries_size;
71753 nentries = t->private->nentries;
71754diff -urNp linux-2.6.32.43/net/can/bcm.c linux-2.6.32.43/net/can/bcm.c
71755--- linux-2.6.32.43/net/can/bcm.c 2011-05-10 22:12:01.000000000 -0400
71756+++ linux-2.6.32.43/net/can/bcm.c 2011-05-10 22:12:34.000000000 -0400
71757@@ -164,9 +164,15 @@ static int bcm_proc_show(struct seq_file
71758 struct bcm_sock *bo = bcm_sk(sk);
71759 struct bcm_op *op;
71760
71761+#ifdef CONFIG_GRKERNSEC_HIDESYM
71762+ seq_printf(m, ">>> socket %p", NULL);
71763+ seq_printf(m, " / sk %p", NULL);
71764+ seq_printf(m, " / bo %p", NULL);
71765+#else
71766 seq_printf(m, ">>> socket %p", sk->sk_socket);
71767 seq_printf(m, " / sk %p", sk);
71768 seq_printf(m, " / bo %p", bo);
71769+#endif
71770 seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs);
71771 seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex));
71772 seq_printf(m, " <<<\n");
71773diff -urNp linux-2.6.32.43/net/core/dev.c linux-2.6.32.43/net/core/dev.c
71774--- linux-2.6.32.43/net/core/dev.c 2011-04-17 17:00:52.000000000 -0400
71775+++ linux-2.6.32.43/net/core/dev.c 2011-08-05 20:33:55.000000000 -0400
71776@@ -1047,10 +1047,14 @@ void dev_load(struct net *net, const cha
71777 if (no_module && capable(CAP_NET_ADMIN))
71778 no_module = request_module("netdev-%s", name);
71779 if (no_module && capable(CAP_SYS_MODULE)) {
71780+#ifdef CONFIG_GRKERNSEC_MODHARDEN
71781+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
71782+#else
71783 if (!request_module("%s", name))
71784 pr_err("Loading kernel module for a network device "
71785 "with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s "
71786 "instead\n", name);
71787+#endif
71788 }
71789 }
71790 EXPORT_SYMBOL(dev_load);
71791@@ -1654,7 +1658,7 @@ static inline int illegal_highdma(struct
71792
71793 struct dev_gso_cb {
71794 void (*destructor)(struct sk_buff *skb);
71795-};
71796+} __no_const;
71797
71798 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
71799
71800@@ -2063,7 +2067,7 @@ int netif_rx_ni(struct sk_buff *skb)
71801 }
71802 EXPORT_SYMBOL(netif_rx_ni);
71803
71804-static void net_tx_action(struct softirq_action *h)
71805+static void net_tx_action(void)
71806 {
71807 struct softnet_data *sd = &__get_cpu_var(softnet_data);
71808
71809@@ -2826,7 +2830,7 @@ void netif_napi_del(struct napi_struct *
71810 EXPORT_SYMBOL(netif_napi_del);
71811
71812
71813-static void net_rx_action(struct softirq_action *h)
71814+static void net_rx_action(void)
71815 {
71816 struct list_head *list = &__get_cpu_var(softnet_data).poll_list;
71817 unsigned long time_limit = jiffies + 2;
71818diff -urNp linux-2.6.32.43/net/core/flow.c linux-2.6.32.43/net/core/flow.c
71819--- linux-2.6.32.43/net/core/flow.c 2011-03-27 14:31:47.000000000 -0400
71820+++ linux-2.6.32.43/net/core/flow.c 2011-05-04 17:56:20.000000000 -0400
71821@@ -35,11 +35,11 @@ struct flow_cache_entry {
71822 atomic_t *object_ref;
71823 };
71824
71825-atomic_t flow_cache_genid = ATOMIC_INIT(0);
71826+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
71827
71828 static u32 flow_hash_shift;
71829 #define flow_hash_size (1 << flow_hash_shift)
71830-static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
71831+static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables);
71832
71833 #define flow_table(cpu) (per_cpu(flow_tables, cpu))
71834
71835@@ -52,7 +52,7 @@ struct flow_percpu_info {
71836 u32 hash_rnd;
71837 int count;
71838 };
71839-static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info) = { 0 };
71840+static DEFINE_PER_CPU(struct flow_percpu_info, flow_hash_info);
71841
71842 #define flow_hash_rnd_recalc(cpu) \
71843 (per_cpu(flow_hash_info, cpu).hash_rnd_recalc)
71844@@ -69,7 +69,7 @@ struct flow_flush_info {
71845 atomic_t cpuleft;
71846 struct completion completion;
71847 };
71848-static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets) = { NULL };
71849+static DEFINE_PER_CPU(struct tasklet_struct, flow_flush_tasklets);
71850
71851 #define flow_flush_tasklet(cpu) (&per_cpu(flow_flush_tasklets, cpu))
71852
71853@@ -190,7 +190,7 @@ void *flow_cache_lookup(struct net *net,
71854 if (fle->family == family &&
71855 fle->dir == dir &&
71856 flow_key_compare(key, &fle->key) == 0) {
71857- if (fle->genid == atomic_read(&flow_cache_genid)) {
71858+ if (fle->genid == atomic_read_unchecked(&flow_cache_genid)) {
71859 void *ret = fle->object;
71860
71861 if (ret)
71862@@ -228,7 +228,7 @@ nocache:
71863 err = resolver(net, key, family, dir, &obj, &obj_ref);
71864
71865 if (fle && !err) {
71866- fle->genid = atomic_read(&flow_cache_genid);
71867+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
71868
71869 if (fle->object)
71870 atomic_dec(fle->object_ref);
71871@@ -258,7 +258,7 @@ static void flow_cache_flush_tasklet(uns
71872
71873 fle = flow_table(cpu)[i];
71874 for (; fle; fle = fle->next) {
71875- unsigned genid = atomic_read(&flow_cache_genid);
71876+ unsigned genid = atomic_read_unchecked(&flow_cache_genid);
71877
71878 if (!fle->object || fle->genid == genid)
71879 continue;
71880diff -urNp linux-2.6.32.43/net/core/Makefile linux-2.6.32.43/net/core/Makefile
71881--- linux-2.6.32.43/net/core/Makefile 2011-03-27 14:31:47.000000000 -0400
71882+++ linux-2.6.32.43/net/core/Makefile 2011-08-07 19:48:09.000000000 -0400
71883@@ -3,7 +3,7 @@
71884 #
71885
71886 obj-y := sock.o request_sock.o skbuff.o iovec.o datagram.o stream.o scm.o \
71887- gen_stats.o gen_estimator.o net_namespace.o
71888+ gen_stats.o gen_estimator.o net_namespace.o secure_seq.o
71889
71890 obj-$(CONFIG_SYSCTL) += sysctl_net_core.o
71891 obj-$(CONFIG_HAS_DMA) += skb_dma_map.o
71892diff -urNp linux-2.6.32.43/net/core/rtnetlink.c linux-2.6.32.43/net/core/rtnetlink.c
71893--- linux-2.6.32.43/net/core/rtnetlink.c 2011-03-27 14:31:47.000000000 -0400
71894+++ linux-2.6.32.43/net/core/rtnetlink.c 2011-08-05 20:33:55.000000000 -0400
71895@@ -57,7 +57,7 @@ struct rtnl_link
71896 {
71897 rtnl_doit_func doit;
71898 rtnl_dumpit_func dumpit;
71899-};
71900+} __no_const;
71901
71902 static DEFINE_MUTEX(rtnl_mutex);
71903
71904diff -urNp linux-2.6.32.43/net/core/secure_seq.c linux-2.6.32.43/net/core/secure_seq.c
71905--- linux-2.6.32.43/net/core/secure_seq.c 1969-12-31 19:00:00.000000000 -0500
71906+++ linux-2.6.32.43/net/core/secure_seq.c 2011-08-07 19:48:09.000000000 -0400
71907@@ -0,0 +1,183 @@
71908+#include <linux/kernel.h>
71909+#include <linux/init.h>
71910+#include <linux/cryptohash.h>
71911+#include <linux/module.h>
71912+#include <linux/cache.h>
71913+#include <linux/random.h>
71914+#include <linux/hrtimer.h>
71915+#include <linux/ktime.h>
71916+#include <linux/string.h>
71917+
71918+#include <net/secure_seq.h>
71919+
71920+static u32 net_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned;
71921+
71922+static int __init net_secret_init(void)
71923+{
71924+ get_random_bytes(net_secret, sizeof(net_secret));
71925+ return 0;
71926+}
71927+late_initcall(net_secret_init);
71928+
71929+static u32 seq_scale(u32 seq)
71930+{
71931+ /*
71932+ * As close as possible to RFC 793, which
71933+ * suggests using a 250 kHz clock.
71934+ * Further reading shows this assumes 2 Mb/s networks.
71935+ * For 10 Mb/s Ethernet, a 1 MHz clock is appropriate.
71936+ * For 10 Gb/s Ethernet, a 1 GHz clock should be ok, but
71937+ * we also need to limit the resolution so that the u32 seq
71938+ * overlaps less than one time per MSL (2 minutes).
71939+ * Choosing a clock of 64 ns period is OK. (period of 274 s)
71940+ */
71941+ return seq + (ktime_to_ns(ktime_get_real()) >> 6);
71942+}
71943+
71944+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
71945+__u32 secure_tcpv6_sequence_number(__be32 *saddr, __be32 *daddr,
71946+ __be16 sport, __be16 dport)
71947+{
71948+ u32 secret[MD5_MESSAGE_BYTES / 4];
71949+ u32 hash[MD5_DIGEST_WORDS];
71950+ u32 i;
71951+
71952+ memcpy(hash, saddr, 16);
71953+ for (i = 0; i < 4; i++)
71954+ secret[i] = net_secret[i] + daddr[i];
71955+ secret[4] = net_secret[4] +
71956+ (((__force u16)sport << 16) + (__force u16)dport);
71957+ for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
71958+ secret[i] = net_secret[i];
71959+
71960+ md5_transform(hash, secret);
71961+
71962+ return seq_scale(hash[0]);
71963+}
71964+EXPORT_SYMBOL(secure_tcpv6_sequence_number);
71965+
71966+u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
71967+ __be16 dport)
71968+{
71969+ u32 secret[MD5_MESSAGE_BYTES / 4];
71970+ u32 hash[MD5_DIGEST_WORDS];
71971+ u32 i;
71972+
71973+ memcpy(hash, saddr, 16);
71974+ for (i = 0; i < 4; i++)
71975+ secret[i] = net_secret[i] + (__force u32) daddr[i];
71976+ secret[4] = net_secret[4] + (__force u32)dport;
71977+ for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
71978+ secret[i] = net_secret[i];
71979+
71980+ md5_transform(hash, secret);
71981+ return hash[0];
71982+}
71983+#endif
71984+
71985+#ifdef CONFIG_INET
71986+__u32 secure_ip_id(__be32 daddr)
71987+{
71988+ u32 hash[MD5_DIGEST_WORDS];
71989+
71990+ hash[0] = (__force __u32) daddr;
71991+ hash[1] = net_secret[13];
71992+ hash[2] = net_secret[14];
71993+ hash[3] = net_secret[15];
71994+
71995+ md5_transform(hash, net_secret);
71996+
71997+ return hash[0];
71998+}
71999+
72000+__u32 secure_ipv6_id(const __be32 daddr[4])
72001+{
72002+ __u32 hash[4];
72003+
72004+ memcpy(hash, daddr, 16);
72005+ md5_transform(hash, net_secret);
72006+
72007+ return hash[0];
72008+}
72009+
72010+__u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
72011+ __be16 sport, __be16 dport)
72012+{
72013+ u32 hash[MD5_DIGEST_WORDS];
72014+
72015+ hash[0] = (__force u32)saddr;
72016+ hash[1] = (__force u32)daddr;
72017+ hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
72018+ hash[3] = net_secret[15];
72019+
72020+ md5_transform(hash, net_secret);
72021+
72022+ return seq_scale(hash[0]);
72023+}
72024+
72025+u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport)
72026+{
72027+ u32 hash[MD5_DIGEST_WORDS];
72028+
72029+ hash[0] = (__force u32)saddr;
72030+ hash[1] = (__force u32)daddr;
72031+ hash[2] = (__force u32)dport ^ net_secret[14];
72032+ hash[3] = net_secret[15];
72033+
72034+ md5_transform(hash, net_secret);
72035+
72036+ return hash[0];
72037+}
72038+EXPORT_SYMBOL_GPL(secure_ipv4_port_ephemeral);
72039+#endif
72040+
72041+#if defined(CONFIG_IP_DCCP) || defined(CONFIG_IP_DCCP_MODULE)
72042+u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
72043+ __be16 sport, __be16 dport)
72044+{
72045+ u32 hash[MD5_DIGEST_WORDS];
72046+ u64 seq;
72047+
72048+ hash[0] = (__force u32)saddr;
72049+ hash[1] = (__force u32)daddr;
72050+ hash[2] = ((__force u16)sport << 16) + (__force u16)dport;
72051+ hash[3] = net_secret[15];
72052+
72053+ md5_transform(hash, net_secret);
72054+
72055+ seq = hash[0] | (((u64)hash[1]) << 32);
72056+ seq += ktime_to_ns(ktime_get_real());
72057+ seq &= (1ull << 48) - 1;
72058+
72059+ return seq;
72060+}
72061+EXPORT_SYMBOL(secure_dccp_sequence_number);
72062+
72063+#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
72064+u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
72065+ __be16 sport, __be16 dport)
72066+{
72067+ u32 secret[MD5_MESSAGE_BYTES / 4];
72068+ u32 hash[MD5_DIGEST_WORDS];
72069+ u64 seq;
72070+ u32 i;
72071+
72072+ memcpy(hash, saddr, 16);
72073+ for (i = 0; i < 4; i++)
72074+ secret[i] = net_secret[i] + daddr[i];
72075+ secret[4] = net_secret[4] +
72076+ (((__force u16)sport << 16) + (__force u16)dport);
72077+ for (i = 5; i < MD5_MESSAGE_BYTES / 4; i++)
72078+ secret[i] = net_secret[i];
72079+
72080+ md5_transform(hash, secret);
72081+
72082+ seq = hash[0] | (((u64)hash[1]) << 32);
72083+ seq += ktime_to_ns(ktime_get_real());
72084+ seq &= (1ull << 48) - 1;
72085+
72086+ return seq;
72087+}
72088+EXPORT_SYMBOL(secure_dccpv6_sequence_number);
72089+#endif
72090+#endif
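/*
 * Editorial sketch, not part of the patch above: secure_seq.c mixes an MD5
 * hash keyed with net_secret into each ISN, then seq_scale() adds the real
 * clock shifted right by 6, i.e. a 64 ns tick.  The arithmetic behind the
 * "(period of 274 s)" remark in that comment: the 32-bit offset only wraps
 * every 2^32 * 64 ns, comfortably above the 2 minute MSL.  The program below
 * just reproduces that calculation and the seq + (now >> 6) combination with
 * clock_gettime(); it is a userspace model, not the kernel code path.
 */
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static uint32_t seq_scale_demo(uint32_t seq)
{
	struct timespec ts;
	uint64_t ns;

	clock_gettime(CLOCK_REALTIME, &ts);
	ns = (uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec;
	return seq + (uint32_t)(ns >> 6);	/* 64 ns granularity, like seq_scale() */
}

int main(void)
{
	double period = (double)UINT32_MAX * 64e-9;	/* seconds until the offset wraps */

	printf("offset wrap period: %.1f s (MSL is 120 s)\n", period);
	printf("scaled seq example: %u\n", (unsigned int)seq_scale_demo(0x12345678u));
	return 0;
}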
72091diff -urNp linux-2.6.32.43/net/core/skbuff.c linux-2.6.32.43/net/core/skbuff.c
72092--- linux-2.6.32.43/net/core/skbuff.c 2011-03-27 14:31:47.000000000 -0400
72093+++ linux-2.6.32.43/net/core/skbuff.c 2011-05-16 21:46:57.000000000 -0400
72094@@ -1544,6 +1544,8 @@ int skb_splice_bits(struct sk_buff *skb,
72095 struct sk_buff *frag_iter;
72096 struct sock *sk = skb->sk;
72097
72098+ pax_track_stack();
72099+
72100 /*
72101 * __skb_splice_bits() only fails if the output has no room left,
72102 * so no point in going over the frag_list for the error case.
72103diff -urNp linux-2.6.32.43/net/core/sock.c linux-2.6.32.43/net/core/sock.c
72104--- linux-2.6.32.43/net/core/sock.c 2011-03-27 14:31:47.000000000 -0400
72105+++ linux-2.6.32.43/net/core/sock.c 2011-05-04 17:56:20.000000000 -0400
72106@@ -864,11 +864,15 @@ int sock_getsockopt(struct socket *sock,
72107 break;
72108
72109 case SO_PEERCRED:
72110+ {
72111+ struct ucred peercred;
72112 if (len > sizeof(sk->sk_peercred))
72113 len = sizeof(sk->sk_peercred);
72114- if (copy_to_user(optval, &sk->sk_peercred, len))
72115+ peercred = sk->sk_peercred;
72116+ if (copy_to_user(optval, &peercred, len))
72117 return -EFAULT;
72118 goto lenout;
72119+ }
72120
72121 case SO_PEERNAME:
72122 {
72123@@ -1892,7 +1896,7 @@ void sock_init_data(struct socket *sock,
72124 */
72125 smp_wmb();
72126 atomic_set(&sk->sk_refcnt, 1);
72127- atomic_set(&sk->sk_drops, 0);
72128+ atomic_set_unchecked(&sk->sk_drops, 0);
72129 }
72130 EXPORT_SYMBOL(sock_init_data);
72131
72132diff -urNp linux-2.6.32.43/net/dccp/ipv4.c linux-2.6.32.43/net/dccp/ipv4.c
72133--- linux-2.6.32.43/net/dccp/ipv4.c 2011-03-27 14:31:47.000000000 -0400
72134+++ linux-2.6.32.43/net/dccp/ipv4.c 2011-08-07 19:48:09.000000000 -0400
72135@@ -25,6 +25,7 @@
72136 #include <net/timewait_sock.h>
72137 #include <net/tcp_states.h>
72138 #include <net/xfrm.h>
72139+#include <net/secure_seq.h>
72140
72141 #include "ackvec.h"
72142 #include "ccid.h"
72143diff -urNp linux-2.6.32.43/net/dccp/ipv6.c linux-2.6.32.43/net/dccp/ipv6.c
72144--- linux-2.6.32.43/net/dccp/ipv6.c 2011-03-27 14:31:47.000000000 -0400
72145+++ linux-2.6.32.43/net/dccp/ipv6.c 2011-08-07 19:48:09.000000000 -0400
72146@@ -28,6 +28,7 @@
72147 #include <net/transp_v6.h>
72148 #include <net/ip6_checksum.h>
72149 #include <net/xfrm.h>
72150+#include <net/secure_seq.h>
72151
72152 #include "dccp.h"
72153 #include "ipv6.h"
72154@@ -69,13 +70,7 @@ static inline void dccp_v6_send_check(st
72155 dh->dccph_checksum = dccp_v6_csum_finish(skb, &np->saddr, &np->daddr);
72156 }
72157
72158-static inline __u32 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
72159- __be16 sport, __be16 dport )
72160-{
72161- return secure_tcpv6_sequence_number(saddr, daddr, sport, dport);
72162-}
72163-
72164-static inline __u32 dccp_v6_init_sequence(struct sk_buff *skb)
72165+static inline __u64 dccp_v6_init_sequence(struct sk_buff *skb)
72166 {
72167 return secure_dccpv6_sequence_number(ipv6_hdr(skb)->daddr.s6_addr32,
72168 ipv6_hdr(skb)->saddr.s6_addr32,
72169diff -urNp linux-2.6.32.43/net/decnet/sysctl_net_decnet.c linux-2.6.32.43/net/decnet/sysctl_net_decnet.c
72170--- linux-2.6.32.43/net/decnet/sysctl_net_decnet.c 2011-03-27 14:31:47.000000000 -0400
72171+++ linux-2.6.32.43/net/decnet/sysctl_net_decnet.c 2011-04-17 15:56:46.000000000 -0400
72172@@ -206,7 +206,7 @@ static int dn_node_address_handler(ctl_t
72173
72174 if (len > *lenp) len = *lenp;
72175
72176- if (copy_to_user(buffer, addr, len))
72177+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
72178 return -EFAULT;
72179
72180 *lenp = len;
72181@@ -327,7 +327,7 @@ static int dn_def_dev_handler(ctl_table
72182
72183 if (len > *lenp) len = *lenp;
72184
72185- if (copy_to_user(buffer, devname, len))
72186+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
72187 return -EFAULT;
72188
72189 *lenp = len;
72190diff -urNp linux-2.6.32.43/net/econet/Kconfig linux-2.6.32.43/net/econet/Kconfig
72191--- linux-2.6.32.43/net/econet/Kconfig 2011-03-27 14:31:47.000000000 -0400
72192+++ linux-2.6.32.43/net/econet/Kconfig 2011-04-17 15:56:46.000000000 -0400
72193@@ -4,7 +4,7 @@
72194
72195 config ECONET
72196 tristate "Acorn Econet/AUN protocols (EXPERIMENTAL)"
72197- depends on EXPERIMENTAL && INET
72198+ depends on EXPERIMENTAL && INET && BROKEN
72199 ---help---
72200 Econet is a fairly old and slow networking protocol mainly used by
72201 Acorn computers to access file and print servers. It uses native
72202diff -urNp linux-2.6.32.43/net/ieee802154/dgram.c linux-2.6.32.43/net/ieee802154/dgram.c
72203--- linux-2.6.32.43/net/ieee802154/dgram.c 2011-03-27 14:31:47.000000000 -0400
72204+++ linux-2.6.32.43/net/ieee802154/dgram.c 2011-05-04 17:56:28.000000000 -0400
72205@@ -318,7 +318,7 @@ out:
72206 static int dgram_rcv_skb(struct sock *sk, struct sk_buff *skb)
72207 {
72208 if (sock_queue_rcv_skb(sk, skb) < 0) {
72209- atomic_inc(&sk->sk_drops);
72210+ atomic_inc_unchecked(&sk->sk_drops);
72211 kfree_skb(skb);
72212 return NET_RX_DROP;
72213 }
72214diff -urNp linux-2.6.32.43/net/ieee802154/raw.c linux-2.6.32.43/net/ieee802154/raw.c
72215--- linux-2.6.32.43/net/ieee802154/raw.c 2011-03-27 14:31:47.000000000 -0400
72216+++ linux-2.6.32.43/net/ieee802154/raw.c 2011-05-04 17:56:28.000000000 -0400
72217@@ -206,7 +206,7 @@ out:
72218 static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
72219 {
72220 if (sock_queue_rcv_skb(sk, skb) < 0) {
72221- atomic_inc(&sk->sk_drops);
72222+ atomic_inc_unchecked(&sk->sk_drops);
72223 kfree_skb(skb);
72224 return NET_RX_DROP;
72225 }
72226diff -urNp linux-2.6.32.43/net/ipv4/inet_diag.c linux-2.6.32.43/net/ipv4/inet_diag.c
72227--- linux-2.6.32.43/net/ipv4/inet_diag.c 2011-07-13 17:23:04.000000000 -0400
72228+++ linux-2.6.32.43/net/ipv4/inet_diag.c 2011-06-20 19:31:13.000000000 -0400
72229@@ -113,8 +113,13 @@ static int inet_csk_diag_fill(struct soc
72230 r->idiag_retrans = 0;
72231
72232 r->id.idiag_if = sk->sk_bound_dev_if;
72233+#ifdef CONFIG_GRKERNSEC_HIDESYM
72234+ r->id.idiag_cookie[0] = 0;
72235+ r->id.idiag_cookie[1] = 0;
72236+#else
72237 r->id.idiag_cookie[0] = (u32)(unsigned long)sk;
72238 r->id.idiag_cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
72239+#endif
72240
72241 r->id.idiag_sport = inet->sport;
72242 r->id.idiag_dport = inet->dport;
72243@@ -200,8 +205,15 @@ static int inet_twsk_diag_fill(struct in
72244 r->idiag_family = tw->tw_family;
72245 r->idiag_retrans = 0;
72246 r->id.idiag_if = tw->tw_bound_dev_if;
72247+
72248+#ifdef CONFIG_GRKERNSEC_HIDESYM
72249+ r->id.idiag_cookie[0] = 0;
72250+ r->id.idiag_cookie[1] = 0;
72251+#else
72252 r->id.idiag_cookie[0] = (u32)(unsigned long)tw;
72253 r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1);
72254+#endif
72255+
72256 r->id.idiag_sport = tw->tw_sport;
72257 r->id.idiag_dport = tw->tw_dport;
72258 r->id.idiag_src[0] = tw->tw_rcv_saddr;
72259@@ -284,12 +296,14 @@ static int inet_diag_get_exact(struct sk
72260 if (sk == NULL)
72261 goto unlock;
72262
72263+#ifndef CONFIG_GRKERNSEC_HIDESYM
72264 err = -ESTALE;
72265 if ((req->id.idiag_cookie[0] != INET_DIAG_NOCOOKIE ||
72266 req->id.idiag_cookie[1] != INET_DIAG_NOCOOKIE) &&
72267 ((u32)(unsigned long)sk != req->id.idiag_cookie[0] ||
72268 (u32)((((unsigned long)sk) >> 31) >> 1) != req->id.idiag_cookie[1]))
72269 goto out;
72270+#endif
72271
72272 err = -ENOMEM;
72273 rep = alloc_skb(NLMSG_SPACE((sizeof(struct inet_diag_msg) +
72274@@ -579,8 +593,14 @@ static int inet_diag_fill_req(struct sk_
72275 r->idiag_retrans = req->retrans;
72276
72277 r->id.idiag_if = sk->sk_bound_dev_if;
72278+
72279+#ifdef CONFIG_GRKERNSEC_HIDESYM
72280+ r->id.idiag_cookie[0] = 0;
72281+ r->id.idiag_cookie[1] = 0;
72282+#else
72283 r->id.idiag_cookie[0] = (u32)(unsigned long)req;
72284 r->id.idiag_cookie[1] = (u32)(((unsigned long)req >> 31) >> 1);
72285+#endif
72286
72287 tmo = req->expires - jiffies;
72288 if (tmo < 0)
72289diff -urNp linux-2.6.32.43/net/ipv4/inet_hashtables.c linux-2.6.32.43/net/ipv4/inet_hashtables.c
72290--- linux-2.6.32.43/net/ipv4/inet_hashtables.c 2011-03-27 14:31:47.000000000 -0400
72291+++ linux-2.6.32.43/net/ipv4/inet_hashtables.c 2011-08-07 19:48:09.000000000 -0400
72292@@ -18,11 +18,15 @@
72293 #include <linux/sched.h>
72294 #include <linux/slab.h>
72295 #include <linux/wait.h>
72296+#include <linux/security.h>
72297
72298 #include <net/inet_connection_sock.h>
72299 #include <net/inet_hashtables.h>
72300+#include <net/secure_seq.h>
72301 #include <net/ip.h>
72302
72303+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
72304+
72305 /*
72306 * Allocate and initialize a new local port bind bucket.
72307 * The bindhash mutex for snum's hash chain must be held here.
72308@@ -490,6 +494,8 @@ ok:
72309 }
72310 spin_unlock(&head->lock);
72311
72312+ gr_update_task_in_ip_table(current, inet_sk(sk));
72313+
72314 if (tw) {
72315 inet_twsk_deschedule(tw, death_row);
72316 inet_twsk_put(tw);
72317diff -urNp linux-2.6.32.43/net/ipv4/inetpeer.c linux-2.6.32.43/net/ipv4/inetpeer.c
72318--- linux-2.6.32.43/net/ipv4/inetpeer.c 2011-03-27 14:31:47.000000000 -0400
72319+++ linux-2.6.32.43/net/ipv4/inetpeer.c 2011-08-07 19:48:09.000000000 -0400
72320@@ -19,6 +19,7 @@
72321 #include <linux/net.h>
72322 #include <net/ip.h>
72323 #include <net/inetpeer.h>
72324+#include <net/secure_seq.h>
72325
72326 /*
72327 * Theory of operations.
72328@@ -366,6 +367,8 @@ struct inet_peer *inet_getpeer(__be32 da
72329 struct inet_peer *p, *n;
72330 struct inet_peer **stack[PEER_MAXDEPTH], ***stackptr;
72331
72332+ pax_track_stack();
72333+
72334 /* Look up for the address quickly. */
72335 read_lock_bh(&peer_pool_lock);
72336 p = lookup(daddr, NULL);
72337@@ -389,7 +392,7 @@ struct inet_peer *inet_getpeer(__be32 da
72338 return NULL;
72339 n->v4daddr = daddr;
72340 atomic_set(&n->refcnt, 1);
72341- atomic_set(&n->rid, 0);
72342+ atomic_set_unchecked(&n->rid, 0);
72343 n->ip_id_count = secure_ip_id(daddr);
72344 n->tcp_ts_stamp = 0;
72345
72346diff -urNp linux-2.6.32.43/net/ipv4/ip_fragment.c linux-2.6.32.43/net/ipv4/ip_fragment.c
72347--- linux-2.6.32.43/net/ipv4/ip_fragment.c 2011-03-27 14:31:47.000000000 -0400
72348+++ linux-2.6.32.43/net/ipv4/ip_fragment.c 2011-04-17 15:56:46.000000000 -0400
72349@@ -255,7 +255,7 @@ static inline int ip_frag_too_far(struct
72350 return 0;
72351
72352 start = qp->rid;
72353- end = atomic_inc_return(&peer->rid);
72354+ end = atomic_inc_return_unchecked(&peer->rid);
72355 qp->rid = end;
72356
72357 rc = qp->q.fragments && (end - start) > max;
72358diff -urNp linux-2.6.32.43/net/ipv4/ip_sockglue.c linux-2.6.32.43/net/ipv4/ip_sockglue.c
72359--- linux-2.6.32.43/net/ipv4/ip_sockglue.c 2011-03-27 14:31:47.000000000 -0400
72360+++ linux-2.6.32.43/net/ipv4/ip_sockglue.c 2011-05-16 21:46:57.000000000 -0400
72361@@ -1015,6 +1015,8 @@ static int do_ip_getsockopt(struct sock
72362 int val;
72363 int len;
72364
72365+ pax_track_stack();
72366+
72367 if (level != SOL_IP)
72368 return -EOPNOTSUPP;
72369
72370diff -urNp linux-2.6.32.43/net/ipv4/netfilter/arp_tables.c linux-2.6.32.43/net/ipv4/netfilter/arp_tables.c
72371--- linux-2.6.32.43/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:00:52.000000000 -0400
72372+++ linux-2.6.32.43/net/ipv4/netfilter/arp_tables.c 2011-04-17 17:04:18.000000000 -0400
72373@@ -934,6 +934,7 @@ static int get_info(struct net *net, voi
72374 private = &tmp;
72375 }
72376 #endif
72377+ memset(&info, 0, sizeof(info));
72378 info.valid_hooks = t->valid_hooks;
72379 memcpy(info.hook_entry, private->hook_entry,
72380 sizeof(info.hook_entry));
72381diff -urNp linux-2.6.32.43/net/ipv4/netfilter/ip_tables.c linux-2.6.32.43/net/ipv4/netfilter/ip_tables.c
72382--- linux-2.6.32.43/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:00:52.000000000 -0400
72383+++ linux-2.6.32.43/net/ipv4/netfilter/ip_tables.c 2011-04-17 17:04:18.000000000 -0400
72384@@ -1141,6 +1141,7 @@ static int get_info(struct net *net, voi
72385 private = &tmp;
72386 }
72387 #endif
72388+ memset(&info, 0, sizeof(info));
72389 info.valid_hooks = t->valid_hooks;
72390 memcpy(info.hook_entry, private->hook_entry,
72391 sizeof(info.hook_entry));
72392diff -urNp linux-2.6.32.43/net/ipv4/netfilter/nf_nat_proto_common.c linux-2.6.32.43/net/ipv4/netfilter/nf_nat_proto_common.c
72393--- linux-2.6.32.43/net/ipv4/netfilter/nf_nat_proto_common.c 2011-03-27 14:31:47.000000000 -0400
72394+++ linux-2.6.32.43/net/ipv4/netfilter/nf_nat_proto_common.c 2011-08-07 19:48:09.000000000 -0400
72395@@ -12,6 +12,7 @@
72396 #include <linux/ip.h>
72397
72398 #include <linux/netfilter.h>
72399+#include <net/secure_seq.h>
72400 #include <net/netfilter/nf_nat.h>
72401 #include <net/netfilter/nf_nat_core.h>
72402 #include <net/netfilter/nf_nat_rule.h>
72403diff -urNp linux-2.6.32.43/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.32.43/net/ipv4/netfilter/nf_nat_snmp_basic.c
72404--- linux-2.6.32.43/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-03-27 14:31:47.000000000 -0400
72405+++ linux-2.6.32.43/net/ipv4/netfilter/nf_nat_snmp_basic.c 2011-04-17 15:56:46.000000000 -0400
72406@@ -397,7 +397,7 @@ static unsigned char asn1_octets_decode(
72407
72408 *len = 0;
72409
72410- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC);
72411+ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC);
72412 if (*octets == NULL) {
72413 if (net_ratelimit())
72414 printk("OOM in bsalg (%d)\n", __LINE__);
72415diff -urNp linux-2.6.32.43/net/ipv4/raw.c linux-2.6.32.43/net/ipv4/raw.c
72416--- linux-2.6.32.43/net/ipv4/raw.c 2011-03-27 14:31:47.000000000 -0400
72417+++ linux-2.6.32.43/net/ipv4/raw.c 2011-05-04 17:59:08.000000000 -0400
72418@@ -292,7 +292,7 @@ static int raw_rcv_skb(struct sock * sk,
72419 /* Charge it to the socket. */
72420
72421 if (sock_queue_rcv_skb(sk, skb) < 0) {
72422- atomic_inc(&sk->sk_drops);
72423+ atomic_inc_unchecked(&sk->sk_drops);
72424 kfree_skb(skb);
72425 return NET_RX_DROP;
72426 }
72427@@ -303,7 +303,7 @@ static int raw_rcv_skb(struct sock * sk,
72428 int raw_rcv(struct sock *sk, struct sk_buff *skb)
72429 {
72430 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
72431- atomic_inc(&sk->sk_drops);
72432+ atomic_inc_unchecked(&sk->sk_drops);
72433 kfree_skb(skb);
72434 return NET_RX_DROP;
72435 }
72436@@ -724,15 +724,22 @@ static int raw_init(struct sock *sk)
72437
72438 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
72439 {
72440+ struct icmp_filter filter;
72441+
72442+ if (optlen < 0)
72443+ return -EINVAL;
72444 if (optlen > sizeof(struct icmp_filter))
72445 optlen = sizeof(struct icmp_filter);
72446- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
72447+ if (copy_from_user(&filter, optval, optlen))
72448 return -EFAULT;
72449+ memcpy(&raw_sk(sk)->filter, &filter, optlen);
72450+
72451 return 0;
72452 }
72453
72454 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
72455 {
72456+ struct icmp_filter filter;
72457 int len, ret = -EFAULT;
72458
72459 if (get_user(len, optlen))
72460@@ -743,8 +750,9 @@ static int raw_geticmpfilter(struct sock
72461 if (len > sizeof(struct icmp_filter))
72462 len = sizeof(struct icmp_filter);
72463 ret = -EFAULT;
72464+ memcpy(&filter, &raw_sk(sk)->filter, len);
72465 if (put_user(len, optlen) ||
72466- copy_to_user(optval, &raw_sk(sk)->filter, len))
72467+ copy_to_user(optval, &filter, len))
72468 goto out;
72469 ret = 0;
72470 out: return ret;
72471@@ -954,7 +962,13 @@ static void raw_sock_seq_show(struct seq
72472 sk_wmem_alloc_get(sp),
72473 sk_rmem_alloc_get(sp),
72474 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72475- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
72476+ atomic_read(&sp->sk_refcnt),
72477+#ifdef CONFIG_GRKERNSEC_HIDESYM
72478+ NULL,
72479+#else
72480+ sp,
72481+#endif
72482+ atomic_read_unchecked(&sp->sk_drops));
72483 }
72484
72485 static int raw_seq_show(struct seq_file *seq, void *v)
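/*
 * Editorial sketch, not part of the patch above: raw_seticmpfilter() now
 * rejects a negative optlen before clamping it and copies the user data into
 * a stack struct icmp_filter first, and raw_geticmpfilter() likewise bounces
 * the kernel data through a stack copy before copy_to_user().  The userspace
 * model below shows that validate-clamp-bounce pattern; set_filter() and
 * struct icmp_filter_demo are illustrative stand-ins, not socket API.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct icmp_filter_demo { uint32_t data; };

static int set_filter(struct icmp_filter_demo *dst, const void *optval, int optlen)
{
	struct icmp_filter_demo tmp;

	if (optlen < 0)				/* would become a huge size_t  */
		return -1;
	if ((size_t)optlen > sizeof(tmp))	/* clamp to the real object    */
		optlen = (int)sizeof(tmp);
	memcpy(&tmp, optval, (size_t)optlen);	/* bounce through a local copy */
	memcpy(dst, &tmp, (size_t)optlen);
	return 0;
}

int main(void)
{
	struct icmp_filter_demo f = { 0 };
	uint32_t user_word = 0xdeadbeefu;
	int rc;

	printf("optlen=-1 -> %d\n", set_filter(&f, &user_word, -1));
	rc = set_filter(&f, &user_word, (int)sizeof(user_word));
	printf("optlen=4  -> %d (data=%#x)\n", rc, f.data);
	return 0;
}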
72486diff -urNp linux-2.6.32.43/net/ipv4/route.c linux-2.6.32.43/net/ipv4/route.c
72487--- linux-2.6.32.43/net/ipv4/route.c 2011-03-27 14:31:47.000000000 -0400
72488+++ linux-2.6.32.43/net/ipv4/route.c 2011-08-07 19:48:09.000000000 -0400
72489@@ -107,6 +107,7 @@
72490 #ifdef CONFIG_SYSCTL
72491 #include <linux/sysctl.h>
72492 #endif
72493+#include <net/secure_seq.h>
72494
72495 #define RT_FL_TOS(oldflp) \
72496 ((u32)(oldflp->fl4_tos & (IPTOS_RT_MASK | RTO_ONLINK)))
72497@@ -268,7 +269,7 @@ static inline unsigned int rt_hash(__be3
72498
72499 static inline int rt_genid(struct net *net)
72500 {
72501- return atomic_read(&net->ipv4.rt_genid);
72502+ return atomic_read_unchecked(&net->ipv4.rt_genid);
72503 }
72504
72505 #ifdef CONFIG_PROC_FS
72506@@ -888,7 +889,7 @@ static void rt_cache_invalidate(struct n
72507 unsigned char shuffle;
72508
72509 get_random_bytes(&shuffle, sizeof(shuffle));
72510- atomic_add(shuffle + 1U, &net->ipv4.rt_genid);
72511+ atomic_add_unchecked(shuffle + 1U, &net->ipv4.rt_genid);
72512 }
72513
72514 /*
72515@@ -3356,7 +3357,7 @@ static __net_initdata struct pernet_oper
72516
72517 static __net_init int rt_secret_timer_init(struct net *net)
72518 {
72519- atomic_set(&net->ipv4.rt_genid,
72520+ atomic_set_unchecked(&net->ipv4.rt_genid,
72521 (int) ((num_physpages ^ (num_physpages>>8)) ^
72522 (jiffies ^ (jiffies >> 7))));
72523
72524diff -urNp linux-2.6.32.43/net/ipv4/tcp.c linux-2.6.32.43/net/ipv4/tcp.c
72525--- linux-2.6.32.43/net/ipv4/tcp.c 2011-03-27 14:31:47.000000000 -0400
72526+++ linux-2.6.32.43/net/ipv4/tcp.c 2011-05-16 21:46:57.000000000 -0400
72527@@ -2085,6 +2085,8 @@ static int do_tcp_setsockopt(struct sock
72528 int val;
72529 int err = 0;
72530
72531+ pax_track_stack();
72532+
72533 /* This is a string value all the others are int's */
72534 if (optname == TCP_CONGESTION) {
72535 char name[TCP_CA_NAME_MAX];
72536@@ -2355,6 +2357,8 @@ static int do_tcp_getsockopt(struct sock
72537 struct tcp_sock *tp = tcp_sk(sk);
72538 int val, len;
72539
72540+ pax_track_stack();
72541+
72542 if (get_user(len, optlen))
72543 return -EFAULT;
72544
72545diff -urNp linux-2.6.32.43/net/ipv4/tcp_ipv4.c linux-2.6.32.43/net/ipv4/tcp_ipv4.c
72546--- linux-2.6.32.43/net/ipv4/tcp_ipv4.c 2011-03-27 14:31:47.000000000 -0400
72547+++ linux-2.6.32.43/net/ipv4/tcp_ipv4.c 2011-08-07 19:48:09.000000000 -0400
72548@@ -71,6 +71,7 @@
72549 #include <net/timewait_sock.h>
72550 #include <net/xfrm.h>
72551 #include <net/netdma.h>
72552+#include <net/secure_seq.h>
72553
72554 #include <linux/inet.h>
72555 #include <linux/ipv6.h>
72556@@ -84,6 +85,9 @@
72557 int sysctl_tcp_tw_reuse __read_mostly;
72558 int sysctl_tcp_low_latency __read_mostly;
72559
72560+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72561+extern int grsec_enable_blackhole;
72562+#endif
72563
72564 #ifdef CONFIG_TCP_MD5SIG
72565 static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk,
72566@@ -1542,6 +1546,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc
72567 return 0;
72568
72569 reset:
72570+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72571+ if (!grsec_enable_blackhole)
72572+#endif
72573 tcp_v4_send_reset(rsk, skb);
72574 discard:
72575 kfree_skb(skb);
72576@@ -1603,12 +1610,20 @@ int tcp_v4_rcv(struct sk_buff *skb)
72577 TCP_SKB_CB(skb)->sacked = 0;
72578
72579 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
72580- if (!sk)
72581+ if (!sk) {
72582+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72583+ ret = 1;
72584+#endif
72585 goto no_tcp_socket;
72586+ }
72587
72588 process:
72589- if (sk->sk_state == TCP_TIME_WAIT)
72590+ if (sk->sk_state == TCP_TIME_WAIT) {
72591+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72592+ ret = 2;
72593+#endif
72594 goto do_time_wait;
72595+ }
72596
72597 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
72598 goto discard_and_relse;
72599@@ -1650,6 +1665,10 @@ no_tcp_socket:
72600 bad_packet:
72601 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
72602 } else {
72603+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72604+ if (!grsec_enable_blackhole || (ret == 1 &&
72605+ (skb->dev->flags & IFF_LOOPBACK)))
72606+#endif
72607 tcp_v4_send_reset(NULL, skb);
72608 }
72609
72610@@ -2194,14 +2213,14 @@ int tcp_proc_register(struct net *net, s
72611 int rc = 0;
72612 struct proc_dir_entry *p;
72613
72614- afinfo->seq_fops.open = tcp_seq_open;
72615- afinfo->seq_fops.read = seq_read;
72616- afinfo->seq_fops.llseek = seq_lseek;
72617- afinfo->seq_fops.release = seq_release_net;
72618-
72619- afinfo->seq_ops.start = tcp_seq_start;
72620- afinfo->seq_ops.next = tcp_seq_next;
72621- afinfo->seq_ops.stop = tcp_seq_stop;
72622+ *(void **)&afinfo->seq_fops.open = tcp_seq_open;
72623+ *(void **)&afinfo->seq_fops.read = seq_read;
72624+ *(void **)&afinfo->seq_fops.llseek = seq_lseek;
72625+ *(void **)&afinfo->seq_fops.release = seq_release_net;
72626+
72627+ *(void **)&afinfo->seq_ops.start = tcp_seq_start;
72628+ *(void **)&afinfo->seq_ops.next = tcp_seq_next;
72629+ *(void **)&afinfo->seq_ops.stop = tcp_seq_stop;
72630
72631 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
72632 &afinfo->seq_fops, afinfo);
72633@@ -2237,7 +2256,11 @@ static void get_openreq4(struct sock *sk
72634 0, /* non standard timer */
72635 0, /* open_requests have no inode */
72636 atomic_read(&sk->sk_refcnt),
72637+#ifdef CONFIG_GRKERNSEC_HIDESYM
72638+ NULL,
72639+#else
72640 req,
72641+#endif
72642 len);
72643 }
72644
72645@@ -2279,7 +2302,12 @@ static void get_tcp4_sock(struct sock *s
72646 sock_i_uid(sk),
72647 icsk->icsk_probes_out,
72648 sock_i_ino(sk),
72649- atomic_read(&sk->sk_refcnt), sk,
72650+ atomic_read(&sk->sk_refcnt),
72651+#ifdef CONFIG_GRKERNSEC_HIDESYM
72652+ NULL,
72653+#else
72654+ sk,
72655+#endif
72656 jiffies_to_clock_t(icsk->icsk_rto),
72657 jiffies_to_clock_t(icsk->icsk_ack.ato),
72658 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
72659@@ -2307,7 +2335,13 @@ static void get_timewait4_sock(struct in
72660 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n",
72661 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
72662 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
72663- atomic_read(&tw->tw_refcnt), tw, len);
72664+ atomic_read(&tw->tw_refcnt),
72665+#ifdef CONFIG_GRKERNSEC_HIDESYM
72666+ NULL,
72667+#else
72668+ tw,
72669+#endif
72670+ len);
72671 }
72672
72673 #define TMPSZ 150
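/*
 * Editorial sketch, not part of the patch above: the GRKERNSEC_BLACKHOLE
 * hunks make tcp_v4_rcv() skip tcp_v4_send_reset() for packets that matched
 * no socket (ret == 1) unless they arrived over loopback, so remote port
 * scans see silence instead of RSTs.  The helper below mirrors only that
 * predicate; the parameter names stand in for grsec_enable_blackhole and the
 * IFF_LOOPBACK test.
 */
#include <stdbool.h>
#include <stdio.h>

static bool should_send_reset(bool blackhole_enabled, int ret, bool from_loopback)
{
	/* mirrors: !grsec_enable_blackhole || (ret == 1 && IFF_LOOPBACK) */
	return !blackhole_enabled || (ret == 1 && from_loopback);
}

int main(void)
{
	printf("remote probe, no listener:   %s\n",
	       should_send_reset(true, 1, false) ? "RST" : "drop");
	printf("loopback probe, no listener: %s\n",
	       should_send_reset(true, 1, true) ? "RST" : "drop");
	printf("blackhole disabled:          %s\n",
	       should_send_reset(false, 1, false) ? "RST" : "drop");
	return 0;
}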
72674diff -urNp linux-2.6.32.43/net/ipv4/tcp_minisocks.c linux-2.6.32.43/net/ipv4/tcp_minisocks.c
72675--- linux-2.6.32.43/net/ipv4/tcp_minisocks.c 2011-03-27 14:31:47.000000000 -0400
72676+++ linux-2.6.32.43/net/ipv4/tcp_minisocks.c 2011-04-17 15:56:46.000000000 -0400
72677@@ -26,6 +26,10 @@
72678 #include <net/inet_common.h>
72679 #include <net/xfrm.h>
72680
72681+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72682+extern int grsec_enable_blackhole;
72683+#endif
72684+
72685 #ifdef CONFIG_SYSCTL
72686 #define SYNC_INIT 0 /* let the user enable it */
72687 #else
72688@@ -672,6 +676,10 @@ listen_overflow:
72689
72690 embryonic_reset:
72691 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS);
72692+
72693+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72694+ if (!grsec_enable_blackhole)
72695+#endif
72696 if (!(flg & TCP_FLAG_RST))
72697 req->rsk_ops->send_reset(sk, skb);
72698
72699diff -urNp linux-2.6.32.43/net/ipv4/tcp_output.c linux-2.6.32.43/net/ipv4/tcp_output.c
72700--- linux-2.6.32.43/net/ipv4/tcp_output.c 2011-03-27 14:31:47.000000000 -0400
72701+++ linux-2.6.32.43/net/ipv4/tcp_output.c 2011-05-16 21:46:57.000000000 -0400
72702@@ -2234,6 +2234,8 @@ struct sk_buff *tcp_make_synack(struct s
72703 __u8 *md5_hash_location;
72704 int mss;
72705
72706+ pax_track_stack();
72707+
72708 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC);
72709 if (skb == NULL)
72710 return NULL;
72711diff -urNp linux-2.6.32.43/net/ipv4/tcp_probe.c linux-2.6.32.43/net/ipv4/tcp_probe.c
72712--- linux-2.6.32.43/net/ipv4/tcp_probe.c 2011-03-27 14:31:47.000000000 -0400
72713+++ linux-2.6.32.43/net/ipv4/tcp_probe.c 2011-04-17 15:56:46.000000000 -0400
72714@@ -200,7 +200,7 @@ static ssize_t tcpprobe_read(struct file
72715 if (cnt + width >= len)
72716 break;
72717
72718- if (copy_to_user(buf + cnt, tbuf, width))
72719+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
72720 return -EFAULT;
72721 cnt += width;
72722 }
72723diff -urNp linux-2.6.32.43/net/ipv4/tcp_timer.c linux-2.6.32.43/net/ipv4/tcp_timer.c
72724--- linux-2.6.32.43/net/ipv4/tcp_timer.c 2011-03-27 14:31:47.000000000 -0400
72725+++ linux-2.6.32.43/net/ipv4/tcp_timer.c 2011-04-17 15:56:46.000000000 -0400
72726@@ -21,6 +21,10 @@
72727 #include <linux/module.h>
72728 #include <net/tcp.h>
72729
72730+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72731+extern int grsec_lastack_retries;
72732+#endif
72733+
72734 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
72735 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
72736 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
72737@@ -164,6 +168,13 @@ static int tcp_write_timeout(struct sock
72738 }
72739 }
72740
72741+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72742+ if ((sk->sk_state == TCP_LAST_ACK) &&
72743+ (grsec_lastack_retries > 0) &&
72744+ (grsec_lastack_retries < retry_until))
72745+ retry_until = grsec_lastack_retries;
72746+#endif
72747+
72748 if (retransmits_timed_out(sk, retry_until)) {
72749 /* Has it gone just too far? */
72750 tcp_write_err(sk);
72751diff -urNp linux-2.6.32.43/net/ipv4/udp.c linux-2.6.32.43/net/ipv4/udp.c
72752--- linux-2.6.32.43/net/ipv4/udp.c 2011-07-13 17:23:04.000000000 -0400
72753+++ linux-2.6.32.43/net/ipv4/udp.c 2011-08-05 20:33:55.000000000 -0400
72754@@ -86,6 +86,7 @@
72755 #include <linux/types.h>
72756 #include <linux/fcntl.h>
72757 #include <linux/module.h>
72758+#include <linux/security.h>
72759 #include <linux/socket.h>
72760 #include <linux/sockios.h>
72761 #include <linux/igmp.h>
72762@@ -106,6 +107,10 @@
72763 #include <net/xfrm.h>
72764 #include "udp_impl.h"
72765
72766+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72767+extern int grsec_enable_blackhole;
72768+#endif
72769+
72770 struct udp_table udp_table;
72771 EXPORT_SYMBOL(udp_table);
72772
72773@@ -371,6 +376,9 @@ found:
72774 return s;
72775 }
72776
72777+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
72778+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
72779+
72780 /*
72781 * This routine is called by the ICMP module when it gets some
72782 * sort of error condition. If err < 0 then the socket should
72783@@ -639,9 +647,18 @@ int udp_sendmsg(struct kiocb *iocb, stru
72784 dport = usin->sin_port;
72785 if (dport == 0)
72786 return -EINVAL;
72787+
72788+ err = gr_search_udp_sendmsg(sk, usin);
72789+ if (err)
72790+ return err;
72791 } else {
72792 if (sk->sk_state != TCP_ESTABLISHED)
72793 return -EDESTADDRREQ;
72794+
72795+ err = gr_search_udp_sendmsg(sk, NULL);
72796+ if (err)
72797+ return err;
72798+
72799 daddr = inet->daddr;
72800 dport = inet->dport;
72801 /* Open fast path for connected socket.
72802@@ -945,6 +962,10 @@ try_again:
72803 if (!skb)
72804 goto out;
72805
72806+ err = gr_search_udp_recvmsg(sk, skb);
72807+ if (err)
72808+ goto out_free;
72809+
72810 ulen = skb->len - sizeof(struct udphdr);
72811 copied = len;
72812 if (copied > ulen)
72813@@ -1068,7 +1089,7 @@ static int __udp_queue_rcv_skb(struct so
72814 if (rc == -ENOMEM) {
72815 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
72816 is_udplite);
72817- atomic_inc(&sk->sk_drops);
72818+ atomic_inc_unchecked(&sk->sk_drops);
72819 }
72820 goto drop;
72821 }
72822@@ -1338,6 +1359,9 @@ int __udp4_lib_rcv(struct sk_buff *skb,
72823 goto csum_error;
72824
72825 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
72826+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
72827+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
72828+#endif
72829 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
72830
72831 /*
72832@@ -1719,14 +1743,14 @@ int udp_proc_register(struct net *net, s
72833 struct proc_dir_entry *p;
72834 int rc = 0;
72835
72836- afinfo->seq_fops.open = udp_seq_open;
72837- afinfo->seq_fops.read = seq_read;
72838- afinfo->seq_fops.llseek = seq_lseek;
72839- afinfo->seq_fops.release = seq_release_net;
72840-
72841- afinfo->seq_ops.start = udp_seq_start;
72842- afinfo->seq_ops.next = udp_seq_next;
72843- afinfo->seq_ops.stop = udp_seq_stop;
72844+ *(void **)&afinfo->seq_fops.open = udp_seq_open;
72845+ *(void **)&afinfo->seq_fops.read = seq_read;
72846+ *(void **)&afinfo->seq_fops.llseek = seq_lseek;
72847+ *(void **)&afinfo->seq_fops.release = seq_release_net;
72848+
72849+ *(void **)&afinfo->seq_ops.start = udp_seq_start;
72850+ *(void **)&afinfo->seq_ops.next = udp_seq_next;
72851+ *(void **)&afinfo->seq_ops.stop = udp_seq_stop;
72852
72853 p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
72854 &afinfo->seq_fops, afinfo);
72855@@ -1758,8 +1782,13 @@ static void udp4_format_sock(struct sock
72856 sk_wmem_alloc_get(sp),
72857 sk_rmem_alloc_get(sp),
72858 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp),
72859- atomic_read(&sp->sk_refcnt), sp,
72860- atomic_read(&sp->sk_drops), len);
72861+ atomic_read(&sp->sk_refcnt),
72862+#ifdef CONFIG_GRKERNSEC_HIDESYM
72863+ NULL,
72864+#else
72865+ sp,
72866+#endif
72867+ atomic_read_unchecked(&sp->sk_drops), len);
72868 }
72869
72870 int udp4_seq_show(struct seq_file *seq, void *v)
72871diff -urNp linux-2.6.32.43/net/ipv6/inet6_connection_sock.c linux-2.6.32.43/net/ipv6/inet6_connection_sock.c
72872--- linux-2.6.32.43/net/ipv6/inet6_connection_sock.c 2011-03-27 14:31:47.000000000 -0400
72873+++ linux-2.6.32.43/net/ipv6/inet6_connection_sock.c 2011-05-04 17:56:28.000000000 -0400
72874@@ -152,7 +152,7 @@ void __inet6_csk_dst_store(struct sock *
72875 #ifdef CONFIG_XFRM
72876 {
72877 struct rt6_info *rt = (struct rt6_info *)dst;
72878- rt->rt6i_flow_cache_genid = atomic_read(&flow_cache_genid);
72879+ rt->rt6i_flow_cache_genid = atomic_read_unchecked(&flow_cache_genid);
72880 }
72881 #endif
72882 }
72883@@ -167,7 +167,7 @@ struct dst_entry *__inet6_csk_dst_check(
72884 #ifdef CONFIG_XFRM
72885 if (dst) {
72886 struct rt6_info *rt = (struct rt6_info *)dst;
72887- if (rt->rt6i_flow_cache_genid != atomic_read(&flow_cache_genid)) {
72888+ if (rt->rt6i_flow_cache_genid != atomic_read_unchecked(&flow_cache_genid)) {
72889 sk->sk_dst_cache = NULL;
72890 dst_release(dst);
72891 dst = NULL;
72892diff -urNp linux-2.6.32.43/net/ipv6/inet6_hashtables.c linux-2.6.32.43/net/ipv6/inet6_hashtables.c
72893--- linux-2.6.32.43/net/ipv6/inet6_hashtables.c 2011-03-27 14:31:47.000000000 -0400
72894+++ linux-2.6.32.43/net/ipv6/inet6_hashtables.c 2011-08-07 19:48:09.000000000 -0400
72895@@ -20,6 +20,7 @@
72896 #include <net/inet_connection_sock.h>
72897 #include <net/inet_hashtables.h>
72898 #include <net/inet6_hashtables.h>
72899+#include <net/secure_seq.h>
72900 #include <net/ip.h>
72901
72902 void __inet6_hash(struct sock *sk)
72903@@ -118,7 +119,7 @@ out:
72904 }
72905 EXPORT_SYMBOL(__inet6_lookup_established);
72906
72907-static int inline compute_score(struct sock *sk, struct net *net,
72908+static inline int compute_score(struct sock *sk, struct net *net,
72909 const unsigned short hnum,
72910 const struct in6_addr *daddr,
72911 const int dif)
72912diff -urNp linux-2.6.32.43/net/ipv6/ipv6_sockglue.c linux-2.6.32.43/net/ipv6/ipv6_sockglue.c
72913--- linux-2.6.32.43/net/ipv6/ipv6_sockglue.c 2011-03-27 14:31:47.000000000 -0400
72914+++ linux-2.6.32.43/net/ipv6/ipv6_sockglue.c 2011-05-16 21:46:57.000000000 -0400
72915@@ -130,6 +130,8 @@ static int do_ipv6_setsockopt(struct soc
72916 int val, valbool;
72917 int retv = -ENOPROTOOPT;
72918
72919+ pax_track_stack();
72920+
72921 if (optval == NULL)
72922 val=0;
72923 else {
72924@@ -881,6 +883,8 @@ static int do_ipv6_getsockopt(struct soc
72925 int len;
72926 int val;
72927
72928+ pax_track_stack();
72929+
72930 if (ip6_mroute_opt(optname))
72931 return ip6_mroute_getsockopt(sk, optname, optval, optlen);
72932
72933diff -urNp linux-2.6.32.43/net/ipv6/netfilter/ip6_tables.c linux-2.6.32.43/net/ipv6/netfilter/ip6_tables.c
72934--- linux-2.6.32.43/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:00:52.000000000 -0400
72935+++ linux-2.6.32.43/net/ipv6/netfilter/ip6_tables.c 2011-04-17 17:04:18.000000000 -0400
72936@@ -1173,6 +1173,7 @@ static int get_info(struct net *net, voi
72937 private = &tmp;
72938 }
72939 #endif
72940+ memset(&info, 0, sizeof(info));
72941 info.valid_hooks = t->valid_hooks;
72942 memcpy(info.hook_entry, private->hook_entry,
72943 sizeof(info.hook_entry));
72944diff -urNp linux-2.6.32.43/net/ipv6/raw.c linux-2.6.32.43/net/ipv6/raw.c
72945--- linux-2.6.32.43/net/ipv6/raw.c 2011-03-27 14:31:47.000000000 -0400
72946+++ linux-2.6.32.43/net/ipv6/raw.c 2011-05-16 21:46:57.000000000 -0400
72947@@ -375,14 +375,14 @@ static inline int rawv6_rcv_skb(struct s
72948 {
72949 if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
72950 skb_checksum_complete(skb)) {
72951- atomic_inc(&sk->sk_drops);
72952+ atomic_inc_unchecked(&sk->sk_drops);
72953 kfree_skb(skb);
72954 return NET_RX_DROP;
72955 }
72956
72957 /* Charge it to the socket. */
72958 if (sock_queue_rcv_skb(sk,skb)<0) {
72959- atomic_inc(&sk->sk_drops);
72960+ atomic_inc_unchecked(&sk->sk_drops);
72961 kfree_skb(skb);
72962 return NET_RX_DROP;
72963 }
72964@@ -403,7 +403,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72965 struct raw6_sock *rp = raw6_sk(sk);
72966
72967 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
72968- atomic_inc(&sk->sk_drops);
72969+ atomic_inc_unchecked(&sk->sk_drops);
72970 kfree_skb(skb);
72971 return NET_RX_DROP;
72972 }
72973@@ -427,7 +427,7 @@ int rawv6_rcv(struct sock *sk, struct sk
72974
72975 if (inet->hdrincl) {
72976 if (skb_checksum_complete(skb)) {
72977- atomic_inc(&sk->sk_drops);
72978+ atomic_inc_unchecked(&sk->sk_drops);
72979 kfree_skb(skb);
72980 return NET_RX_DROP;
72981 }
72982@@ -518,7 +518,7 @@ csum_copy_err:
72983 as some normal condition.
72984 */
72985 err = (flags&MSG_DONTWAIT) ? -EAGAIN : -EHOSTUNREACH;
72986- atomic_inc(&sk->sk_drops);
72987+ atomic_inc_unchecked(&sk->sk_drops);
72988 goto out;
72989 }
72990
72991@@ -600,7 +600,7 @@ out:
72992 return err;
72993 }
72994
72995-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
72996+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
72997 struct flowi *fl, struct rt6_info *rt,
72998 unsigned int flags)
72999 {
73000@@ -738,6 +738,8 @@ static int rawv6_sendmsg(struct kiocb *i
73001 u16 proto;
73002 int err;
73003
73004+ pax_track_stack();
73005+
73006 /* Rough check on arithmetic overflow,
73007 better check is made in ip6_append_data().
73008 */
73009@@ -916,12 +918,17 @@ do_confirm:
73010 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
73011 char __user *optval, int optlen)
73012 {
73013+ struct icmp6_filter filter;
73014+
73015 switch (optname) {
73016 case ICMPV6_FILTER:
73017+ if (optlen < 0)
73018+ return -EINVAL;
73019 if (optlen > sizeof(struct icmp6_filter))
73020 optlen = sizeof(struct icmp6_filter);
73021- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
73022+ if (copy_from_user(&filter, optval, optlen))
73023 return -EFAULT;
73024+ memcpy(&raw6_sk(sk)->filter, &filter, optlen);
73025 return 0;
73026 default:
73027 return -ENOPROTOOPT;
73028@@ -933,6 +940,7 @@ static int rawv6_seticmpfilter(struct so
73029 static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
73030 char __user *optval, int __user *optlen)
73031 {
73032+ struct icmp6_filter filter;
73033 int len;
73034
73035 switch (optname) {
73036@@ -945,7 +953,8 @@ static int rawv6_geticmpfilter(struct so
73037 len = sizeof(struct icmp6_filter);
73038 if (put_user(len, optlen))
73039 return -EFAULT;
73040- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
73041+ memcpy(&filter, &raw6_sk(sk)->filter, len);
73042+ if (copy_to_user(optval, &filter, len))
73043 return -EFAULT;
73044 return 0;
73045 default:
73046@@ -1241,7 +1250,13 @@ static void raw6_sock_seq_show(struct se
73047 0, 0L, 0,
73048 sock_i_uid(sp), 0,
73049 sock_i_ino(sp),
73050- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
73051+ atomic_read(&sp->sk_refcnt),
73052+#ifdef CONFIG_GRKERNSEC_HIDESYM
73053+ NULL,
73054+#else
73055+ sp,
73056+#endif
73057+ atomic_read_unchecked(&sp->sk_drops));
73058 }
73059
73060 static int raw6_seq_show(struct seq_file *seq, void *v)
73061diff -urNp linux-2.6.32.43/net/ipv6/tcp_ipv6.c linux-2.6.32.43/net/ipv6/tcp_ipv6.c
73062--- linux-2.6.32.43/net/ipv6/tcp_ipv6.c 2011-03-27 14:31:47.000000000 -0400
73063+++ linux-2.6.32.43/net/ipv6/tcp_ipv6.c 2011-08-07 19:48:09.000000000 -0400
73064@@ -60,6 +60,7 @@
73065 #include <net/timewait_sock.h>
73066 #include <net/netdma.h>
73067 #include <net/inet_common.h>
73068+#include <net/secure_seq.h>
73069
73070 #include <asm/uaccess.h>
73071
73072@@ -88,6 +89,10 @@ static struct tcp_md5sig_key *tcp_v6_md5
73073 }
73074 #endif
73075
73076+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73077+extern int grsec_enable_blackhole;
73078+#endif
73079+
73080 static void tcp_v6_hash(struct sock *sk)
73081 {
73082 if (sk->sk_state != TCP_CLOSE) {
73083@@ -1578,6 +1583,9 @@ static int tcp_v6_do_rcv(struct sock *sk
73084 return 0;
73085
73086 reset:
73087+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73088+ if (!grsec_enable_blackhole)
73089+#endif
73090 tcp_v6_send_reset(sk, skb);
73091 discard:
73092 if (opt_skb)
73093@@ -1655,12 +1663,20 @@ static int tcp_v6_rcv(struct sk_buff *sk
73094 TCP_SKB_CB(skb)->sacked = 0;
73095
73096 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
73097- if (!sk)
73098+ if (!sk) {
73099+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73100+ ret = 1;
73101+#endif
73102 goto no_tcp_socket;
73103+ }
73104
73105 process:
73106- if (sk->sk_state == TCP_TIME_WAIT)
73107+ if (sk->sk_state == TCP_TIME_WAIT) {
73108+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73109+ ret = 2;
73110+#endif
73111 goto do_time_wait;
73112+ }
73113
73114 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb))
73115 goto discard_and_relse;
73116@@ -1700,6 +1716,10 @@ no_tcp_socket:
73117 bad_packet:
73118 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
73119 } else {
73120+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73121+ if (!grsec_enable_blackhole || (ret == 1 &&
73122+ (skb->dev->flags & IFF_LOOPBACK)))
73123+#endif
73124 tcp_v6_send_reset(NULL, skb);
73125 }
73126
73127@@ -1915,7 +1935,13 @@ static void get_openreq6(struct seq_file
73128 uid,
73129 0, /* non standard timer */
73130 0, /* open_requests have no inode */
73131- 0, req);
73132+ 0,
73133+#ifdef CONFIG_GRKERNSEC_HIDESYM
73134+ NULL
73135+#else
73136+ req
73137+#endif
73138+ );
73139 }
73140
73141 static void get_tcp6_sock(struct seq_file *seq, struct sock *sp, int i)
73142@@ -1965,7 +1991,12 @@ static void get_tcp6_sock(struct seq_fil
73143 sock_i_uid(sp),
73144 icsk->icsk_probes_out,
73145 sock_i_ino(sp),
73146- atomic_read(&sp->sk_refcnt), sp,
73147+ atomic_read(&sp->sk_refcnt),
73148+#ifdef CONFIG_GRKERNSEC_HIDESYM
73149+ NULL,
73150+#else
73151+ sp,
73152+#endif
73153 jiffies_to_clock_t(icsk->icsk_rto),
73154 jiffies_to_clock_t(icsk->icsk_ack.ato),
73155 (icsk->icsk_ack.quick << 1 ) | icsk->icsk_ack.pingpong,
73156@@ -2000,7 +2031,13 @@ static void get_timewait6_sock(struct se
73157 dest->s6_addr32[2], dest->s6_addr32[3], destp,
73158 tw->tw_substate, 0, 0,
73159 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0,
73160- atomic_read(&tw->tw_refcnt), tw);
73161+ atomic_read(&tw->tw_refcnt),
73162+#ifdef CONFIG_GRKERNSEC_HIDESYM
73163+ NULL
73164+#else
73165+ tw
73166+#endif
73167+ );
73168 }
73169
73170 static int tcp6_seq_show(struct seq_file *seq, void *v)
73171diff -urNp linux-2.6.32.43/net/ipv6/udp.c linux-2.6.32.43/net/ipv6/udp.c
73172--- linux-2.6.32.43/net/ipv6/udp.c 2011-07-13 17:23:04.000000000 -0400
73173+++ linux-2.6.32.43/net/ipv6/udp.c 2011-07-13 17:23:27.000000000 -0400
73174@@ -49,6 +49,10 @@
73175 #include <linux/seq_file.h>
73176 #include "udp_impl.h"
73177
73178+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73179+extern int grsec_enable_blackhole;
73180+#endif
73181+
73182 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
73183 {
73184 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
73185@@ -391,7 +395,7 @@ int udpv6_queue_rcv_skb(struct sock * sk
73186 if (rc == -ENOMEM) {
73187 UDP6_INC_STATS_BH(sock_net(sk),
73188 UDP_MIB_RCVBUFERRORS, is_udplite);
73189- atomic_inc(&sk->sk_drops);
73190+ atomic_inc_unchecked(&sk->sk_drops);
73191 }
73192 goto drop;
73193 }
73194@@ -590,6 +594,9 @@ int __udp6_lib_rcv(struct sk_buff *skb,
73195 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS,
73196 proto == IPPROTO_UDPLITE);
73197
73198+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
73199+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
73200+#endif
73201 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0, dev);
73202
73203 kfree_skb(skb);
73204@@ -1209,8 +1216,13 @@ static void udp6_sock_seq_show(struct se
73205 0, 0L, 0,
73206 sock_i_uid(sp), 0,
73207 sock_i_ino(sp),
73208- atomic_read(&sp->sk_refcnt), sp,
73209- atomic_read(&sp->sk_drops));
73210+ atomic_read(&sp->sk_refcnt),
73211+#ifdef CONFIG_GRKERNSEC_HIDESYM
73212+ NULL,
73213+#else
73214+ sp,
73215+#endif
73216+ atomic_read_unchecked(&sp->sk_drops));
73217 }
73218
73219 int udp6_seq_show(struct seq_file *seq, void *v)
73220diff -urNp linux-2.6.32.43/net/irda/ircomm/ircomm_tty.c linux-2.6.32.43/net/irda/ircomm/ircomm_tty.c
73221--- linux-2.6.32.43/net/irda/ircomm/ircomm_tty.c 2011-03-27 14:31:47.000000000 -0400
73222+++ linux-2.6.32.43/net/irda/ircomm/ircomm_tty.c 2011-04-17 15:56:46.000000000 -0400
73223@@ -280,16 +280,16 @@ static int ircomm_tty_block_til_ready(st
73224 add_wait_queue(&self->open_wait, &wait);
73225
73226 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
73227- __FILE__,__LINE__, tty->driver->name, self->open_count );
73228+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73229
73230 /* As far as I can see, we protect open_count - Jean II */
73231 spin_lock_irqsave(&self->spinlock, flags);
73232 if (!tty_hung_up_p(filp)) {
73233 extra_count = 1;
73234- self->open_count--;
73235+ local_dec(&self->open_count);
73236 }
73237 spin_unlock_irqrestore(&self->spinlock, flags);
73238- self->blocked_open++;
73239+ local_inc(&self->blocked_open);
73240
73241 while (1) {
73242 if (tty->termios->c_cflag & CBAUD) {
73243@@ -329,7 +329,7 @@ static int ircomm_tty_block_til_ready(st
73244 }
73245
73246 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
73247- __FILE__,__LINE__, tty->driver->name, self->open_count );
73248+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count) );
73249
73250 schedule();
73251 }
73252@@ -340,13 +340,13 @@ static int ircomm_tty_block_til_ready(st
73253 if (extra_count) {
73254 /* ++ is not atomic, so this should be protected - Jean II */
73255 spin_lock_irqsave(&self->spinlock, flags);
73256- self->open_count++;
73257+ local_inc(&self->open_count);
73258 spin_unlock_irqrestore(&self->spinlock, flags);
73259 }
73260- self->blocked_open--;
73261+ local_dec(&self->blocked_open);
73262
73263 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
73264- __FILE__,__LINE__, tty->driver->name, self->open_count);
73265+ __FILE__,__LINE__, tty->driver->name, local_read(&self->open_count));
73266
73267 if (!retval)
73268 self->flags |= ASYNC_NORMAL_ACTIVE;
73269@@ -415,14 +415,14 @@ static int ircomm_tty_open(struct tty_st
73270 }
73271 /* ++ is not atomic, so this should be protected - Jean II */
73272 spin_lock_irqsave(&self->spinlock, flags);
73273- self->open_count++;
73274+ local_inc(&self->open_count);
73275
73276 tty->driver_data = self;
73277 self->tty = tty;
73278 spin_unlock_irqrestore(&self->spinlock, flags);
73279
73280 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
73281- self->line, self->open_count);
73282+ self->line, local_read(&self->open_count));
73283
73284 /* Not really used by us, but lets do it anyway */
73285 self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0;
73286@@ -511,7 +511,7 @@ static void ircomm_tty_close(struct tty_
73287 return;
73288 }
73289
73290- if ((tty->count == 1) && (self->open_count != 1)) {
73291+ if ((tty->count == 1) && (local_read(&self->open_count) != 1)) {
73292 /*
73293 * Uh, oh. tty->count is 1, which means that the tty
73294 * structure will be freed. state->count should always
73295@@ -521,16 +521,16 @@ static void ircomm_tty_close(struct tty_
73296 */
73297 IRDA_DEBUG(0, "%s(), bad serial port count; "
73298 "tty->count is 1, state->count is %d\n", __func__ ,
73299- self->open_count);
73300- self->open_count = 1;
73301+ local_read(&self->open_count));
73302+ local_set(&self->open_count, 1);
73303 }
73304
73305- if (--self->open_count < 0) {
73306+ if (local_dec_return(&self->open_count) < 0) {
73307 IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n",
73308- __func__, self->line, self->open_count);
73309- self->open_count = 0;
73310+ __func__, self->line, local_read(&self->open_count));
73311+ local_set(&self->open_count, 0);
73312 }
73313- if (self->open_count) {
73314+ if (local_read(&self->open_count)) {
73315 spin_unlock_irqrestore(&self->spinlock, flags);
73316
73317 IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ );
73318@@ -562,7 +562,7 @@ static void ircomm_tty_close(struct tty_
73319 tty->closing = 0;
73320 self->tty = NULL;
73321
73322- if (self->blocked_open) {
73323+ if (local_read(&self->blocked_open)) {
73324 if (self->close_delay)
73325 schedule_timeout_interruptible(self->close_delay);
73326 wake_up_interruptible(&self->open_wait);
73327@@ -1017,7 +1017,7 @@ static void ircomm_tty_hangup(struct tty
73328 spin_lock_irqsave(&self->spinlock, flags);
73329 self->flags &= ~ASYNC_NORMAL_ACTIVE;
73330 self->tty = NULL;
73331- self->open_count = 0;
73332+ local_set(&self->open_count, 0);
73333 spin_unlock_irqrestore(&self->spinlock, flags);
73334
73335 wake_up_interruptible(&self->open_wait);
73336@@ -1369,7 +1369,7 @@ static void ircomm_tty_line_info(struct
73337 seq_putc(m, '\n');
73338
73339 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
73340- seq_printf(m, "Open count: %d\n", self->open_count);
73341+ seq_printf(m, "Open count: %d\n", local_read(&self->open_count));
73342 seq_printf(m, "Max data size: %d\n", self->max_data_size);
73343 seq_printf(m, "Max header size: %d\n", self->max_header_size);
73344
73345diff -urNp linux-2.6.32.43/net/iucv/af_iucv.c linux-2.6.32.43/net/iucv/af_iucv.c
73346--- linux-2.6.32.43/net/iucv/af_iucv.c 2011-03-27 14:31:47.000000000 -0400
73347+++ linux-2.6.32.43/net/iucv/af_iucv.c 2011-05-04 17:56:28.000000000 -0400
73348@@ -651,10 +651,10 @@ static int iucv_sock_autobind(struct soc
73349
73350 write_lock_bh(&iucv_sk_list.lock);
73351
73352- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
73353+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73354 while (__iucv_get_sock_by_name(name)) {
73355 sprintf(name, "%08x",
73356- atomic_inc_return(&iucv_sk_list.autobind_name));
73357+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
73358 }
73359
73360 write_unlock_bh(&iucv_sk_list.lock);
73361diff -urNp linux-2.6.32.43/net/key/af_key.c linux-2.6.32.43/net/key/af_key.c
73362--- linux-2.6.32.43/net/key/af_key.c 2011-03-27 14:31:47.000000000 -0400
73363+++ linux-2.6.32.43/net/key/af_key.c 2011-05-16 21:46:57.000000000 -0400
73364@@ -2489,6 +2489,8 @@ static int pfkey_migrate(struct sock *sk
73365 struct xfrm_migrate m[XFRM_MAX_DEPTH];
73366 struct xfrm_kmaddress k;
73367
73368+ pax_track_stack();
73369+
73370 if (!present_and_same_family(ext_hdrs[SADB_EXT_ADDRESS_SRC - 1],
73371 ext_hdrs[SADB_EXT_ADDRESS_DST - 1]) ||
73372 !ext_hdrs[SADB_X_EXT_POLICY - 1]) {
73373@@ -3660,7 +3662,11 @@ static int pfkey_seq_show(struct seq_fil
73374 seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n");
73375 else
73376 seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n",
73377+#ifdef CONFIG_GRKERNSEC_HIDESYM
73378+ NULL,
73379+#else
73380 s,
73381+#endif
73382 atomic_read(&s->sk_refcnt),
73383 sk_rmem_alloc_get(s),
73384 sk_wmem_alloc_get(s),
73385diff -urNp linux-2.6.32.43/net/lapb/lapb_iface.c linux-2.6.32.43/net/lapb/lapb_iface.c
73386--- linux-2.6.32.43/net/lapb/lapb_iface.c 2011-03-27 14:31:47.000000000 -0400
73387+++ linux-2.6.32.43/net/lapb/lapb_iface.c 2011-08-05 20:33:55.000000000 -0400
73388@@ -157,7 +157,7 @@ int lapb_register(struct net_device *dev
73389 goto out;
73390
73391 lapb->dev = dev;
73392- lapb->callbacks = *callbacks;
73393+ lapb->callbacks = callbacks;
73394
73395 __lapb_insert_cb(lapb);
73396
73397@@ -379,32 +379,32 @@ int lapb_data_received(struct net_device
73398
73399 void lapb_connect_confirmation(struct lapb_cb *lapb, int reason)
73400 {
73401- if (lapb->callbacks.connect_confirmation)
73402- lapb->callbacks.connect_confirmation(lapb->dev, reason);
73403+ if (lapb->callbacks->connect_confirmation)
73404+ lapb->callbacks->connect_confirmation(lapb->dev, reason);
73405 }
73406
73407 void lapb_connect_indication(struct lapb_cb *lapb, int reason)
73408 {
73409- if (lapb->callbacks.connect_indication)
73410- lapb->callbacks.connect_indication(lapb->dev, reason);
73411+ if (lapb->callbacks->connect_indication)
73412+ lapb->callbacks->connect_indication(lapb->dev, reason);
73413 }
73414
73415 void lapb_disconnect_confirmation(struct lapb_cb *lapb, int reason)
73416 {
73417- if (lapb->callbacks.disconnect_confirmation)
73418- lapb->callbacks.disconnect_confirmation(lapb->dev, reason);
73419+ if (lapb->callbacks->disconnect_confirmation)
73420+ lapb->callbacks->disconnect_confirmation(lapb->dev, reason);
73421 }
73422
73423 void lapb_disconnect_indication(struct lapb_cb *lapb, int reason)
73424 {
73425- if (lapb->callbacks.disconnect_indication)
73426- lapb->callbacks.disconnect_indication(lapb->dev, reason);
73427+ if (lapb->callbacks->disconnect_indication)
73428+ lapb->callbacks->disconnect_indication(lapb->dev, reason);
73429 }
73430
73431 int lapb_data_indication(struct lapb_cb *lapb, struct sk_buff *skb)
73432 {
73433- if (lapb->callbacks.data_indication)
73434- return lapb->callbacks.data_indication(lapb->dev, skb);
73435+ if (lapb->callbacks->data_indication)
73436+ return lapb->callbacks->data_indication(lapb->dev, skb);
73437
73438 kfree_skb(skb);
73439 return NET_RX_SUCCESS; /* For now; must be != NET_RX_DROP */
73440@@ -414,8 +414,8 @@ int lapb_data_transmit(struct lapb_cb *l
73441 {
73442 int used = 0;
73443
73444- if (lapb->callbacks.data_transmit) {
73445- lapb->callbacks.data_transmit(lapb->dev, skb);
73446+ if (lapb->callbacks->data_transmit) {
73447+ lapb->callbacks->data_transmit(lapb->dev, skb);
73448 used = 1;
73449 }
73450
73451diff -urNp linux-2.6.32.43/net/mac80211/cfg.c linux-2.6.32.43/net/mac80211/cfg.c
73452--- linux-2.6.32.43/net/mac80211/cfg.c 2011-03-27 14:31:47.000000000 -0400
73453+++ linux-2.6.32.43/net/mac80211/cfg.c 2011-04-17 15:56:46.000000000 -0400
73454@@ -1369,7 +1369,7 @@ static int ieee80211_set_bitrate_mask(st
73455 return err;
73456 }
73457
73458-struct cfg80211_ops mac80211_config_ops = {
73459+const struct cfg80211_ops mac80211_config_ops = {
73460 .add_virtual_intf = ieee80211_add_iface,
73461 .del_virtual_intf = ieee80211_del_iface,
73462 .change_virtual_intf = ieee80211_change_iface,
73463diff -urNp linux-2.6.32.43/net/mac80211/cfg.h linux-2.6.32.43/net/mac80211/cfg.h
73464--- linux-2.6.32.43/net/mac80211/cfg.h 2011-03-27 14:31:47.000000000 -0400
73465+++ linux-2.6.32.43/net/mac80211/cfg.h 2011-04-17 15:56:46.000000000 -0400
73466@@ -4,6 +4,6 @@
73467 #ifndef __CFG_H
73468 #define __CFG_H
73469
73470-extern struct cfg80211_ops mac80211_config_ops;
73471+extern const struct cfg80211_ops mac80211_config_ops;
73472
73473 #endif /* __CFG_H */
73474diff -urNp linux-2.6.32.43/net/mac80211/debugfs_key.c linux-2.6.32.43/net/mac80211/debugfs_key.c
73475--- linux-2.6.32.43/net/mac80211/debugfs_key.c 2011-03-27 14:31:47.000000000 -0400
73476+++ linux-2.6.32.43/net/mac80211/debugfs_key.c 2011-04-17 15:56:46.000000000 -0400
73477@@ -211,9 +211,13 @@ static ssize_t key_key_read(struct file
73478 size_t count, loff_t *ppos)
73479 {
73480 struct ieee80211_key *key = file->private_data;
73481- int i, res, bufsize = 2 * key->conf.keylen + 2;
73482+ int i, bufsize = 2 * key->conf.keylen + 2;
73483 char *buf = kmalloc(bufsize, GFP_KERNEL);
73484 char *p = buf;
73485+ ssize_t res;
73486+
73487+ if (buf == NULL)
73488+ return -ENOMEM;
73489
73490 for (i = 0; i < key->conf.keylen; i++)
73491 p += scnprintf(p, bufsize + buf - p, "%02x", key->conf.key[i]);
73492diff -urNp linux-2.6.32.43/net/mac80211/debugfs_sta.c linux-2.6.32.43/net/mac80211/debugfs_sta.c
73493--- linux-2.6.32.43/net/mac80211/debugfs_sta.c 2011-03-27 14:31:47.000000000 -0400
73494+++ linux-2.6.32.43/net/mac80211/debugfs_sta.c 2011-05-16 21:46:57.000000000 -0400
73495@@ -124,6 +124,8 @@ static ssize_t sta_agg_status_read(struc
73496 int i;
73497 struct sta_info *sta = file->private_data;
73498
73499+ pax_track_stack();
73500+
73501 spin_lock_bh(&sta->lock);
73502 p += scnprintf(p, sizeof(buf)+buf-p, "next dialog_token is %#02x\n",
73503 sta->ampdu_mlme.dialog_token_allocator + 1);
73504diff -urNp linux-2.6.32.43/net/mac80211/ieee80211_i.h linux-2.6.32.43/net/mac80211/ieee80211_i.h
73505--- linux-2.6.32.43/net/mac80211/ieee80211_i.h 2011-03-27 14:31:47.000000000 -0400
73506+++ linux-2.6.32.43/net/mac80211/ieee80211_i.h 2011-04-17 15:56:46.000000000 -0400
73507@@ -25,6 +25,7 @@
73508 #include <linux/etherdevice.h>
73509 #include <net/cfg80211.h>
73510 #include <net/mac80211.h>
73511+#include <asm/local.h>
73512 #include "key.h"
73513 #include "sta_info.h"
73514
73515@@ -635,7 +636,7 @@ struct ieee80211_local {
73516 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
73517 spinlock_t queue_stop_reason_lock;
73518
73519- int open_count;
73520+ local_t open_count;
73521 int monitors, cooked_mntrs;
73522 /* number of interfaces with corresponding FIF_ flags */
73523 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll;
73524diff -urNp linux-2.6.32.43/net/mac80211/iface.c linux-2.6.32.43/net/mac80211/iface.c
73525--- linux-2.6.32.43/net/mac80211/iface.c 2011-03-27 14:31:47.000000000 -0400
73526+++ linux-2.6.32.43/net/mac80211/iface.c 2011-04-17 15:56:46.000000000 -0400
73527@@ -166,7 +166,7 @@ static int ieee80211_open(struct net_dev
73528 break;
73529 }
73530
73531- if (local->open_count == 0) {
73532+ if (local_read(&local->open_count) == 0) {
73533 res = drv_start(local);
73534 if (res)
73535 goto err_del_bss;
73536@@ -196,7 +196,7 @@ static int ieee80211_open(struct net_dev
73537 * Validate the MAC address for this device.
73538 */
73539 if (!is_valid_ether_addr(dev->dev_addr)) {
73540- if (!local->open_count)
73541+ if (!local_read(&local->open_count))
73542 drv_stop(local);
73543 return -EADDRNOTAVAIL;
73544 }
73545@@ -292,7 +292,7 @@ static int ieee80211_open(struct net_dev
73546
73547 hw_reconf_flags |= __ieee80211_recalc_idle(local);
73548
73549- local->open_count++;
73550+ local_inc(&local->open_count);
73551 if (hw_reconf_flags) {
73552 ieee80211_hw_config(local, hw_reconf_flags);
73553 /*
73554@@ -320,7 +320,7 @@ static int ieee80211_open(struct net_dev
73555 err_del_interface:
73556 drv_remove_interface(local, &conf);
73557 err_stop:
73558- if (!local->open_count)
73559+ if (!local_read(&local->open_count))
73560 drv_stop(local);
73561 err_del_bss:
73562 sdata->bss = NULL;
73563@@ -420,7 +420,7 @@ static int ieee80211_stop(struct net_dev
73564 WARN_ON(!list_empty(&sdata->u.ap.vlans));
73565 }
73566
73567- local->open_count--;
73568+ local_dec(&local->open_count);
73569
73570 switch (sdata->vif.type) {
73571 case NL80211_IFTYPE_AP_VLAN:
73572@@ -526,7 +526,7 @@ static int ieee80211_stop(struct net_dev
73573
73574 ieee80211_recalc_ps(local, -1);
73575
73576- if (local->open_count == 0) {
73577+ if (local_read(&local->open_count) == 0) {
73578 ieee80211_clear_tx_pending(local);
73579 ieee80211_stop_device(local);
73580
73581diff -urNp linux-2.6.32.43/net/mac80211/main.c linux-2.6.32.43/net/mac80211/main.c
73582--- linux-2.6.32.43/net/mac80211/main.c 2011-05-10 22:12:02.000000000 -0400
73583+++ linux-2.6.32.43/net/mac80211/main.c 2011-05-10 22:12:34.000000000 -0400
73584@@ -145,7 +145,7 @@ int ieee80211_hw_config(struct ieee80211
73585 local->hw.conf.power_level = power;
73586 }
73587
73588- if (changed && local->open_count) {
73589+ if (changed && local_read(&local->open_count)) {
73590 ret = drv_config(local, changed);
73591 /*
73592 * Goal:
73593diff -urNp linux-2.6.32.43/net/mac80211/mlme.c linux-2.6.32.43/net/mac80211/mlme.c
73594--- linux-2.6.32.43/net/mac80211/mlme.c 2011-03-27 14:31:47.000000000 -0400
73595+++ linux-2.6.32.43/net/mac80211/mlme.c 2011-05-16 21:46:57.000000000 -0400
73596@@ -1438,6 +1438,8 @@ ieee80211_rx_mgmt_assoc_resp(struct ieee
73597 bool have_higher_than_11mbit = false, newsta = false;
73598 u16 ap_ht_cap_flags;
73599
73600+ pax_track_stack();
73601+
73602 /*
73603 * AssocResp and ReassocResp have identical structure, so process both
73604 * of them in this function.
73605diff -urNp linux-2.6.32.43/net/mac80211/pm.c linux-2.6.32.43/net/mac80211/pm.c
73606--- linux-2.6.32.43/net/mac80211/pm.c 2011-03-27 14:31:47.000000000 -0400
73607+++ linux-2.6.32.43/net/mac80211/pm.c 2011-04-17 15:56:46.000000000 -0400
73608@@ -107,7 +107,7 @@ int __ieee80211_suspend(struct ieee80211
73609 }
73610
73611 /* stop hardware - this must stop RX */
73612- if (local->open_count)
73613+ if (local_read(&local->open_count))
73614 ieee80211_stop_device(local);
73615
73616 local->suspended = true;
73617diff -urNp linux-2.6.32.43/net/mac80211/rate.c linux-2.6.32.43/net/mac80211/rate.c
73618--- linux-2.6.32.43/net/mac80211/rate.c 2011-03-27 14:31:47.000000000 -0400
73619+++ linux-2.6.32.43/net/mac80211/rate.c 2011-04-17 15:56:46.000000000 -0400
73620@@ -287,7 +287,7 @@ int ieee80211_init_rate_ctrl_alg(struct
73621 struct rate_control_ref *ref, *old;
73622
73623 ASSERT_RTNL();
73624- if (local->open_count)
73625+ if (local_read(&local->open_count))
73626 return -EBUSY;
73627
73628 ref = rate_control_alloc(name, local);
73629diff -urNp linux-2.6.32.43/net/mac80211/tx.c linux-2.6.32.43/net/mac80211/tx.c
73630--- linux-2.6.32.43/net/mac80211/tx.c 2011-03-27 14:31:47.000000000 -0400
73631+++ linux-2.6.32.43/net/mac80211/tx.c 2011-04-17 15:56:46.000000000 -0400
73632@@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct
73633 return cpu_to_le16(dur);
73634 }
73635
73636-static int inline is_ieee80211_device(struct ieee80211_local *local,
73637+static inline int is_ieee80211_device(struct ieee80211_local *local,
73638 struct net_device *dev)
73639 {
73640 return local == wdev_priv(dev->ieee80211_ptr);
73641diff -urNp linux-2.6.32.43/net/mac80211/util.c linux-2.6.32.43/net/mac80211/util.c
73642--- linux-2.6.32.43/net/mac80211/util.c 2011-03-27 14:31:47.000000000 -0400
73643+++ linux-2.6.32.43/net/mac80211/util.c 2011-04-17 15:56:46.000000000 -0400
73644@@ -1042,7 +1042,7 @@ int ieee80211_reconfig(struct ieee80211_
73645 local->resuming = true;
73646
73647 /* restart hardware */
73648- if (local->open_count) {
73649+ if (local_read(&local->open_count)) {
73650 /*
73651 * Upon resume hardware can sometimes be goofy due to
73652 * various platform / driver / bus issues, so restarting
73653diff -urNp linux-2.6.32.43/net/netfilter/ipvs/ip_vs_app.c linux-2.6.32.43/net/netfilter/ipvs/ip_vs_app.c
73654--- linux-2.6.32.43/net/netfilter/ipvs/ip_vs_app.c 2011-03-27 14:31:47.000000000 -0400
73655+++ linux-2.6.32.43/net/netfilter/ipvs/ip_vs_app.c 2011-05-17 19:26:34.000000000 -0400
73656@@ -564,7 +564,7 @@ static const struct file_operations ip_v
73657 .open = ip_vs_app_open,
73658 .read = seq_read,
73659 .llseek = seq_lseek,
73660- .release = seq_release,
73661+ .release = seq_release_net,
73662 };
73663 #endif
73664
73665diff -urNp linux-2.6.32.43/net/netfilter/ipvs/ip_vs_conn.c linux-2.6.32.43/net/netfilter/ipvs/ip_vs_conn.c
73666--- linux-2.6.32.43/net/netfilter/ipvs/ip_vs_conn.c 2011-03-27 14:31:47.000000000 -0400
73667+++ linux-2.6.32.43/net/netfilter/ipvs/ip_vs_conn.c 2011-05-17 19:26:34.000000000 -0400
73668@@ -453,10 +453,10 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, s
73669 /* if the connection is not template and is created
73670 * by sync, preserve the activity flag.
73671 */
73672- cp->flags |= atomic_read(&dest->conn_flags) &
73673+ cp->flags |= atomic_read_unchecked(&dest->conn_flags) &
73674 (~IP_VS_CONN_F_INACTIVE);
73675 else
73676- cp->flags |= atomic_read(&dest->conn_flags);
73677+ cp->flags |= atomic_read_unchecked(&dest->conn_flags);
73678 cp->dest = dest;
73679
73680 IP_VS_DBG_BUF(7, "Bind-dest %s c:%s:%d v:%s:%d "
73681@@ -723,7 +723,7 @@ ip_vs_conn_new(int af, int proto, const
73682 atomic_set(&cp->refcnt, 1);
73683
73684 atomic_set(&cp->n_control, 0);
73685- atomic_set(&cp->in_pkts, 0);
73686+ atomic_set_unchecked(&cp->in_pkts, 0);
73687
73688 atomic_inc(&ip_vs_conn_count);
73689 if (flags & IP_VS_CONN_F_NO_CPORT)
73690@@ -871,7 +871,7 @@ static const struct file_operations ip_v
73691 .open = ip_vs_conn_open,
73692 .read = seq_read,
73693 .llseek = seq_lseek,
73694- .release = seq_release,
73695+ .release = seq_release_net,
73696 };
73697
73698 static const char *ip_vs_origin_name(unsigned flags)
73699@@ -934,7 +934,7 @@ static const struct file_operations ip_v
73700 .open = ip_vs_conn_sync_open,
73701 .read = seq_read,
73702 .llseek = seq_lseek,
73703- .release = seq_release,
73704+ .release = seq_release_net,
73705 };
73706
73707 #endif
73708@@ -961,7 +961,7 @@ static inline int todrop_entry(struct ip
73709
73710 /* Don't drop the entry if its number of incoming packets is not
73711 located in [0, 8] */
73712- i = atomic_read(&cp->in_pkts);
73713+ i = atomic_read_unchecked(&cp->in_pkts);
73714 if (i > 8 || i < 0) return 0;
73715
73716 if (!todrop_rate[i]) return 0;
73717diff -urNp linux-2.6.32.43/net/netfilter/ipvs/ip_vs_core.c linux-2.6.32.43/net/netfilter/ipvs/ip_vs_core.c
73718--- linux-2.6.32.43/net/netfilter/ipvs/ip_vs_core.c 2011-03-27 14:31:47.000000000 -0400
73719+++ linux-2.6.32.43/net/netfilter/ipvs/ip_vs_core.c 2011-05-04 17:56:28.000000000 -0400
73720@@ -485,7 +485,7 @@ int ip_vs_leave(struct ip_vs_service *sv
73721 ret = cp->packet_xmit(skb, cp, pp);
73722 /* do not touch skb anymore */
73723
73724- atomic_inc(&cp->in_pkts);
73725+ atomic_inc_unchecked(&cp->in_pkts);
73726 ip_vs_conn_put(cp);
73727 return ret;
73728 }
73729@@ -1357,7 +1357,7 @@ ip_vs_in(unsigned int hooknum, struct sk
73730 * Sync connection if it is about to close to
73731 * encorage the standby servers to update the connections timeout
73732 */
73733- pkts = atomic_add_return(1, &cp->in_pkts);
73734+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
73735 if (af == AF_INET &&
73736 (ip_vs_sync_state & IP_VS_STATE_MASTER) &&
73737 (((cp->protocol != IPPROTO_TCP ||
73738diff -urNp linux-2.6.32.43/net/netfilter/ipvs/ip_vs_ctl.c linux-2.6.32.43/net/netfilter/ipvs/ip_vs_ctl.c
73739--- linux-2.6.32.43/net/netfilter/ipvs/ip_vs_ctl.c 2011-03-27 14:31:47.000000000 -0400
73740+++ linux-2.6.32.43/net/netfilter/ipvs/ip_vs_ctl.c 2011-05-17 19:26:34.000000000 -0400
73741@@ -792,7 +792,7 @@ __ip_vs_update_dest(struct ip_vs_service
73742 ip_vs_rs_hash(dest);
73743 write_unlock_bh(&__ip_vs_rs_lock);
73744 }
73745- atomic_set(&dest->conn_flags, conn_flags);
73746+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
73747
73748 /* bind the service */
73749 if (!dest->svc) {
73750@@ -1888,7 +1888,7 @@ static int ip_vs_info_seq_show(struct se
73751 " %-7s %-6d %-10d %-10d\n",
73752 &dest->addr.in6,
73753 ntohs(dest->port),
73754- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73755+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73756 atomic_read(&dest->weight),
73757 atomic_read(&dest->activeconns),
73758 atomic_read(&dest->inactconns));
73759@@ -1899,7 +1899,7 @@ static int ip_vs_info_seq_show(struct se
73760 "%-7s %-6d %-10d %-10d\n",
73761 ntohl(dest->addr.ip),
73762 ntohs(dest->port),
73763- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
73764+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
73765 atomic_read(&dest->weight),
73766 atomic_read(&dest->activeconns),
73767 atomic_read(&dest->inactconns));
73768@@ -1927,7 +1927,7 @@ static const struct file_operations ip_v
73769 .open = ip_vs_info_open,
73770 .read = seq_read,
73771 .llseek = seq_lseek,
73772- .release = seq_release_private,
73773+ .release = seq_release_net,
73774 };
73775
73776 #endif
73777@@ -1976,7 +1976,7 @@ static const struct file_operations ip_v
73778 .open = ip_vs_stats_seq_open,
73779 .read = seq_read,
73780 .llseek = seq_lseek,
73781- .release = single_release,
73782+ .release = single_release_net,
73783 };
73784
73785 #endif
73786@@ -2292,7 +2292,7 @@ __ip_vs_get_dest_entries(const struct ip
73787
73788 entry.addr = dest->addr.ip;
73789 entry.port = dest->port;
73790- entry.conn_flags = atomic_read(&dest->conn_flags);
73791+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
73792 entry.weight = atomic_read(&dest->weight);
73793 entry.u_threshold = dest->u_threshold;
73794 entry.l_threshold = dest->l_threshold;
73795@@ -2353,6 +2353,8 @@ do_ip_vs_get_ctl(struct sock *sk, int cm
73796 unsigned char arg[128];
73797 int ret = 0;
73798
73799+ pax_track_stack();
73800+
73801 if (!capable(CAP_NET_ADMIN))
73802 return -EPERM;
73803
73804@@ -2802,7 +2804,7 @@ static int ip_vs_genl_fill_dest(struct s
73805 NLA_PUT_U16(skb, IPVS_DEST_ATTR_PORT, dest->port);
73806
73807 NLA_PUT_U32(skb, IPVS_DEST_ATTR_FWD_METHOD,
73808- atomic_read(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73809+ atomic_read_unchecked(&dest->conn_flags) & IP_VS_CONN_F_FWD_MASK);
73810 NLA_PUT_U32(skb, IPVS_DEST_ATTR_WEIGHT, atomic_read(&dest->weight));
73811 NLA_PUT_U32(skb, IPVS_DEST_ATTR_U_THRESH, dest->u_threshold);
73812 NLA_PUT_U32(skb, IPVS_DEST_ATTR_L_THRESH, dest->l_threshold);
73813diff -urNp linux-2.6.32.43/net/netfilter/ipvs/ip_vs_sync.c linux-2.6.32.43/net/netfilter/ipvs/ip_vs_sync.c
73814--- linux-2.6.32.43/net/netfilter/ipvs/ip_vs_sync.c 2011-03-27 14:31:47.000000000 -0400
73815+++ linux-2.6.32.43/net/netfilter/ipvs/ip_vs_sync.c 2011-05-04 17:56:28.000000000 -0400
73816@@ -438,7 +438,7 @@ static void ip_vs_process_message(const
73817
73818 if (opt)
73819 memcpy(&cp->in_seq, opt, sizeof(*opt));
73820- atomic_set(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
73821+ atomic_set_unchecked(&cp->in_pkts, sysctl_ip_vs_sync_threshold[0]);
73822 cp->state = state;
73823 cp->old_state = cp->state;
73824 /*
73825diff -urNp linux-2.6.32.43/net/netfilter/ipvs/ip_vs_xmit.c linux-2.6.32.43/net/netfilter/ipvs/ip_vs_xmit.c
73826--- linux-2.6.32.43/net/netfilter/ipvs/ip_vs_xmit.c 2011-03-27 14:31:47.000000000 -0400
73827+++ linux-2.6.32.43/net/netfilter/ipvs/ip_vs_xmit.c 2011-05-04 17:56:28.000000000 -0400
73828@@ -875,7 +875,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, str
73829 else
73830 rc = NF_ACCEPT;
73831 /* do not touch skb anymore */
73832- atomic_inc(&cp->in_pkts);
73833+ atomic_inc_unchecked(&cp->in_pkts);
73834 goto out;
73835 }
73836
73837@@ -949,7 +949,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb,
73838 else
73839 rc = NF_ACCEPT;
73840 /* do not touch skb anymore */
73841- atomic_inc(&cp->in_pkts);
73842+ atomic_inc_unchecked(&cp->in_pkts);
73843 goto out;
73844 }
73845
73846diff -urNp linux-2.6.32.43/net/netfilter/Kconfig linux-2.6.32.43/net/netfilter/Kconfig
73847--- linux-2.6.32.43/net/netfilter/Kconfig 2011-03-27 14:31:47.000000000 -0400
73848+++ linux-2.6.32.43/net/netfilter/Kconfig 2011-04-17 15:56:46.000000000 -0400
73849@@ -635,6 +635,16 @@ config NETFILTER_XT_MATCH_ESP
73850
73851 To compile it as a module, choose M here. If unsure, say N.
73852
73853+config NETFILTER_XT_MATCH_GRADM
73854+ tristate '"gradm" match support'
73855+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
73856+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
73857+ ---help---
73858+ The gradm match allows to match on grsecurity RBAC being enabled.
73859+ It is useful when iptables rules are applied early on bootup to
73860+ prevent connections to the machine (except from a trusted host)
73861+ while the RBAC system is disabled.
73862+
73863 config NETFILTER_XT_MATCH_HASHLIMIT
73864 tristate '"hashlimit" match support'
73865 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
73866diff -urNp linux-2.6.32.43/net/netfilter/Makefile linux-2.6.32.43/net/netfilter/Makefile
73867--- linux-2.6.32.43/net/netfilter/Makefile 2011-03-27 14:31:47.000000000 -0400
73868+++ linux-2.6.32.43/net/netfilter/Makefile 2011-04-17 15:56:46.000000000 -0400
73869@@ -68,6 +68,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_CONNTRAC
73870 obj-$(CONFIG_NETFILTER_XT_MATCH_DCCP) += xt_dccp.o
73871 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
73872 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
73873+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
73874 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
73875 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
73876 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
73877diff -urNp linux-2.6.32.43/net/netfilter/nf_conntrack_netlink.c linux-2.6.32.43/net/netfilter/nf_conntrack_netlink.c
73878--- linux-2.6.32.43/net/netfilter/nf_conntrack_netlink.c 2011-03-27 14:31:47.000000000 -0400
73879+++ linux-2.6.32.43/net/netfilter/nf_conntrack_netlink.c 2011-04-17 15:56:46.000000000 -0400
73880@@ -706,7 +706,7 @@ ctnetlink_parse_tuple_proto(struct nlatt
73881 static int
73882 ctnetlink_parse_tuple(const struct nlattr * const cda[],
73883 struct nf_conntrack_tuple *tuple,
73884- enum ctattr_tuple type, u_int8_t l3num)
73885+ enum ctattr_type type, u_int8_t l3num)
73886 {
73887 struct nlattr *tb[CTA_TUPLE_MAX+1];
73888 int err;
73889diff -urNp linux-2.6.32.43/net/netfilter/nfnetlink_log.c linux-2.6.32.43/net/netfilter/nfnetlink_log.c
73890--- linux-2.6.32.43/net/netfilter/nfnetlink_log.c 2011-03-27 14:31:47.000000000 -0400
73891+++ linux-2.6.32.43/net/netfilter/nfnetlink_log.c 2011-05-04 17:56:28.000000000 -0400
73892@@ -68,7 +68,7 @@ struct nfulnl_instance {
73893 };
73894
73895 static DEFINE_RWLOCK(instances_lock);
73896-static atomic_t global_seq;
73897+static atomic_unchecked_t global_seq;
73898
73899 #define INSTANCE_BUCKETS 16
73900 static struct hlist_head instance_table[INSTANCE_BUCKETS];
73901@@ -493,7 +493,7 @@ __build_packet_message(struct nfulnl_ins
73902 /* global sequence number */
73903 if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
73904 NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
73905- htonl(atomic_inc_return(&global_seq)));
73906+ htonl(atomic_inc_return_unchecked(&global_seq)));
73907
73908 if (data_len) {
73909 struct nlattr *nla;
73910diff -urNp linux-2.6.32.43/net/netfilter/xt_gradm.c linux-2.6.32.43/net/netfilter/xt_gradm.c
73911--- linux-2.6.32.43/net/netfilter/xt_gradm.c 1969-12-31 19:00:00.000000000 -0500
73912+++ linux-2.6.32.43/net/netfilter/xt_gradm.c 2011-04-17 15:56:46.000000000 -0400
73913@@ -0,0 +1,51 @@
73914+/*
73915+ * gradm match for netfilter
73916